diff --git a/docsrc/conf.py b/docsrc/conf.py
index f9abd5918da11fceb724249629160d30c2483cf3..629c7e180939edad686cb1856aa808f8bc943c94 100644
--- a/docsrc/conf.py
+++ b/docsrc/conf.py
@@ -5,19 +5,20 @@
 # sys.path.insert(0, os.path.abspath('.'))
 import os
 import sys
-sys.path.insert(0, os.path.abspath('..'))
+
+sys.path.insert(0, os.path.abspath(".."))
 
 
 # -- Project information -----------------------------------------------------
 
-project = 'experiment-impact-tracker'
-copyright = '2019, Peter Henderson'
-author = 'Peter Henderson'
+project = "experiment-impact-tracker"
+copyright = "2019, Peter Henderson"
+author = "Peter Henderson"
 
 # The short X.Y version
-version = '1.1'
+version = "1.1"
 # The full version, including alpha/beta/rc tags
-release = '1.1.0'
+release = "1.1.0"
 
 
 # -- General configuration ---------------------------------------------------
@@ -30,24 +31,24 @@ release = '1.1.0'
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.coverage',
-    'sphinx.ext.napoleon',
-    'sphinx.ext.todo',
-    'sphinx.ext.githubpages',
+    "sphinx.ext.autodoc",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.githubpages",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -59,10 +60,10 @@ language = None
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path .
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 
 # -- Options for HTML output -------------------------------------------------
@@ -80,7 +81,7 @@ pygments_style = 'sphinx'
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
@@ -91,4 +92,4 @@ html_static_path = ['_static']
 # 'searchbox.html']``.
 #
 # html_sidebars = {}
-html_theme = 'nature'
+html_theme = "nature"
diff --git a/examples/my_experiment.py b/examples/my_experiment.py
index 3dc935d95108795286b263c65da049140e533ac2..b2d70958e67de5dfb7b7f8d1356db2c5ab5e9413 100644
--- a/examples/my_experiment.py
+++ b/examples/my_experiment.py
@@ -1,11 +1,14 @@
-from experiment_impact_tracker.compute_tracker import ImpactTracker
 import sys
-import torch
 import tempfile
 
+import torch
+
+from experiment_impact_tracker.compute_tracker import ImpactTracker
+
+
 class Experiment:
     def __init__(self):
-        device = torch.device('cpu')
+        device = torch.device("cpu")
         # device = torch.device('cuda') # Uncomment this to run on GPU
 
         # N is batch size; D_in is input dimension;
@@ -65,4 +68,3 @@ def my_experiment() -> None:
 
 if __name__ == "__main__":
     my_experiment()
-
diff --git a/experiment_impact_tracker/compute_tracker.py b/experiment_impact_tracker/compute_tracker.py
index 14d8259257f4b0ee168695241030f97c97bddf06..748b5d5f1faaa0e0f52c5cc4d59b5e0969291eff 100644
--- a/experiment_impact_tracker/compute_tracker.py
+++ b/experiment_impact_tracker/compute_tracker.py
@@ -7,31 +7,36 @@ import sys
 import time
 import traceback
 from datetime import datetime
+from pathlib import Path
 from queue import Empty as EmptyQueueException
 from subprocess import PIPE, Popen
 from sys import platform
-from pathlib import Path
-
 
-from pandas.io.json import json_normalize
 import numpy as np
 import pandas as pd
+import psutil
 import ujson as json
+from pandas.io.json import json_normalize
 
-import psutil
 from experiment_impact_tracker.cpu import rapl
-from experiment_impact_tracker.data_utils import *
 from experiment_impact_tracker.cpu.common import get_my_cpu_info
 from experiment_impact_tracker.cpu.intel import get_rapl_power
-from experiment_impact_tracker.data_info_and_router import DATA_HEADERS, INITIAL_INFO
+from experiment_impact_tracker.data_info_and_router import (DATA_HEADERS,
+                                                            INITIAL_INFO)
+from experiment_impact_tracker.data_utils import *
+from experiment_impact_tracker.emissions.common import \
+    is_capable_realtime_carbon_intensity
+from experiment_impact_tracker.emissions.get_region_metrics import \
+    get_current_region_info_cached
 from experiment_impact_tracker.gpu.nvidia import (get_gpu_info,
                                                   get_nvidia_gpu_power)
-from experiment_impact_tracker.utils import write_json_data_to_file, safe_file_path, processify, get_timestamp
-from experiment_impact_tracker.emissions.common import is_capable_realtime_carbon_intensity
-from experiment_impact_tracker.emissions.get_region_metrics import get_current_region_info_cached
+from experiment_impact_tracker.utils import (get_timestamp, processify,
+                                             safe_file_path,
+                                             write_json_data_to_file)
 
 SLEEP_TIME = 1
 
+
 def read_latest_stats(log_dir):
     log_path = os.path.join(log_dir, DATAPATH)
 
@@ -48,9 +53,12 @@ def read_latest_stats(log_dir):
 
 def _sample_and_log_power(log_dir, initial_info, logger=None):
     current_process = psutil.Process(os.getppid())
-    process_ids = [current_process.pid] + \
-        [child.pid for child in current_process.children(recursive=True)]
-    process_ids = list(set(process_ids)) # dedupe so that we don't double count by accident
+    process_ids = [current_process.pid] + [
+        child.pid for child in current_process.children(recursive=True)
+    ]
+    process_ids = list(
+        set(process_ids)
+    )  # dedupe so that we don't double count by accident
 
     required_headers = _get_compatible_data_headers(get_current_region_info_cached()[0])
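+    # Note: _get_compatible_data_headers filters DATA_HEADERS down to the metrics
+    # whose compatability checks pass for this machine/region; the loop below then
+    # calls each header's routing["function"], which may return either a single
+    # value or a dict covering several headers at once (e.g. get_rapl_power).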
 
@@ -66,9 +74,16 @@ def _sample_and_log_power(log_dir, initial_info, logger=None):
             continue
 
         start = time.time()
-        results = header["routing"]["function"](process_ids, logger=logger, region=initial_info['region']['id'], log_dir=log_dir)
+        results = header["routing"]["function"](
+            process_ids,
+            logger=logger,
+            region=initial_info["region"]["id"],
+            log_dir=log_dir,
+        )
         end = time.time()
-        logger.warn("Datapoint {} took {} seconds".format(header["name"], (end-start)))
+        logger.warn(
+            "Datapoint {} took {} seconds".format(header["name"], (end - start))
+        )
 
         if isinstance(results, dict):
             # if we return a dict of results, could account for multiple headers
@@ -106,7 +121,7 @@ def launch_power_monitor(queue, log_dir, initial_info, logger=None):
         except:
             ex_type, ex_value, tb = sys.exc_info()
             logger.error("Encountered exception within power monitor thread!")
-            logger.error(''.join(traceback.format_tb(tb)))
+            logger.error("".join(traceback.format_tb(tb)))
             raise
         time.sleep(SLEEP_TIME)
 
@@ -122,22 +137,21 @@ def _get_compatible_data_headers(region=None):
                 break
         if compat:
             compatible_headers.append(header)
-            
+
     return compatible_headers
 
+
 def _validate_compatabilities(compatabilities, *args, **kwargs):
     for compatability_fn in compatabilities:
         if not compatability_fn(*args, **kwargs):
             return False
-    return True          
-
+    return True
 
 
 def gather_initial_info(log_dir):
     # TODO: log one time info: CPU/GPU info, version of this package, region, datetime for start of experiment, CO2 estimate data.
     # this will be used to build a latex table later.
 
-
     info_path = safe_file_path(os.path.join(log_dir, INFOPATH))
 
     data = {}
@@ -149,7 +163,7 @@ def gather_initial_info(log_dir):
         if _validate_compatabilities(compatabilities):
             data[key] = info_["routing"]["function"]()
 
-    with open(info_path, 'wb') as info_file:
+    with open(info_path, "wb") as info_file:
         pickle.dump(data, info_file)
 
     # touch datafile to clear out any past cruft and write headers
@@ -163,9 +177,7 @@ def gather_initial_info(log_dir):
     return data
 
 
-
 class ImpactTracker(object):
-
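+    """Tracks the compute and energy footprint of the calling process and its children.
+
+    A minimal usage sketch (assumed from this module, not an official example):
+
+        tracker = ImpactTracker("logdir")
+        tracker.launch_impact_monitor()  # background sampling; stops and logs final info at exit
+        ...  # run the experiment
+        tracker.get_latest_info_and_check_for_errors()  # surface any monitor exceptions
+    """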
     def __init__(self, logdir):
         self.logdir = logdir
         self._setup_logging()
@@ -176,19 +188,24 @@ class ImpactTracker(object):
     def _setup_logging(self):
         # Create a custom logger
         logger = logging.getLogger(
-            "experiment_impact_tracker.compute_tracker.ImpactTracker")
+            "experiment_impact_tracker.compute_tracker.ImpactTracker"
+        )
 
         # Create handlers
         c_handler = logging.StreamHandler()
-        f_handler = logging.FileHandler(safe_file_path(os.path.join(
-            self.logdir, BASE_LOG_PATH, 'impact_tracker_log.log')))
+        f_handler = logging.FileHandler(
+            safe_file_path(
+                os.path.join(self.logdir, BASE_LOG_PATH, "impact_tracker_log.log")
+            )
+        )
         c_handler.setLevel(logging.WARNING)
         f_handler.setLevel(logging.ERROR)
 
         # Create formatters and add it to handlers
-        c_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
+        c_format = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
         f_format = logging.Formatter(
-            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+        )
         c_handler.setFormatter(c_format)
         f_handler.setFormatter(f_format)
 
@@ -199,16 +216,21 @@ class ImpactTracker(object):
 
     def launch_impact_monitor(self):
         try:
-            self.p, self.queue = launch_power_monitor(self.logdir, self.initial_info, self.logger)
+            self.p, self.queue = launch_power_monitor(
+                self.logdir, self.initial_info, self.logger
+            )
+
             def _terminate_monitor_and_log_final_info(p):
-                p.terminate(); log_final_info(self.logdir)
+                p.terminate()
+                log_final_info(self.logdir)
+
             atexit.register(_terminate_monitor_and_log_final_info, self.p)
         except:
             ex_type, ex_value, tb = sys.exc_info()
             self.logger.error(
-                "Encountered exception when launching power monitor thread.")
-            self.logger.error(ex_type, ex_value,
-                              ''.join(traceback.format_tb(tb)))
+                "Encountered exception when launching power monitor thread."
+            )
+            self.logger.error(ex_type, ex_value, "".join(traceback.format_tb(tb)))
             raise
 
     def get_latest_info_and_check_for_errors(self):
@@ -220,7 +242,7 @@ class ImpactTracker(object):
                 self.queue.put(message)
             if error:
                 ex_type, ex_value, tb_str = error
-                message = '%s (in subprocess)\n%s' % (str(ex_value), tb_str)
+                message = "%s (in subprocess)\n%s" % (str(ex_value), tb_str)
                 raise ex_type(message)
         except EmptyQueueException:
             # Nothing in the message queue
diff --git a/experiment_impact_tracker/cpu/common.py b/experiment_impact_tracker/cpu/common.py
index 0189b6843bc16192a178c8b703890d45a5d6ee21..56b9c6c50f56b9374099b0903146d077250f0009 100644
--- a/experiment_impact_tracker/cpu/common.py
+++ b/experiment_impact_tracker/cpu/common.py
@@ -1,7 +1,10 @@
+from sys import platform
+
 import cpuinfo
 import psutil
+
 from .exceptions import CPUAttributeAssertionError
-from sys import platform
+
 
 def get_my_cpu_info():
     """ Gather current cpu hardware info for this machine.
@@ -14,22 +17,26 @@ def get_my_cpu_info():
         most_info["usable_cpus"] = len(psutil.Process().cpu_affinity())
     return most_info
 
+
 def get_hz_actual(*args, **kwargs):
     """ Gets the current effective Hz of the CPU
     
     Returns:
         str : Hz
     """
-    return cpuinfo.get_cpu_info()['hz_actual']
+    return cpuinfo.get_cpu_info()["hz_actual"]
+
 
 def get_cpu_freq(*args, **kwargs):
     """ Returns all cpu freq of all cpu's available
     """
     return [x._asdict() for x in psutil.cpu_freq(percpu=True)]
 
+
 def get_cpu_count_adjusted_load_avg(*args, **kwargs):
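+    """Return the psutil.getloadavg() load averages divided by the CPU count.
+
+    For example, an average load of 4.0 on an 8-CPU machine is reported as 0.5.
+    """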
     return [x / psutil.cpu_count() for x in psutil.getloadavg()]
 
+
 def assert_cpus_by_attributes(attributes_set):
     """Assert that you're running on CPUs with a certain set of attributes.
 
@@ -51,8 +58,14 @@ def assert_cpus_by_attributes(attributes_set):
     for attribute, value in attributes_set.items():
         try:
             if cpu_info[attribute] != value:
-                raise CPUAttributeAssertionError("Attribute {} asserted to be {}, but found {} instead.".format(
-                    attribute, value, cpu_info[attribute]))
+                raise CPUAttributeAssertionError(
+                    "Attribute {} asserted to be {}, but found {} instead.".format(
+                        attribute, value, cpu_info[attribute]
+                    )
+                )
         except KeyError:
-            raise CPUAttributeAssertionError("Attribute {} does not exist. Available attributes: {}.".format(
-                attribute, ",".join(list(cpu_info.keys()))))
+            raise CPUAttributeAssertionError(
+                "Attribute {} does not exist. Available attributes: {}.".format(
+                    attribute, ",".join(list(cpu_info.keys()))
+                )
+            )
diff --git a/experiment_impact_tracker/cpu/exceptions.py b/experiment_impact_tracker/cpu/exceptions.py
index 35e7670166b370a195c79ae07d2d49c80c950ec8..3339ce4fe1d076a984106d9504fd3c3ba4b77b31 100644
--- a/experiment_impact_tracker/cpu/exceptions.py
+++ b/experiment_impact_tracker/cpu/exceptions.py
@@ -1,4 +1,5 @@
 class CPUAttributeAssertionError(Exception):
     """ An exception that occurs when try to assert a certain CPU attribute and a different one is encountered.
     """
-    pass
\ No newline at end of file
+
+    pass
diff --git a/experiment_impact_tracker/cpu/intel.py b/experiment_impact_tracker/cpu/intel.py
index 265f0819331d640bb3fd226dbf927bc1f0ba2898..7cbdb0546763888b2ed534e38c93b86c973620e1 100644
--- a/experiment_impact_tracker/cpu/intel.py
+++ b/experiment_impact_tracker/cpu/intel.py
@@ -2,13 +2,13 @@ import atexit
 import os
 import time
 
+import cpuinfo
 import numpy as np
 import pandas as pd
+import psutil
 import requests
 from bs4 import BeautifulSoup
 
-import cpuinfo
-import psutil
 from experiment_impact_tracker.cpu.common import get_my_cpu_info
 from experiment_impact_tracker.utils import *
 
@@ -18,32 +18,52 @@ from . import rapl
 def get_and_cache_cpu_max_tdp_from_intel():
     """ Goes to Intel's website and pulls information about TDP.
     """
-    cpu_brand = cpuinfo.get_cpu_info()['brand'].split(' ')[2]
-    if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpuinfocache/{}'.format(cpu_brand))):
-        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpuinfocache/{}'.format(cpu_brand)), 'r') as f:
+    cpu_brand = cpuinfo.get_cpu_info()["brand"].split(" ")[2]
+    if os.path.exists(
+        os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            "cpuinfocache/{}".format(cpu_brand),
+        )
+    ):
+        with open(
+            os.path.join(
+                os.path.dirname(os.path.abspath(__file__)),
+                "cpuinfocache/{}".format(cpu_brand),
+            ),
+            "r",
+        ) as f:
             return int(f.readline())
     s = requests.Session()
-    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
-    s.headers['User-Agent'] = user_agent
-    r = s.get('https://ark.intel.com/content/www/us/en/ark/search.html?_charset_=UTF-8&q={}'.format(
-        cpu_brand), allow_redirects=True)
-    soup = BeautifulSoup(r.content, 'lxml')
-    results = soup.find_all('span', attrs={'data-key': "MaxTDP"})
+    user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36"
+    s.headers["User-Agent"] = user_agent
+    r = s.get(
+        "https://ark.intel.com/content/www/us/en/ark/search.html?_charset_=UTF-8&q={}".format(
+            cpu_brand
+        ),
+        allow_redirects=True,
+    )
+    soup = BeautifulSoup(r.content, "lxml")
+    results = soup.find_all("span", attrs={"data-key": "MaxTDP"})
 
     if len(results) == 0:
-        redirect_url = soup.find(id='FormRedirectUrl').attrs['value']
+        redirect_url = soup.find(id="FormRedirectUrl").attrs["value"]
         if redirect_url:
-            r = s.get("https://ark.intel.com/" +
-                      redirect_url, allow_redirects=True)
-            soup = BeautifulSoup(r.content, 'lxml')
-            results = soup.find_all('span', attrs={'data-key': "MaxTDP"})
+            r = s.get("https://ark.intel.com/" + redirect_url, allow_redirects=True)
+            soup = BeautifulSoup(r.content, "lxml")
+            results = soup.find_all("span", attrs={"data-key": "MaxTDP"})
 
-    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cpuinfocache/{}'.format(cpu_brand)), 'w') as f:
-        f.write((results[0].text.strip().replace('W', '')))
-    return int(results[0].text.strip().replace('W', ''))
+    with open(
+        os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            "cpuinfocache/{}".format(cpu_brand),
+        ),
+        "w",
+    ) as f:
+        f.write(results[0].text.strip().replace("W", ""))
+    return int(results[0].text.strip().replace("W", ""))
 
 
-_timer = getattr(time, 'monotonic', time.time)
+_timer = getattr(time, "monotonic", time.time)
 
 
 def get_rapl_power(pid_list, logger=None, **kwargs):
@@ -83,7 +103,8 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
         except psutil.NoSuchProcess:
             if logger is not None:
                 logger.warn(
-                    "Process with pid {} used to be part of this process chain, but was shut down. Skipping.")
+                    "A process that used to be part of this process chain was shut down. Skipping."
+                )
             continue
 
     # Get initial times and cpu info
@@ -105,7 +126,7 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
         pt2 = p.cpu_times()
         st22 = _timer()
         system_wide_pt2 = psutil.cpu_times()
-        infos2.append((st21,st22, system_wide_pt2, pt2))
+        infos2.append((st21, st22, system_wide_pt2, pt2))
 
     # now is a good time to get the power samples that we got the process times for
     s2 = rapl.RAPLMonitor.sample()
@@ -131,14 +152,14 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
         # Most processors come in two packages so top level domains shold be package-1 and package-0
         if "package" not in domain.name:
             raise NotImplementedError(
-                "Unexpected top level domain for RAPL package. Not yet supported.")
+                "Unexpected top level domain for RAPL package. Not yet supported."
+            )
 
         total_intel_power += power
 
         for sd in domain.subdomains:
             subdomain = domain.subdomains[sd]
-            power = diff.average_power(
-                package=domain.name, domain=subdomain.name)
+            power = diff.average_power(package=domain.name, domain=subdomain.name)
             subdomain = subdomain.name.lower()
             if subdomain == "ram" or subdomain == "dram":
                 total_dram_power += power
@@ -150,23 +171,23 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
             # will get assigned the same amount of credit as the CPU
 
     if total_gpu_power != 0:
-        raise ValueError(
-            "Don't support credit assignment to Intel RAPL GPU yet.")
+        raise ValueError("Don't support credit assignment to Intel RAPL GPU yet.")
 
     for i, p in enumerate(process_list):
         st1, st12, system_wide_pt1, pt1 = infos1[i]
         st2, st22, system_wide_pt2, pt2 = infos2[i]
-        
+
         # change in cpu-hours process
         delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
         cpu_util_process = delta_proc / float(st2 - st1)
         # change in cpu-hours system
-        delta_proc2 = (system_wide_pt2.user - system_wide_pt1.user) + \
-            (system_wide_pt2.system - system_wide_pt1.system) 
+        delta_proc2 = (system_wide_pt2.user - system_wide_pt1.user) + (
+            system_wide_pt2.system - system_wide_pt1.system
+        )
         cpu_util_system = delta_proc2 / float(st22 - st12)
 
         # percent of cpu-hours in time frame attributable to this process (e.g., attributable compute)
-        attributable_compute = cpu_util_process / cpu_util_system 
+        attributable_compute = cpu_util_process / cpu_util_system
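+        # e.g., if this process accrued 2 CPU-seconds while the whole system accrued
+        # 4 CPU-seconds over the same window, attributable_compute is 0.5 and the
+        # process is credited with half of the measured CPU package power below.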
 
         delta_time = st2 - st1
 
@@ -189,15 +210,24 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
     # uss is unique memory to this process (if you killed it now that would be freed). PSS is shared memory split evenly among processes using the memory
     # summing these two gets us a nice fair metric for the actual memory used in the RAM hardware. The unique bits are directly attributable to the process
     # and the shared bits we give credit based on how many processes share those bits
-    system_wide_mem_percent = np.sum([float(x["uss"] + x["pss"]) / float(total_physical_memory.total - total_physical_memory.available) for x in mem_info_per_process.values()])
+    system_wide_mem_percent = np.sum(
+        [
+            float(x["uss"] + x["pss"])
+            / float(total_physical_memory.total - total_physical_memory.available)
+            for x in mem_info_per_process.values()
+        ]
+    )
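+    # Illustrative example: if this process chain's USS + PSS totals 2 GB while 8 GB
+    # of RAM is currently in use system-wide, the memory power credit below is 0.25.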
 
     power_credit_cpu = cpu_percent
     power_credit_mem = system_wide_mem_percent
     if power_credit_cpu == 0:
-        logger.warn("Problem retrieving CPU usage percentage to assign power credit, not using any CPU. This is possibly true, but seems unlikely! See if there's a problem!")
+        logger.warn(
+            "Problem retrieving CPU usage percentage to assign power credit: the process appears to be using no CPU. This is possible but seems unlikely; check whether something is wrong!"
+        )
     if power_credit_mem == 0:
         raise ValueError(
-            "Problem retrieving Mem usage percentage to assign power credit")
+            "Problem retrieving Mem usage percentage to assign power credit"
+        )
 
     total_attributable_power = 0
     if total_cpu_power != 0:
@@ -206,12 +236,14 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
         total_attributable_power += total_dram_power * power_credit_mem
 
     # assign the rest of the power to the CPU percentage even if this is a bit innacurate
-    total_attributable_power += (total_intel_power -
-                                 total_dram_power - total_cpu_power) * power_credit_cpu
+    total_attributable_power += (
+        total_intel_power - total_dram_power - total_cpu_power
+    ) * power_credit_cpu
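+    # In full (sketch): attributable_power = cpu_power * cpu_credit
+    #   + dram_power * mem_credit + (package_power - dram_power - cpu_power) * cpu_credit.
+    # E.g., a 50 W package measured as 30 W cores and 5 W DRAM, with cpu_credit = 0.5
+    # and mem_credit = 0.25, yields 30*0.5 + 5*0.25 + 15*0.5 = 23.75 W attributed.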
 
     if total_intel_power == 0:
         raise ValueError(
-            "It seems that power estimates from Intel RAPL are coming back 0, this indicates a problem.")
+            "It seems that power estimates from Intel RAPL are coming back as 0; this indicates a problem."
+        )
 
     data_return_values_with_headers = {
         "rapl_power_draw_absolute": total_intel_power,
@@ -219,10 +251,17 @@ def get_rapl_power(pid_list, logger=None, **kwargs):
         "cpu_time_seconds": cpu_times_per_process,
         "average_relative_cpu_utilization": cpu_percent,
         "absolute_cpu_utilization": absolute_cpu_percent,
-        "relative_mem_usage" : system_wide_mem_percent,
-        "absolute_mem_usage" : np.sum([float(x["uss"] + x["pss"]) for x in mem_info_per_process.values()]),
-        "absolute_mem_percent_usage" : np.sum([float(x["uss"] + x["pss"]) / float(total_physical_memory.total)  for x in mem_info_per_process.values()]),
-        "mem_info_per_process" : mem_info_per_process
+        "relative_mem_usage": system_wide_mem_percent,
+        "absolute_mem_usage": np.sum(
+            [float(x["uss"] + x["pss"]) for x in mem_info_per_process.values()]
+        ),
+        "absolute_mem_percent_usage": np.sum(
+            [
+                float(x["uss"] + x["pss"]) / float(total_physical_memory.total)
+                for x in mem_info_per_process.values()
+            ]
+        ),
+        "mem_info_per_process": mem_info_per_process,
     }
 
     return data_return_values_with_headers
diff --git a/experiment_impact_tracker/cpu/rapl.py b/experiment_impact_tracker/cpu/rapl.py
index 8e0536f2a48c1cde01f7cdd0ceabd5736caafed6..3c4424400853b86ebd1518b601b853ecd83255f6 100644
--- a/experiment_impact_tracker/cpu/rapl.py
+++ b/experiment_impact_tracker/cpu/rapl.py
@@ -17,8 +17,7 @@ def _read_sysfs_file(path):
 def _get_domain_info(path):
     name = _read_sysfs_file("%s/name" % path)
     energy_uj = int(_read_sysfs_file("%s/energy_uj" % path))
-    max_energy_range_uj = int(_read_sysfs_file(
-        "%s/max_energy_range_uj" % path))
+    max_energy_range_uj = int(_read_sysfs_file("%s/max_energy_range_uj" % path))
 
     return name, energy_uj, max_energy_range_uj
 
@@ -30,7 +29,9 @@ def _is_rapl_compatible(*args, **kwargs):
 
 def _walk_rapl_dir(path):
     if not os.path.exists(path):
-        raise ValueError("No RAPL directory exists to read from, RAPL CPU power readings may not be supported on this machine. If you discover a way to read rapl readings, please submit a pull request to update compatibility for your system!")
+        raise ValueError(
+            "No RAPL directory exists to read from; RAPL CPU power readings may not be supported on this machine. If you discover a way to read RAPL readings, please submit a pull request to update compatibility for your system!"
+        )
     regex = re.compile("intel-rapl")
 
     for dirpath, dirnames, filenames in os.walk(path, topdown=True):
@@ -41,7 +42,6 @@ def _walk_rapl_dir(path):
 
 
 class RAPLDomain(object):
-
     @classmethod
     def construct(cls, id, path):
         name, energy_uj, max_energy_range_uj = _get_domain_info(path)
@@ -100,7 +100,6 @@ class RAPLDomain(object):
 
 
 class RAPLSample(object):
-
     @classmethod
     def take_sample(cls):
         sample = RAPLSample()
@@ -108,7 +107,9 @@ class RAPLSample(object):
         sample.domains_by_id = {}
         sample.timestamp = datetime.now()
 
-        for dirpath, dirnames, filenames in _walk_rapl_dir("/sys/class/powercap/intel-rapl"):
+        for dirpath, dirnames, filenames in _walk_rapl_dir(
+            "/sys/class/powercap/intel-rapl"
+        ):
             current = dirpath.split("/")[-1]
             splits = current.split(":")
 
@@ -162,7 +163,7 @@ class RAPLSample(object):
         elif unit == JOULES:
             return e / 1000000
         elif unit == WATT_HOURS:
-            return e / (1000000*3600)
+            return e / (1000000 * 3600)
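+        # RAPL energy counters are reported in microjoules: dividing by 1e6 gives
+        # joules, and dividing by a further 3600 gives watt-hours.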
 
 
 class RAPLDifference(RAPLSample):
diff --git a/experiment_impact_tracker/create_graph_appendix.py b/experiment_impact_tracker/create_graph_appendix.py
index d69f0f0e5103de7801e722f240ed4340fc9ececf..c6d23cb777538648e156b6382bbd20720a1d7fe9 100644
--- a/experiment_impact_tracker/create_graph_appendix.py
+++ b/experiment_impact_tracker/create_graph_appendix.py
@@ -1,59 +1,74 @@
-import os.path
-import pandas as pd
 import datetime
-import matplotlib.pyplot as plt
+import os.path
 import random
 import string
+
+import matplotlib.pyplot as plt
+import pandas as pd
 import seaborn as sns
+
 from experiment_impact_tracker.data_utils import load_data_into_frame
 
 SMALL_SIZE = 22
 MEDIUM_SIZE = 24
 BIGGER_SIZE = 26
 
-plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
-plt.rc('axes', titlesize=MEDIUM_SIZE)     # fontsize of the axes title
-plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
-plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
-plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
-plt.rc('legend', fontsize=MEDIUM_SIZE)    # legend fontsize
-plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
+plt.rc("font", size=SMALL_SIZE)  # controls default text sizes
+plt.rc("axes", titlesize=MEDIUM_SIZE)  # fontsize of the axes title
+plt.rc("axes", labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
+plt.rc("xtick", labelsize=SMALL_SIZE)  # fontsize of the tick labels
+plt.rc("ytick", labelsize=SMALL_SIZE)  # fontsize of the tick labels
+plt.rc("legend", fontsize=MEDIUM_SIZE)  # legend fontsize
+plt.rc("figure", titlesize=BIGGER_SIZE)  # fontsize of the figure title
 
 # def random_suffix(length=4):
 #     letters = string.ascii_lowercase
 #     return ''.join(random.choice(letters) for i in range(stringLength))
 
-def dateparse (time_in_secs):    
+
+def dateparse(time_in_secs):
     return datetime.datetime.fromtimestamp(float(time_in_secs))
 
+
 # def clean(x): #2.9066 GHz
 #     x = x.replace(" GHz", "")
 #     return float(x)
 
+
 def handle_cpu_count_adjusted_average_load(df):
-    separated_df = pd.DataFrame(df['cpu_count_adjusted_average_load'].values.tolist())
-    separated_df.columns = ['5_min_cpu_count_adjusted_average_load', '10_min_cpu_count_adjusted_average_load', '15_min_cpu_count_adjusted_average_load']
+    separated_df = pd.DataFrame(df["cpu_count_adjusted_average_load"].values.tolist())
+    separated_df.columns = [
+        "5_min_cpu_count_adjusted_average_load",
+        "10_min_cpu_count_adjusted_average_load",
+        "15_min_cpu_count_adjusted_average_load",
+    ]
 
     return pd.concat([separated_df, df], axis=1)
 
 
 # HZ_ACTUAL_COL = 'hz_actual'
 ADJUSTED_AVERAGE_LOAD = "cpu_count_adjusted_average_load"
-TIMESTAMP_COL = 'timestamp'
+TIMESTAMP_COL = "timestamp"
 
-SKIP_COLUMN = ['timestamp', 'per_gpu_performance_state']
+SKIP_COLUMN = ["timestamp", "per_gpu_performance_state"]
 
 # TODO move per_gpu_performance_state to special handler
 SPECIAL_COLUMN = [ADJUSTED_AVERAGE_LOAD]
 
-HANDLER_MAP = {ADJUSTED_AVERAGE_LOAD : handle_cpu_count_adjusted_average_load}
+HANDLER_MAP = {ADJUSTED_AVERAGE_LOAD: handle_cpu_count_adjusted_average_load}
 
 
-def create_graphs(input_path: str, output_path: str ='.', fig_x:int = 16, fig_y: int = 8, max_level=None):
+def create_graphs(
+    input_path: str,
+    output_path: str = ".",
+    fig_x: int = 16,
+    fig_y: int = 8,
+    max_level=None,
+):
     if not os.path.exists(output_path):
         os.makedirs(output_path)
     # create graph dirs
-    graph_dir = str(fig_x) + '_' + str(fig_y)
+    graph_dir = str(fig_x) + "_" + str(fig_y)
     out_dir = os.path.join(output_path, graph_dir)
     # if os.path.exists(out_dir):
     #     print("{} already exists, attaching random string to the out put dir and moving on.".format(out_dir))
@@ -63,32 +78,34 @@ def create_graphs(input_path: str, output_path: str ='.', fig_x:int = 16, fig_y:
     df, json_raw = load_data_into_frame(input_path, max_level=max_level)
     # df = pd.read_csv(os.path.join(input_path, csv), sep=',', parse_dates=[0], date_parser=dateparse)
     created_paths = []
-    df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
+    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="s")
     print("Plotting {}".format(",".join([k for k in list(df)[1:]])))
 
     # Do a pass for any pre-processing
     for k in list(df)[:]:
         if k in SPECIAL_COLUMN:
             df = HANDLER_MAP[k](df)
-    
+
     # Then graph everything
     for k in list(df)[:]:
         if k in SKIP_COLUMN:
             continue
         try:
-            df.plot(kind='line', x=TIMESTAMP_COL, y=k, figsize=(25, 8))
+            df.plot(kind="line", x=TIMESTAMP_COL, y=k, figsize=(25, 8))
         except:
             print("problem plotting {}, skipping".format(k))
             continue
 
-        path_name = os.path.join(out_dir, k+'.png')
+        path_name = os.path.join(out_dir, k + ".png")
         plt.savefig(path_name)
-        plt.close('all')
+        plt.close("all")
         created_paths.append(path_name)
     return created_paths
 
 
-def create_scatterplot_from_df(df, x: str, y: str, output_path: str ='.', fig_x:int = 16, fig_y: int = 8):
+def create_scatterplot_from_df(
+    df, x: str, y: str, output_path: str = ".", fig_x: int = 16, fig_y: int = 8
+):
     """Loads an executive summary df and creates a scatterplot from some pre-specified variables.
     
     Args:
@@ -102,14 +119,16 @@ def create_scatterplot_from_df(df, x: str, y: str, output_path: str ='.', fig_x:
     if not os.path.exists(output_path):
         os.makedirs(output_path)
     # create graph dirs
-    graph_dir = str(fig_x) + '_' + str(fig_y)
+    graph_dir = str(fig_x) + "_" + str(fig_y)
     out_dir = os.path.join(output_path, graph_dir)
     df[x] = df[x].astype(float)
     df[y] = df[y].astype(float)
     os.makedirs(out_dir, exist_ok=True)
     a4_dims = (14, 9)
     fig, ax = plt.subplots(figsize=a4_dims)
-    graph = sns.scatterplot(ax=ax, x=x, y=y, data=df, s=325,  alpha=.5, hue='Experiment', legend='brief')#, palette="Set1")
+    graph = sns.scatterplot(
+        ax=ax, x=x, y=y, data=df, s=325, alpha=0.5, hue="Experiment", legend="brief"
+    )  # , palette="Set1")
     box = ax.get_position()
     plt.legend(markerscale=2)
     # ax.set_position([box.x0,box.y0,box.width*0.83,box.height])
@@ -117,9 +136,9 @@ def create_scatterplot_from_df(df, x: str, y: str, output_path: str ='.', fig_x:
     # plt.ylim(bottom=0.0)
 
     # plt.legend(loc='lower right')
-    #Use regplot to plot the regression line for the whole points
+    # Use regplot to plot the regression line for the whole points
     # sns.regplot(x="FPOs", y=args.y_axis_var, data=df, sizes=(250, 500),  alpha=.5, scatter=False, ax=graph.axes[2])
-    path_name = os.path.join(out_dir, '{}v{}.png'.format(x,y))
+    path_name = os.path.join(out_dir, "{}v{}.png".format(x, y))
     plt.savefig(path_name)
-    plt.close('all')
-    return path_name
\ No newline at end of file
+    plt.close("all")
+    return path_name
diff --git a/experiment_impact_tracker/data_info_and_router.py b/experiment_impact_tracker/data_info_and_router.py
index a5a0e007eb3b2c2288adc84cfdbe6709074623c5..a66568147a19e38d9c00671718e530b7c9ecc2da 100644
--- a/experiment_impact_tracker/data_info_and_router.py
+++ b/experiment_impact_tracker/data_info_and_router.py
@@ -1,83 +1,70 @@
-from experiment_impact_tracker.cpu.intel import get_rapl_power
-from experiment_impact_tracker.cpu.common import get_cpu_freq, get_cpu_count_adjusted_load_avg
-from experiment_impact_tracker.cpu import rapl
+from datetime import datetime
+
 import experiment_impact_tracker
-from experiment_impact_tracker.gpu.nvidia import get_nvidia_gpu_power, is_nvidia_compatible
-from experiment_impact_tracker.utils import *
-from experiment_impact_tracker.emissions.common import get_realtime_carbon
+from experiment_impact_tracker.cpu import rapl
+from experiment_impact_tracker.cpu.common import (
+    get_cpu_count_adjusted_load_avg, get_cpu_freq, get_my_cpu_info)
+from experiment_impact_tracker.cpu.intel import get_rapl_power
 from experiment_impact_tracker.disk.common import measure_disk_speed_at_dir
+from experiment_impact_tracker.emissions.common import (
+    get_realtime_carbon, is_capable_realtime_carbon_intensity)
+from experiment_impact_tracker.emissions.get_region_metrics import \
+    get_current_region_info_cached
+from experiment_impact_tracker.gpu.nvidia import (get_gpu_info,
+                                                  get_nvidia_gpu_power,
+                                                  is_nvidia_compatible)
 from experiment_impact_tracker.operating_system.common import is_linux
-from experiment_impact_tracker.emissions.common import is_capable_realtime_carbon_intensity
-from experiment_impact_tracker.cpu import rapl
-from experiment_impact_tracker.gpu.nvidia import get_gpu_info
-
-from datetime import datetime
-
-from experiment_impact_tracker.cpu.common import get_my_cpu_info
-from experiment_impact_tracker.py_environment.common import get_python_packages_and_versions
-from experiment_impact_tracker.emissions.get_region_metrics import get_current_region_info_cached
+from experiment_impact_tracker.py_environment.common import \
+    get_python_packages_and_versions
+from experiment_impact_tracker.utils import *
 
-get_version_number = lambda *args,  **kwargs: experiment_impact_tracker.__version__
-get_time_now =  lambda *args,  **kwargs: datetime.now()
-all_compatible = lambda *args, **kwargs : True
+get_version_number = lambda *args, **kwargs: experiment_impact_tracker.__version__
+get_time_now = lambda *args, **kwargs: datetime.now()
+all_compatible = lambda *args, **kwargs: True
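+# Each entry in INITIAL_INFO and DATA_HEADERS below follows the same schema:
+# "name"/"description" label the metric, "compatability" lists predicate functions
+# that must all pass for the entry to be collected on this machine/region, and
+# "routing"["function"] is the callable that actually gathers the value
+# (see compute_tracker._validate_compatabilities and _get_compatible_data_headers).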
 
 INITIAL_INFO = [
     {
         "name": "python_package_info",
         "description": "Python package info.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_python_packages_and_versions
-        }
+        "routing": {"function": get_python_packages_and_versions},
     },
     {
         "name": "cpu_info",
         "description": "CPU hardware information.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_my_cpu_info
-        }
+        "routing": {"function": get_my_cpu_info},
     },
     {
         "name": "experiment_start",
         "description": "Start time of experiment.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_time_now
-        }
+        "routing": {"function": get_time_now},
     },
     {
         "name": "gpu_info",
         "description": "GPU hardware information.",
         "compatability": [is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_gpu_info
-        }
+        "routing": {"function": get_gpu_info},
     },
     {
         "name": "experiment_impact_tracker_version",
         "description": "Version of experiment-impact-tracker framework.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_version_number
-        }
+        "routing": {"function": get_version_number},
     },
-   {
+    {
         "name": "region",
         "description": "The region we determine this experiment to be run in.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": lambda : get_current_region_info_cached()[0]
-        }
+        "routing": {"function": lambda: get_current_region_info_cached()[0]},
     },
-   {
+    {
         "name": "region_carbon_intensity_estimate",
         "description": "The average carbon intensity estimated for the region this experiment is in.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": lambda : get_current_region_info_cached()[1]
-        }
-    }
+        "routing": {"function": lambda: get_current_region_info_cached()[1]},
+    },
 ]
 
 DATA_HEADERS = [
@@ -85,148 +72,111 @@ DATA_HEADERS = [
         "name": "timestamp",
         "description": "Time at which sample was drawn based on local machine time in timestamp format.",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_timestamp
-        }
+        "routing": {"function": get_timestamp},
     },
     {
         "name": "rapl_power_draw_absolute",
         "description": "The absolute power draw reading read from an Intel RAPL package. This is in terms of Watts across the entire machine.",
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
-
     {
         "name": "rapl_estimated_attributable_power_draw",
         "description": "This is the estimated attributable power draw to this process and all child processes based on power draw reading read from an Intel RAPL package. This is calculated as (watts used by cpu) * (relative cpu percentage used) + (watts used by dram) * (relative dram percentage used) + (watts used by other package elements) * (relative cpu percentage used).",
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "nvidia_draw_absolute",
         "description": "This is the absolute power draw of all accessible NVIDIA GPUs on the system (as long as the main process or any child process lives on the GPU). Calculated as sum across all GPUs.",
-        "compatability": [ is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_nvidia_gpu_power
-        }
+        "compatability": [is_nvidia_compatible, is_linux],
+        "routing": {"function": get_nvidia_gpu_power},
     },
     {
         "name": "nvidia_estimated_attributable_power_draw",
         "description": "This is the estimated attributable power draw of all accessible NVIDIA GPUs on the system (as long as the main process or any child process lives on the GPU). Calculated as the sum per gpu of (absolute power draw per gpu) * (relative process percent utilization of gpu)",
         "compatability": [is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_nvidia_gpu_power
-        }
+        "routing": {"function": get_nvidia_gpu_power},
     },
     {
         "name": "cpu_time_seconds",
         "description": "This is the total CPU time used so far by the program in seconds.",
         # TODO: shouldn't need rapl, this should be available to all
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "average_gpu_estimated_utilization_absolute",
         "description": "This is the absolute utilization of the GPUs by the main process and all child processes. Returns an average result across several trials of nvidia-smi pmon -c 10. Averaged across GPUs. Using .05 to indicate 5%.",
         "compatability": [is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_nvidia_gpu_power
-        }
+        "routing": {"function": get_nvidia_gpu_power},
     },
     {
         "name": "average_gpu_estimated_utilization_relative",
         "description": "This is the relative utilization of the GPUs by the main process and all child processes. Returns an average result across several trials of nvidia-smi pmon -c 10 and the percentage that this process and all child process utilize for the gpu.  Averaged across GPUs. Using .05 to indicate 5%. ",
         "compatability": [is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_nvidia_gpu_power
-        }
+        "routing": {"function": get_nvidia_gpu_power},
     },
     {
         "name": "average_relative_cpu_utilization",
         "description": "This is the relative CPU utlization compared to the utilization of the whole system at that time. E.g., if the total system is using 50\% of the CPU power, but our program is only using 25\%, this will return .5.",
         # TODO: shouldn't need rapl, this should be available to all
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "absolute_cpu_utilization",
         "description": "This is the relative CPU utlization compared to the utilization of the whole system at that time. E.g., if the total system is using 50\% of 4 CPUs, but our program is only using 25\% of 2 CPUs, this will return .5 (same as in top). There is no multiplier times the number of cores in this case as top does. ",
         # TODO: shouldn't need rapl, this should be available to all
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "per_gpu_performance_state",
         "description": "A concatenated string which gives the performance state of every single GPU used by the main process or all child processes. Example formatting looks like <gpuid>::<performance state>. E.g., 0::P0",
         "compatability": [is_nvidia_compatible, is_linux],
-        "routing": {
-            "function": get_nvidia_gpu_power
-        }
+        "routing": {"function": get_nvidia_gpu_power},
     },
     {
         "name": "relative_mem_usage",
         "description": "The percentage of all in-use ram this program is using.",
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power 
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "absolute_mem_usage",
         "description": "The amount of memory being used.",
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "absolute_mem_percent_usage",
         "description": "The amount of memory being used as an absolute percentage of total memory (RAM).",
-        "compatability": [ rapl._is_rapl_compatible, is_linux],
-        "routing": {
-            "function": get_rapl_power
-        }
+        "compatability": [rapl._is_rapl_compatible, is_linux],
+        "routing": {"function": get_rapl_power},
     },
     {
         "name": "cpu_count_adjusted_average_load",
         "description": "Measures the average load on the system for the past 5, 10, 15 minutes divided by number of CPUs (wrapper for psutil method). As fraction (percentage needs multiplication by 100)",
         "compatability": [all_compatible],
-        "routing": {
-            "function": get_cpu_count_adjusted_load_avg 
-        }
+        "routing": {"function": get_cpu_count_adjusted_load_avg},
     },
     {
         "name": "cpu_freq",
         "description": "Get cpu frequency including realtime in MHz.",
         "compatability": [is_linux],
-        "routing": {
-            "function": get_cpu_freq 
-        }
-    },
-    {
-        "name" : "realtime_carbon_intensity",
-        "description" : "If available, the realtime carbon intensity in the region.",
-        "compatability" : [is_capable_realtime_carbon_intensity],
-        "routing" : {
-            "function" : get_realtime_carbon
-        }
-    },
-    {
-        "name" : "disk_write_speed",
-        "description" : "The write speed to the disk estimated over .5 seconds.",
-        "compatability" : [all_compatible],
-        "routing" : {
-            "function" : measure_disk_speed_at_dir
-        }
-    }
+        "routing": {"function": get_cpu_freq},
+    },
+    {
+        "name": "realtime_carbon_intensity",
+        "description": "If available, the realtime carbon intensity in the region.",
+        "compatability": [is_capable_realtime_carbon_intensity],
+        "routing": {"function": get_realtime_carbon},
+    },
+    {
+        "name": "disk_write_speed",
+        "description": "The write speed to the disk estimated over .5 seconds.",
+        "compatability": [all_compatible],
+        "routing": {"function": measure_disk_speed_at_dir},
+    },
 ]
diff --git a/experiment_impact_tracker/data_utils.py b/experiment_impact_tracker/data_utils.py
index 065d6d446f1e8d53237a629a7b6c3457bcb121ea..a49d4ada3f8f208e61bc32a86bbe006741a3f5b3 100644
--- a/experiment_impact_tracker/data_utils.py
+++ b/experiment_impact_tracker/data_utils.py
@@ -1,62 +1,72 @@
+import csv
+import os
 import pickle
+import zipfile
+from datetime import datetime
+
 import ujson as json
 from pandas.io.json import json_normalize
-from datetime import datetime
-import os
-import zipfile
-import csv
 
-BASE_LOG_PATH = 'impacttracker/'
-DATAPATH = BASE_LOG_PATH + 'data.json'
-INFOPATH = BASE_LOG_PATH + 'info.pkl'
+BASE_LOG_PATH = "impacttracker/"
+DATAPATH = BASE_LOG_PATH + "data.json"
+INFOPATH = BASE_LOG_PATH + "info.pkl"
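+# On-disk layout: everything lives under <log_dir>/impacttracker/. data.json holds
+# one JSON object per line (one per sample; see write_json_data_to_file and
+# _read_json_file below), and info.pkl holds the pickled one-time experiment info.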
+
 
 def load_initial_info(log_dir):
     info_path = safe_file_path(os.path.join(log_dir, INFOPATH))
-    with open(info_path, 'rb') as info_file:
+    with open(info_path, "rb") as info_file:
         return pickle.load(info_file)
 
+
 def _read_json_file(filename):
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         lines = f.readlines()
         return [json.loads(line) for line in lines]
 
+
 def load_data_into_frame(log_dir, max_level=None):
     data_path = safe_file_path(os.path.join(log_dir, DATAPATH))
     json_array = _read_json_file(data_path)
     return json_normalize(json_array, max_level=max_level), json_array
 
+
 def log_final_info(log_dir):
     final_time = datetime.now()
     info = load_initial_info(log_dir)
     info["experiment_end"] = final_time
     info_path = safe_file_path(os.path.join(log_dir, INFOPATH))
 
-    with open(info_path, 'wb') as info_file:
+    with open(info_path, "wb") as info_file:
         pickle.dump(info, info_file)
 
+
 def safe_file_path(file_path):
     directory = os.path.dirname(file_path)
     if not os.path.exists(directory):
         os.makedirs(directory)
     return file_path
 
+
 def write_csv_data_to_file(file_path, data, overwrite=False):
     file_path = safe_file_path(file_path)
-    with open(file_path, 'w' if overwrite else 'a') as outfile:
+    with open(file_path, "w" if overwrite else "a") as outfile:
         writer = csv.writer(outfile)
         writer.writerow(data)
 
+
 def write_json_data_to_file(file_path, data, overwrite=False):
     file_path = safe_file_path(file_path)
-    with open(file_path, 'w' if overwrite else 'a') as outfile:
+    with open(file_path, "w" if overwrite else "a") as outfile:
         outfile.write(json.dumps(data) + "\n")
 
+
 def zip_data_and_info(log_dir, zip_path):
     info_path = safe_file_path(os.path.join(log_dir, INFOPATH))
     data_path = safe_file_path(os.path.join(log_dir, DATAPATH))
     zip_files([info_path, data_path], zip_path)
     return zip_path
 
+
 def zip_files(src, dst, arcname=None):
     """ Compress a list of files to a given zip 
     
@@ -67,12 +77,14 @@ def zip_files(src, dst, arcname=None):
         @dst: filename (path/filename if needed)
         @arcname: Iterable object containing the names we want to give to the elements in the archive (has to correspond to src) 
     """
-    zip_ = zipfile.ZipFile(dst, 'w')
+    zip_ = zipfile.ZipFile(dst, "w")
 
     for i in range(len(src)):
         if arcname is None:
-            zip_.write(src[i], os.path.basename(src[i]), compress_type = zipfile.ZIP_DEFLATED)
+            zip_.write(
+                src[i], os.path.basename(src[i]), compress_type=zipfile.ZIP_DEFLATED
+            )
         else:
-            zip_.write(src[i], arcname[i], compress_type = zipfile.ZIP_DEFLATED)
+            zip_.write(src[i], arcname[i], compress_type=zipfile.ZIP_DEFLATED)
 
-    zip_.close()
\ No newline at end of file
+    zip_.close()
diff --git a/experiment_impact_tracker/disk/common.py b/experiment_impact_tracker/disk/common.py
index 6d1e5bd9e1cc05be4c57ec129eaac1e89342fac7..90d27e2af1f455e59f63961fa3efaea4f49df6ce 100644
--- a/experiment_impact_tracker/disk/common.py
+++ b/experiment_impact_tracker/disk/common.py
@@ -1,14 +1,17 @@
 """ Taken mostly from https://github.com/sanderjo/disk-speed/blob/master/diskspeed.py
 """
-import time, os, sys
+import os
+import sys
+import time
 import uuid
 
+
 def writetofile(filename, size_in_mb):
-    # writes string to specified file repeatdely, until mysizeMB is reached. Then deletes fle 
+    # writes a string to the specified file repeatedly until size_in_mb is reached, then deletes the file
     mystring = "The quick brown fox jumps over the lazy dog"
-    writeloops = int(1000000*size_in_mb/len(mystring))
+    writeloops = int(1000000 * size_in_mb / len(mystring))
     try:
-        f = open(filename, 'w')
+        f = open(filename, "w")
     except:
         # no better idea than:
         raise
@@ -17,14 +20,16 @@ def writetofile(filename, size_in_mb):
     f.close()
     os.remove(filename)
 
-############## 
+
+##############
+
 
 def measure_disk_speed_at_dir(*args, log_dir=None, **kwargs):
     dirname = log_dir
     # returns writing speed to dirname in MB/s
     # method: keep writing a file, until 0.5 seconds is passed. Then divide bytes written by time passed
-    filesize = 1    # in MB
-    maxtime = 0.5     # in sec
+    filesize = 1  # in MB
+    maxtime = 0.5  # in sec
     filename = os.path.join(dirname, str(uuid.uuid4()))
     start = time.time()
     loopcounter = 0
@@ -33,9 +38,9 @@ def measure_disk_speed_at_dir(*args, log_dir=None, **kwargs):
             writetofile(filename, filesize)
         except:
             # I have no better idea than:
-            raise    
+            raise
         loopcounter += 1
         diff = time.time() - start
-        if diff > maxtime: break
-    return (loopcounter*filesize)/diff
-
+        if diff > maxtime:
+            break
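+    # e.g., if the 1 MB test file was written 12 times before hitting the 0.5 s
+    # cutoff at diff = 0.5 s, the reported speed is 12 * 1 / 0.5 = 24 MB/s.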
+    return (loopcounter * filesize) / diff
diff --git a/experiment_impact_tracker/emissions/common.py b/experiment_impact_tracker/emissions/common.py
index d833313e4c68463176009a6397e92725ec32b41b..47f4a4181dedb72231b6184733c91be91abe5356 100644
--- a/experiment_impact_tracker/emissions/common.py
+++ b/experiment_impact_tracker/emissions/common.py
@@ -1,30 +1,28 @@
-import experiment_impact_tracker.emissions.us_ca_parser as us_ca_parser
 import numpy
 
-REALTIME_REGIONS = {
-    "US-CA" : us_ca_parser
-}
+import experiment_impact_tracker.emissions.us_ca_parser as us_ca_parser
+
+REALTIME_REGIONS = {"US-CA": us_ca_parser}
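+# Maps region codes to parser modules; each module is assumed to expose
+# get_realtime_carbon_source() and fetch_supply() (whose first entry carries a
+# "carbon_intensity" field), as relied on by the helpers below.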
+
 
 def is_capable_realtime_carbon_intensity(*args, region=None, **kwargs):
     return region in list(REALTIME_REGIONS.keys())
-    
+
+
 def get_realtime_carbon_source(region):
     return REALTIME_REGIONS[region].get_realtime_carbon_source()
 
+
 def get_realtime_carbon(*args, **kwargs):
-    if 'region' not in kwargs:
+    if "region" not in kwargs:
         raise ValueError("region was not passed to function")
     try:
-        carbon_intensity = REALTIME_REGIONS[kwargs['region']].fetch_supply()[0]['carbon_intensity']
+        carbon_intensity = REALTIME_REGIONS[kwargs["region"]].fetch_supply()[0][
+            "carbon_intensity"
+        ]
         if numpy.isnan(carbon_intensity):
-            return {
-                "realtime_carbon_intensity" : "n/a"
-            }
+            return {"realtime_carbon_intensity": "n/a"}
     except:
-        return {
-            "realtime_carbon_intensity" : "n/a"
-        }
+        return {"realtime_carbon_intensity": "n/a"}
 
-    return {
-        "realtime_carbon_intensity" : carbon_intensity 
-    }
+    return {"realtime_carbon_intensity": carbon_intensity}
diff --git a/experiment_impact_tracker/emissions/constants.py b/experiment_impact_tracker/emissions/constants.py
index f0d41f8f687e1d0445da106e2be4867a4a265408..2aaa7bb8672cbf5dc80d4d4fd54b3237d0539a9f 100644
--- a/experiment_impact_tracker/emissions/constants.py
+++ b/experiment_impact_tracker/emissions/constants.py
@@ -1,9 +1,8 @@
 import os
 
 import numpy as np
-from progiter import ProgIter
-
 import ujson as json
+from progiter import ProgIter
 from shapely.geometry import shape
 
 
@@ -17,7 +16,7 @@ def read_terrible_json(path):
     Returns:
         [dict]: list of dictionaries
     """
-    with open(path, 'rt') as f:
+    with open(path, "rt") as f:
         lines = []
         test_read_lines = [x for x in f.readlines()]
         for x in ProgIter(test_read_lines):
@@ -35,10 +34,11 @@ def _load_zone_info():
         dict : the loaded json file
     """
     dir_path = os.path.dirname(os.path.realpath(__file__))
-    with open(os.path.join(dir_path, 'data/co2eq_parameters.json'), 'rt') as f:
+    with open(os.path.join(dir_path, "data/co2eq_parameters.json"), "rt") as f:
         x = json.load(f)
     return x
 
+
 def _load_zone_names():
     """Loads zone name info from the json file in the package.
     
@@ -46,7 +46,7 @@ def _load_zone_names():
         dict : the loaded json file
     """
     dir_path = os.path.dirname(os.path.realpath(__file__))
-    with open(os.path.join(dir_path, 'data/zone_names.json'), 'rt') as f:
+    with open(os.path.join(dir_path, "data/zone_names.json"), "rt") as f:
         x = json.load(f)
     return x
 
@@ -57,13 +57,14 @@ def load_regions_with_bounding_boxes():
     Returns:
         list: list of shapely objects containing regional geometries
     """
-    print("loading region bounding boxes for computing carbon emissions region, this may take a moment...")
+    print(
+        "loading region bounding boxes for computing carbon emissions region, this may take a moment..."
+    )
 
     dir_path = os.path.dirname(os.path.realpath(__file__))
     all_geoms = []
     # with open('data/zone_geometries.json') as f:
-    all_geoms = read_terrible_json(os.path.join(
-        dir_path, 'data/zonegeometries.json'))
+    all_geoms = read_terrible_json(os.path.join(dir_path, "data/zonegeometries.json"))
 
     for i, geom in enumerate(all_geoms):
         all_geoms[i]["geometry"] = shape(geom["geometry"])
diff --git a/experiment_impact_tracker/emissions/get_region_metrics.py b/experiment_impact_tracker/emissions/get_region_metrics.py
index 4cb936464cc77972094ba8bf74127d7c90ba1d8d..573a6a900cfb64ceb345cda1579c06bffb2a34db 100644
--- a/experiment_impact_tracker/emissions/get_region_metrics.py
+++ b/experiment_impact_tracker/emissions/get_region_metrics.py
@@ -1,14 +1,18 @@
-from .constants import REGIONS_WITH_BOUNDING_BOXES, ZONE_INFO, ZONE_NAMES
-from shapely.geometry import Point
-from functools import lru_cache
 import time
+from functools import lru_cache
+
+from shapely.geometry import Point
+
+from .constants import REGIONS_WITH_BOUNDING_BOXES, ZONE_INFO, ZONE_NAMES
+
 
 def get_zone_information_by_coords(coords):
     region = get_region_by_coords(coords)
     return region, ZONE_INFO[region["id"]]
 
+
 def get_region_by_coords(coords):
-    #TODO: automatically narrow down possibilities
+    # TODO: automatically narrow down possibilities
     lat, lon = coords
     point = Point(lon, lat)
     zone_possibilities = []
@@ -17,31 +21,38 @@ def get_region_by_coords(coords):
             if zone["geometry"].contains(point):
                 zone_possibilities.append(zone)
         except:
-            import pdb; pdb.set_trace()    
+            import pdb
+
+            pdb.set_trace()
     if len(zone_possibilities) == 0:
         raise ValueError("No possibilities found, may need to add a zone.")
-        
+
     z = min(zone_possibilities, key=lambda x: x["geometry"].area)
     return z
 
+
 def get_current_location():
     import geocoder
-    g = geocoder.ip('me')
+
+    g = geocoder.ip("me")
     return g.y, g.x
 
+
 @lru_cache(maxsize=32)
 def get_current_region_info(*args, **kwargs):
     return get_zone_information_by_coords(get_current_location())
 
+
 def get_zone_name_by_id(zone_id):
     zone = ZONE_NAMES["zoneShortName"][zone_id]
-    name = zone['zoneName']
-    if 'countryName' in zone:
-        name += ", {}".format(zone['countryName'])
+    name = zone["zoneName"]
+    if "countryName" in zone:
+        name += ", {}".format(zone["countryName"])
     return name
 
+
 def get_sorted_region_infos():
-    zone_infos = [(key, value['carbonIntensity']) for key, value in ZONE_INFO.items()]
+    zone_infos = [(key, value["carbonIntensity"]) for key, value in ZONE_INFO.items()]
     return sorted(zone_infos, key=lambda x: x[1])
 
 
@@ -49,6 +60,6 @@ def get_ttl_hash(seconds=3600):
     """Return the same value withing `seconds` time period"""
     return round(time.time() / seconds)
 
+
 def get_current_region_info_cached():
-    return get_current_region_info(ttl_hash=get_ttl_hash(seconds=60*60))
-    
\ No newline at end of file
+    return get_current_region_info(ttl_hash=get_ttl_hash(seconds=60 * 60))
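
get_current_region_info_cached above combines lru_cache with a time-bucketed hash: because get_ttl_hash returns the same value only within a given window, cached results are effectively given a time-to-live. A minimal sketch of that pattern in isolation (the _expensive_lookup body is a hypothetical stand-in for the geocoder call):

import time
from functools import lru_cache


def get_ttl_hash(seconds=3600):
    # Same value within a `seconds` window, so cache entries expire after it.
    return round(time.time() / seconds)


@lru_cache(maxsize=32)
def _expensive_lookup(ttl_hash=None):
    del ttl_hash  # only used to invalidate the cache periodically
    return time.time()  # stand-in for a slow geo/IP lookup


def expensive_lookup_cached():
    # A new ttl_hash value every hour means at most one real lookup per hour.
    return _expensive_lookup(ttl_hash=get_ttl_hash(seconds=60 * 60))
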
diff --git a/experiment_impact_tracker/emissions/us_ca_parser.py b/experiment_impact_tracker/emissions/us_ca_parser.py
index 989d9b3ca8949883c902ad44e28e41424c29201f..9e8c079f012e1f9865ad516512d18f8f29705f8c 100644
--- a/experiment_impact_tracker/emissions/us_ca_parser.py
+++ b/experiment_impact_tracker/emissions/us_ca_parser.py
@@ -1,28 +1,33 @@
 #!/usr/bin/env python3
 
+import time
+from collections import defaultdict
+from functools import lru_cache
+
 import arrow
 import pandas
 import requests
 from bs4 import BeautifulSoup
-from collections import defaultdict
-from functools import lru_cache
-import time
 
-FUEL_SOURCE_CSV = 'http://www.caiso.com/outlook/SP/History/{}/fuelsource.csv'
+FUEL_SOURCE_CSV = "http://www.caiso.com/outlook/SP/History/{}/fuelsource.csv"
+
+CARBON_INTENSITY_CSV = "http://www.caiso.com/outlook/SP/History/{}/co2.csv"
 
-CARBON_INTENSITY_CSV = 'http://www.caiso.com/outlook/SP/History/{}/co2.csv'
 
 def get_realtime_carbon_source():
     return CARBON_INTENSITY_CSV.format("<date>")
 
+
 def get_ttl_hash(seconds=3600):
     """Return the same value withing `seconds` time period"""
     return round(time.time() / seconds)
 
+
 def fetch_supply(*args, **kwargs):
     # Only query every 5 minutes since that's when the values are updated anyways
     # See example https://stackoverflow.com/questions/31771286/python-in-memory-cache-with-time-to-live
-    return _fetch_supply(**kwargs, ttl_hash=get_ttl_hash(seconds=5*60))
+    return _fetch_supply(**kwargs, ttl_hash=get_ttl_hash(seconds=5 * 60))
+
 
 @lru_cache(maxsize=32)
 def _fetch_supply(target_datetime=None, latest_only=True, ttl_hash=None, **kwargs):
@@ -53,49 +58,50 @@ def _fetch_supply(target_datetime=None, latest_only=True, ttl_hash=None, **kwarg
       'source': 'mysource.com'
     }
     """
-    del ttl_hash # make sure this isn't actually used, also stop pylint errors
+    del ttl_hash  # make sure this isn't actually used, also stop pylint errors
     # target_datetime = arrow.get(target_datetime)
-    target_date = arrow.get(target_datetime).to('US/Pacific').replace(
-        hour=0, minute=0, second=0, microsecond=0)
-    
-    formatted = target_date.format('YYYYMMDD')
-    zone_key='US-CA'
+    target_date = (
+        arrow.get(target_datetime)
+        .to("US/Pacific")
+        .replace(hour=0, minute=0, second=0, microsecond=0)
+    )
+
+    formatted = target_date.format("YYYYMMDD")
+    zone_key = "US-CA"
     # Get the supply from the CSV
     fuel_source_csv = pandas.read_csv(FUEL_SOURCE_CSV.format(formatted))
     carbon_intensity_csv = pandas.read_csv(CARBON_INTENSITY_CSV.format(formatted))
 
-
     # there may be a timing issue where the carbon intensity csv has one more timestamp than the fuel source csv; in this case truncate the carbon_intensity_csv
     if len(carbon_intensity_csv) > len(fuel_source_csv):
-        carbon_intensity_csv = carbon_intensity_csv[:len(fuel_source_csv)]
+        carbon_intensity_csv = carbon_intensity_csv[: len(fuel_source_csv)]
     latest_index = len(carbon_intensity_csv) - 1
 
     supply_map = {
-        'Solar': 'solar',
-        'Wind': 'wind',
-        'Geothermal': 'geothermal',
-        'Biomass': 'biomass',
-        'Biogas': 'biogas',
-        'Small hydro': 'hydro',
-        'Coal': 'coal',
-        'Nuclear': 'nuclear',
-        'Natural gas': 'gas',
-        'Large hydro': 'hydro',
-        'Imports' : 'imports',
-        'Batteries' : 'battery',
-        'Other': 'unknown'
+        "Solar": "solar",
+        "Wind": "wind",
+        "Geothermal": "geothermal",
+        "Biomass": "biomass",
+        "Biogas": "biogas",
+        "Small hydro": "hydro",
+        "Coal": "coal",
+        "Nuclear": "nuclear",
+        "Natural gas": "gas",
+        "Large hydro": "hydro",
+        "Imports": "imports",
+        "Batteries": "battery",
+        "Other": "unknown",
     }
 
     co2_map = {
-        'Biogas CO2' : 'biogas',
-        'Biomass CO2' : 'biomass',
-        'Natural Gas CO2' : 'gas',
-        'Coal CO2' : 'coal',
-        'Imports CO2' : 'imports',
-        'Geothermal CO2' : 'geothermal'
+        "Biogas CO2": "biogas",
+        "Biomass CO2": "biomass",
+        "Natural Gas CO2": "gas",
+        "Coal CO2": "coal",
+        "Imports CO2": "imports",
+        "Geothermal CO2": "geothermal",
     }
 
-
     daily_data = []
     if latest_only:
         start_index = latest_index
@@ -103,15 +109,18 @@ def _fetch_supply(target_datetime=None, latest_only=True, ttl_hash=None, **kwarg
         start_index = 0
 
     for i in range(start_index, latest_index + 1):
-        h, m = map(int, fuel_source_csv['Time'][i].split(':'))
-        date = arrow.utcnow().to('US/Pacific').replace(hour=h, minute=m,
-                                                       second=0, microsecond=0)
+        h, m = map(int, fuel_source_csv["Time"][i].split(":"))
+        date = (
+            arrow.utcnow()
+            .to("US/Pacific")
+            .replace(hour=h, minute=m, second=0, microsecond=0)
+        )
         data = {
-            'zoneKey': zone_key,
-            'supply': defaultdict(float),
-            'carbon_intensity' :  defaultdict(float),
-            'source': 'caiso.com',
-            'datetime': date.datetime
+            "zoneKey": zone_key,
+            "supply": defaultdict(float),
+            "carbon_intensity": defaultdict(float),
+            "source": "caiso.com",
+            "datetime": date.datetime,
         }
 
         # map items from names in CAISO CSV to names used in Electricity Map
@@ -122,7 +131,7 @@ def _fetch_supply(target_datetime=None, latest_only=True, ttl_hash=None, **kwarg
             supply = float(fuel_source_csv[ca_gen_type][i]) * 1000.0
 
             # if another mean of supply created a value, sum them up
-            data['supply'][mapped_gen_type] += supply
+            data["supply"][mapped_gen_type] += supply
             total_supply += supply
 
         summed_carbon = 0.0
@@ -131,13 +140,11 @@ def _fetch_supply(target_datetime=None, latest_only=True, ttl_hash=None, **kwarg
             summed_carbon += intensity
 
         summed_carbon_grams = 1000000 * summed_carbon
-        # While CAISO says that carbon intensity is divided by demans, 
-        # we can calculate carbon intensity from carbon divided by supply since this is 
+        # While CAISO says that carbon intensity is divided by demand,
+        # we can calculate carbon intensity from carbon divided by supply since this is
         # what is being produced
-        data['carbon_intensity'] = summed_carbon_grams / total_supply
+        data["carbon_intensity"] = summed_carbon_grams / total_supply
 
         daily_data.append(data)
 
     return daily_data
-
-
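
For reference, the intensity computed above is the per-fuel CO2 columns summed, scaled to grams, and divided by the summed supply. A worked example of that arithmetic with made-up values (these numbers are illustrative only, not CAISO data; the unit scaling simply follows the code above):

from collections import defaultdict

# Hypothetical single-timestep readings in the shapes used by _fetch_supply:
supply = defaultdict(float, {"solar": 5000.0, "gas": 3000.0, "imports": 2000.0})
co2 = defaultdict(float, {"gas": 0.9, "imports": 0.4})

total_supply = sum(supply.values())                 # 10000.0
summed_carbon_grams = 1000000 * sum(co2.values())   # 1300000.0

carbon_intensity = summed_carbon_grams / total_supply
print(carbon_intensity)  # 130.0 -- same formula as data["carbon_intensity"] above
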
diff --git a/experiment_impact_tracker/gpu/exceptions.py b/experiment_impact_tracker/gpu/exceptions.py
index 79b298ecded386b9a85abedcf8b5d0cfa371d049..c8b28a4097518ff216ab60238e3b1199908c1769 100644
--- a/experiment_impact_tracker/gpu/exceptions.py
+++ b/experiment_impact_tracker/gpu/exceptions.py
@@ -1,2 +1,2 @@
 class GPUAttributeAssertionError(Exception):
-    pass
\ No newline at end of file
+    pass
diff --git a/experiment_impact_tracker/gpu/nvidia.py b/experiment_impact_tracker/gpu/nvidia.py
index b93f20b4b9b24707ef80489294063e3b95878e5a..42339de130cadf53fa7527c75b8032d3c20eaa62 100644
--- a/experiment_impact_tracker/gpu/nvidia.py
+++ b/experiment_impact_tracker/gpu/nvidia.py
@@ -2,22 +2,22 @@ import atexit
 import subprocess
 import time
 from collections import OrderedDict
+from io import StringIO
 from subprocess import PIPE, Popen
 from xml.etree.ElementTree import fromstring
 
-from io import StringIO 
+import cpuinfo
 import numpy as np
 import pandas as pd
+import psutil
 import requests
 from bs4 import BeautifulSoup
 
-import cpuinfo
-import psutil
 from experiment_impact_tracker.utils import *
 
 from .exceptions import GPUAttributeAssertionError
 
-_timer = getattr(time, 'monotonic', time.time)
+_timer = getattr(time, "monotonic", time.time)
 
 
 def is_nvidia_compatible(*args, **kwargs):
@@ -27,9 +27,9 @@ def is_nvidia_compatible(*args, **kwargs):
         return False
 
     # make sure that nvidia-smi doesn't just return no devices
-    p = Popen(['nvidia-smi'], stdout=PIPE)
+    p = Popen(["nvidia-smi"], stdout=PIPE)
     stdout, stderror = p.communicate()
-    output = stdout.decode('UTF-8')
+    output = stdout.decode("UTF-8")
     if "no devices" in output.lower():
         return False
 
@@ -37,23 +37,23 @@ def is_nvidia_compatible(*args, **kwargs):
 
 
 def get_gpu_info(*args, **kwargs):
-    p = Popen(['nvidia-smi', '-q', '-x'], stdout=PIPE)
+    p = Popen(["nvidia-smi", "-q", "-x"], stdout=PIPE)
     outs, errors = p.communicate()
     xml = fromstring(outs)
     datas = []
-    driver_version = xml.findall('driver_version')[0].text
-    cuda_version = xml.findall('cuda_version')[0].text
+    driver_version = xml.findall("driver_version")[0].text
+    cuda_version = xml.findall("cuda_version")[0].text
 
-    for gpu_id, gpu in enumerate(xml.getiterator('gpu')):
+    for gpu_id, gpu in enumerate(xml.getiterator("gpu")):
         gpu_data = {}
-        name = [x for x in gpu.getiterator('product_name')][0].text
-        memory_usage = gpu.findall('fb_memory_usage')[0]
-        total_memory = memory_usage.findall('total')[0].text
-
-        gpu_data['name'] = name
-        gpu_data['total_memory'] = total_memory
-        gpu_data['driver_version'] = driver_version
-        gpu_data['cuda_version'] = cuda_version
+        name = [x for x in gpu.getiterator("product_name")][0].text
+        memory_usage = gpu.findall("fb_memory_usage")[0]
+        total_memory = memory_usage.findall("total")[0].text
+
+        gpu_data["name"] = name
+        gpu_data["total_memory"] = total_memory
+        gpu_data["driver_version"] = driver_version
+        gpu_data["cuda_version"] = cuda_version
         datas.append(gpu_data)
     return datas
 
@@ -77,11 +77,17 @@ def assert_gpus_by_attributes(attributes_set):
         for attribute, value in attributes_set.items():
             try:
                 if gpu[attribute] != value:
-                    raise GPUAttributeAssertionError("Attribute {} asserted to be {}, but found {} instead.".format(
-                        attribute, value, gpu[attribute]))
+                    raise GPUAttributeAssertionError(
+                        "Attribute {} asserted to be {}, but found {} instead.".format(
+                            attribute, value, gpu[attribute]
+                        )
+                    )
             except KeyError:
-                raise GPUAttributeAssertionError("Attribute {} does not exist. Available attributes: {}.".format(
-                    attribute, ",".join(list(gpu.keys()))))
+                raise GPUAttributeAssertionError(
+                    "Attribute {} does not exist. Available attributes: {}.".format(
+                        attribute, ",".join(list(gpu.keys()))
+                    )
+                )
 
 
 def _stringify_performance_states(state_dict):
@@ -98,36 +104,40 @@ def _stringify_performance_states(state_dict):
 
 def get_nvidia_gpu_power(pid_list, logger=None, **kwargs):
     # Find per process per gpu usage info
-    sp = subprocess.Popen(['nvidia-smi', 'pmon', '-c', '5'],
-                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    sp = subprocess.Popen(
+        ["nvidia-smi", "pmon", "-c", "5"],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+    )
     out_str = sp.communicate()
-    out_str_split = out_str[0].decode('utf-8').split('\n')
+    out_str_split = out_str[0].decode("utf-8").split("\n")
     # sometimes with too many processes on the machine or too many gpus, this command will reprint the headers
     # to avoid that we just remove duplicate lines
     out_str_split = list(OrderedDict.fromkeys(out_str_split))
-    out_str_pruned = [x for x in out_str_split if 'Idx' not in x] # [out_str_split[0], ] + out_str_split[2:]
-    
-    # For some weird reason the header position sometimes gets jumbled so we need to re-order it to the front 
+    out_str_pruned = [
+        x for x in out_str_split if "Idx" not in x
+    ]  # [out_str_split[0], ] + out_str_split[2:]
+
+    # For some weird reason the header position sometimes gets jumbled so we need to re-order it to the front
     position = -1
 
     for i, x in enumerate(out_str_pruned):
-        if 'gpu' in x:
+        if "gpu" in x:
             position = i
     if position == -1:
-        raise ValueError('Problem with output in nvidia-smi pmon -c 10')
+        raise ValueError("Problem with output in nvidia-smi pmon -c 5")
     out_str_pruned.insert(0, out_str_pruned.pop(position))
     out_str_final = "\n".join(out_str_pruned)
     out_str_final = out_str_final.replace("-", "0")
     out_str_final = out_str_final.replace("#", "")
 
-    df = pd.read_csv(StringIO(out_str_final), engine='python', delim_whitespace=True)
-    process_percentage_used_gpu = df.groupby(
-        ['gpu', 'pid']).mean().reset_index()
+    df = pd.read_csv(StringIO(out_str_final), engine="python", delim_whitespace=True)
+    process_percentage_used_gpu = df.groupby(["gpu", "pid"]).mean().reset_index()
 
-    p = Popen(['nvidia-smi', '-q', '-x'], stdout=PIPE)
+    p = Popen(["nvidia-smi", "-q", "-x"], stdout=PIPE)
     outs, errors = p.communicate()
     xml = fromstring(outs)
-    num_gpus = int(xml.findall('attached_gpus')[0].text)
+    num_gpus = int(xml.findall("attached_gpus")[0].text)
     results = []
     power = 0
     per_gpu_absolute_percent_usage = {}
@@ -135,70 +145,70 @@ def get_nvidia_gpu_power(pid_list, logger=None, **kwargs):
     absolute_power = 0
     per_gpu_performance_states = {}
 
-    for gpu_id, gpu in enumerate(xml.findall('gpu')):
+    for gpu_id, gpu in enumerate(xml.findall("gpu")):
         gpu_data = {}
 
-        name = gpu.findall('product_name')[0].text
-        gpu_data['name'] = name
+        name = gpu.findall("product_name")[0].text
+        gpu_data["name"] = name
 
         # get memory
-        memory_usage = gpu.findall('fb_memory_usage')[0]
-        total_memory = memory_usage.findall('total')[0].text
-        used_memory = memory_usage.findall('used')[0].text
-        free_memory = memory_usage.findall('free')[0].text
-        gpu_data['memory'] = {
-            'total': total_memory,
-            'used_memory': used_memory,
-            'free_memory': free_memory
+        memory_usage = gpu.findall("fb_memory_usage")[0]
+        total_memory = memory_usage.findall("total")[0].text
+        used_memory = memory_usage.findall("used")[0].text
+        free_memory = memory_usage.findall("free")[0].text
+        gpu_data["memory"] = {
+            "total": total_memory,
+            "used_memory": used_memory,
+            "free_memory": free_memory,
         }
 
         # get utilization
-        utilization = gpu.findall('utilization')[0]
-        gpu_util = utilization.findall('gpu_util')[0].text
-        memory_util = utilization.findall('memory_util')[0].text
-        gpu_data['utilization'] = {
-            'gpu_util': gpu_util,
-            'memory_util': memory_util
-        }
+        utilization = gpu.findall("utilization")[0]
+        gpu_util = utilization.findall("gpu_util")[0].text
+        memory_util = utilization.findall("memory_util")[0].text
+        gpu_data["utilization"] = {"gpu_util": gpu_util, "memory_util": memory_util}
 
         # get power
-        power_readings = gpu.findall('power_readings')[0]
-        power_draw = power_readings.findall('power_draw')[0].text
+        power_readings = gpu.findall("power_readings")[0]
+        power_draw = power_readings.findall("power_draw")[0].text
 
-        gpu_data['power_readings'] = {
-            'power_draw': power_draw
-        }
+        gpu_data["power_readings"] = {"power_draw": power_draw}
         absolute_power += float(power_draw.replace("W", ""))
 
         # processes
-        processes = gpu.findall('processes')[0]
+        processes = gpu.findall("processes")[0]
 
         infos = []
         # all the info for processes on this particular gpu that we're on
-        gpu_based_processes = process_percentage_used_gpu[process_percentage_used_gpu['gpu'] == gpu_id]
+        gpu_based_processes = process_percentage_used_gpu[
+            process_percentage_used_gpu["gpu"] == gpu_id
+        ]
         # what's the total absolute SM for this gpu across all accessible processes
-        percentage_of_gpu_used_by_all_processes = float(
-            gpu_based_processes['sm'].sum())
+        percentage_of_gpu_used_by_all_processes = float(gpu_based_processes["sm"].sum())
         per_gpu_power_draw = {}
-        for info in processes.findall('process_info'):
-            pid = info.findall('pid')[0].text
-            process_name = info.findall('process_name')[0].text
-            used_memory = info.findall('used_memory')[0].text
-            sm_absolute_percent = gpu_based_processes[gpu_based_processes['pid'] == int(
-                pid)]['sm'].sum()
+        for info in processes.findall("process_info"):
+            pid = info.findall("pid")[0].text
+            process_name = info.findall("process_name")[0].text
+            used_memory = info.findall("used_memory")[0].text
+            sm_absolute_percent = gpu_based_processes[
+                gpu_based_processes["pid"] == int(pid)
+            ]["sm"].sum()
             if percentage_of_gpu_used_by_all_processes == 0:
                 # avoid divide by zero, sometimes nothing is used so 0/0 should = 0 in this case
                 sm_relative_percent = 0
             else:
-                sm_relative_percent = sm_absolute_percent / \
-                    percentage_of_gpu_used_by_all_processes
-            infos.append({
-                'pid': pid,
-                'process_name': process_name,
-                'used_memory': used_memory,
-                'sm_relative_percent': sm_relative_percent,
-                'sm_absolute_percent': sm_absolute_percent
-            })
+                sm_relative_percent = (
+                    sm_absolute_percent / percentage_of_gpu_used_by_all_processes
+                )
+            infos.append(
+                {
+                    "pid": pid,
+                    "process_name": process_name,
+                    "used_memory": used_memory,
+                    "sm_relative_percent": sm_relative_percent,
+                    "sm_absolute_percent": sm_absolute_percent,
+                }
+            )
 
             if int(pid) in pid_list:
                 # only add a gpu to the list if it's being used by one of the processes. sometimes nvidia-smi seems to list all gpus available
@@ -213,19 +223,16 @@ def get_nvidia_gpu_power(pid_list, logger=None, **kwargs):
                 if gpu_id not in per_gpu_performance_states:
                     # we only log information for gpus that we're using, we've noticed that nvidia-smi will sometimes return information
                     # about all gpu's on a slurm cluster even if they're not assigned to a worker
-                    performance_state = gpu.findall(
-                        'performance_state')[0].text
+                    performance_state = gpu.findall("performance_state")[0].text
                     per_gpu_performance_states[gpu_id] = performance_state
 
-                power += sm_relative_percent * \
-                    float(power_draw.replace("W", ""))
+                power += sm_relative_percent * float(power_draw.replace("W", ""))
                 per_gpu_power_draw[gpu_id] = float(power_draw.replace("W", ""))
                 # want a proportion value rather than percentage
-                per_gpu_absolute_percent_usage[gpu_id] += (
-                    sm_absolute_percent / 100.0)
+                per_gpu_absolute_percent_usage[gpu_id] += sm_absolute_percent / 100.0
                 per_gpu_relative_percent_usage[gpu_id] += sm_relative_percent
 
-        gpu_data['processes'] = infos
+        gpu_data["processes"] = infos
 
         results.append(gpu_data)
 
@@ -233,19 +240,23 @@ def get_nvidia_gpu_power(pid_list, logger=None, **kwargs):
         average_gpu_utilization = 0
         average_gpu_relative_utilization = 0
     else:
-        average_gpu_utilization = np.mean(
-            list(per_gpu_absolute_percent_usage.values()))
+        average_gpu_utilization = np.mean(list(per_gpu_absolute_percent_usage.values()))
         average_gpu_relative_utilization = np.mean(
-            list(per_gpu_relative_percent_usage.values()))
+            list(per_gpu_relative_percent_usage.values())
+        )
 
     data_return_values_with_headers = {
         "nvidia_draw_absolute": absolute_power,
         "nvidia_estimated_attributable_power_draw": power,
         "average_gpu_estimated_utilization_absolute": average_gpu_utilization,
-        "per_gpu_average_estimated_utilization_absolute": process_percentage_used_gpu.set_index(['gpu', 'pid']).to_dict(orient='index'),
+        "per_gpu_average_estimated_utilization_absolute": process_percentage_used_gpu.set_index(
+            ["gpu", "pid"]
+        ).to_dict(
+            orient="index"
+        ),
         "average_gpu_estimated_utilization_relative": average_gpu_relative_utilization,
         "per_gpu_performance_state": per_gpu_performance_states,
-        "per_gpu_power_draw" : per_gpu_power_draw
+        "per_gpu_power_draw": per_gpu_power_draw,
     }
 
     return data_return_values_with_headers
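
The attribution logic above splits each GPU's measured power draw across processes in proportion to their share of the SM utilization reported by `nvidia-smi pmon`. A small numeric sketch of that proportional split (all values hypothetical):

# One GPU, two processes; only pid 1234 belongs to the tracked experiment.
power_draw_watts = 250.0
sm_by_pid = {1234: 60.0, 5678: 20.0}  # absolute SM percent per process
tracked_pids = [1234]

total_sm = sum(sm_by_pid.values())  # 80.0
attributed_watts = 0.0
for pid, sm in sm_by_pid.items():
    if pid in tracked_pids:
        relative_share = sm / total_sm if total_sm else 0.0  # 0.75
        attributed_watts += relative_share * power_draw_watts

print(attributed_watts)  # 187.5 W attributed to the experiment on this GPU
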
diff --git a/experiment_impact_tracker/operating_system/common.py b/experiment_impact_tracker/operating_system/common.py
index 008711d942220ad5354c23a46054aad105fbc1cd..46e8244b3a99b5d14f1a8501e599b8a0c0ef851e 100644
--- a/experiment_impact_tracker/operating_system/common.py
+++ b/experiment_impact_tracker/operating_system/common.py
@@ -2,4 +2,4 @@ from sys import platform
 
 
 def is_linux(*args, **kwargs):
-    return (platform == "linux" or platform == "linux2")
\ No newline at end of file
+    return platform == "linux" or platform == "linux2"
diff --git a/experiment_impact_tracker/py_environment/common.py b/experiment_impact_tracker/py_environment/common.py
index f42e7e3cbc947b9dd3081cdfd97546d644cc57ef..e50d6d4f2cde7d4cf9b0708e55bc0641880987f9 100644
--- a/experiment_impact_tracker/py_environment/common.py
+++ b/experiment_impact_tracker/py_environment/common.py
@@ -1,8 +1,10 @@
-import pkg_resources
 import copyreg
 import zipimport
 
-copyreg.pickle(zipimport.zipimporter, lambda x: (x.__class__, (x.archive, )))
+import pkg_resources
+
+copyreg.pickle(zipimport.zipimporter, lambda x: (x.__class__, (x.archive,)))
+
 
 def get_python_packages_and_versions(*args, **kwargs):
-    return list(pkg_resources.working_set)
\ No newline at end of file
+    return list(pkg_resources.working_set)
diff --git a/experiment_impact_tracker/stats.py b/experiment_impact_tracker/stats.py
index 42aaf8e90cb365a834adf41cd434f1b41cc8680f..b8e5ad332bab4656e76e1ee587a1e7d85ed5de90 100644
--- a/experiment_impact_tracker/stats.py
+++ b/experiment_impact_tracker/stats.py
@@ -1,10 +1,17 @@
-import numpy as np
-from scipy.stats import ttest_ind, mannwhitneyu, rankdata, median_test
 import bootstrapped.bootstrap as bs
 import bootstrapped.compare_functions as bs_compare
 import bootstrapped.stats_functions as bs_stats
+import numpy as np
+from scipy.stats import mannwhitneyu, median_test, rankdata, ttest_ind
 
-tests_list = ['t-test', "Welch t-test", 'Mann-Whitney', 'Ranked t-test', 'bootstrap', 'permutation']
+tests_list = [
+    "t-test",
+    "Welch t-test",
+    "Mann-Whitney",
+    "Ranked t-test",
+    "bootstrap",
+    "permutation",
+]
 
 
 def run_permutation_test(all_data, n1, n2):
@@ -16,8 +23,7 @@ def run_permutation_test(all_data, n1, n2):
 
 def get_average_treatment_effect(data1, data2):
     delta = np.mean(data1) - np.mean(data2)
-    delta_err = 1.96 * np.sqrt(np.var(data1) / len(data1) + \
-        np.var(data2) / len(data2))
+    delta_err = 1.96 * np.sqrt(np.var(data1) / len(data1) + np.var(data2) / len(data2))
     return delta, delta_err
 
 
@@ -42,13 +48,20 @@ def run_test(test_id, data1, data2, alpha=0.05):
     n1 = data1.size
     n2 = data2.size
 
-    if test_id == 'bootstrap':
+    if test_id == "bootstrap":
         assert alpha < 1 and alpha > 0, "alpha should be between 0 and 1"
-        res = bs.bootstrap_ab(data1, data2, bs_stats.mean, bs_compare.difference, alpha=alpha, num_iterations=1000)
+        res = bs.bootstrap_ab(
+            data1,
+            data2,
+            bs_stats.mean,
+            bs_compare.difference,
+            alpha=alpha,
+            num_iterations=1000,
+        )
         rejection = np.sign(res.upper_bound) == np.sign(res.lower_bound)
         return rejection, res
 
-    elif test_id == 't-test':
+    elif test_id == "t-test":
         _, p = ttest_ind(data1, data2, equal_var=True)
         return p < alpha, p
 
@@ -56,20 +69,20 @@ def run_test(test_id, data1, data2, alpha=0.05):
         _, p = ttest_ind(data1, data2, equal_var=False)
         return p < alpha, p
 
-    elif test_id == 'Mann-Whitney':
-        _, p = mannwhitneyu(data1, data2, alternative='two-sided')
+    elif test_id == "Mann-Whitney":
+        _, p = mannwhitneyu(data1, data2, alternative="two-sided")
         return p < alpha, p
 
-    elif test_id == 'Ranked t-test':
+    elif test_id == "Ranked t-test":
         all_data = np.concatenate([data1.copy(), data2.copy()], axis=0)
         ranks = rankdata(all_data)
-        ranks1 = ranks[: n1]
-        ranks2 = ranks[n1:n1 + n2]
+        ranks1 = ranks[:n1]
+        ranks2 = ranks[n1 : n1 + n2]
         assert ranks2.size == n2
         _, p = ttest_ind(ranks1, ranks2, equal_var=True)
         return p < alpha, p
 
-    elif test_id == 'permutation':
+    elif test_id == "permutation":
         all_data = np.concatenate([data1.copy(), data2.copy()], axis=0)
         delta = np.abs(data1.mean() - data2.mean())
         num_samples = 1000
@@ -78,8 +91,10 @@ def run_test(test_id, data1, data2, alpha=0.05):
             estimates.append(run_permutation_test(all_data.copy(), n1, n2))
         estimates = np.abs(np.array(estimates))
         diff_count = len(np.where(estimates <= delta)[0])
-        return (1.0 - (float(diff_count) / float(num_samples))) < alpha, (1.0 - (float(diff_count) / float(num_samples)))
+        return (
+            (1.0 - (float(diff_count) / float(num_samples))) < alpha,
+            (1.0 - (float(diff_count) / float(num_samples))),
+        )
 
     else:
         raise NotImplementedError
-
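
The tests above all share the run_test(test_id, data1, data2, alpha) interface, returning whether the null hypothesis is rejected plus the corresponding p-value (or bootstrap result). A short usage sketch on synthetic data (the data here are illustrative):

import numpy as np

from experiment_impact_tracker.stats import get_average_treatment_effect, run_test

rng = np.random.RandomState(0)
data1 = rng.normal(loc=1.0, scale=1.0, size=200)
data2 = rng.normal(loc=0.8, scale=1.0, size=200)

reject, p = run_test("t-test", data1, data2, alpha=0.05)
delta, delta_err = get_average_treatment_effect(data1, data2)
print(reject, p)
print(delta, "+/-", delta_err)  # mean difference with a 95% interval half-width
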
diff --git a/experiment_impact_tracker/utils.py b/experiment_impact_tracker/utils.py
index 8ae816ddfc22481da4a333acdd340b34e4de56e8..29a4b646eb3dc4cfbeed3a84cae554ca2fe0d2b1 100644
--- a/experiment_impact_tracker/utils.py
+++ b/experiment_impact_tracker/utils.py
@@ -7,23 +7,27 @@ import traceback
 from datetime import datetime
 from functools import wraps
 from multiprocessing import Process, Queue
-import ujson
+
 import numpy as np
 import pandas as pd
-
 import psutil
-from experiment_impact_tracker.emissions.constants import PUE
-from experiment_impact_tracker.data_utils import load_data_into_frame
+import ujson
+
 from experiment_impact_tracker.data_utils import *
+from experiment_impact_tracker.data_utils import load_data_into_frame
+from experiment_impact_tracker.emissions.constants import PUE
+
+_timer = getattr(time, "monotonic", time.time)
+
 
-_timer = getattr(time, 'monotonic', time.time)
 def get_timestamp(*args, **kwargs):
     now = datetime.now()
     timestamp = datetime.timestamp(now)
     return timestamp
 
+
 def get_flop_count_tensorflow(graph=None, session=None):
-    import tensorflow as tf # import within function so as not to require tf for package
+    import tensorflow as tf  # import within function so as not to require tf for package
     from tensorflow.python.framework import graph_util
 
     def load_pb(pb):
@@ -31,7 +35,7 @@ def get_flop_count_tensorflow(graph=None, session=None):
             graph_def = tf.GraphDef()
             graph_def.ParseFromString(f.read())
         with tf.Graph().as_default() as graph:
-            tf.import_graph_def(graph_def, name='')
+            tf.import_graph_def(graph_def, name="")
             return graph
 
     if graph is None and session is None:
@@ -43,26 +47,25 @@ def get_flop_count_tensorflow(graph=None, session=None):
     opts = tf.profiler.ProfileOptionBuilder.float_operation()
 
     # We use the Keras session graph in the call to the profiler.
-    flops = tf.profiler.profile(graph=graph,
-                                run_meta=run_meta, cmd='op', options=opts)
+    flops = tf.profiler.profile(graph=graph, run_meta=run_meta, cmd="op", options=opts)
 
     return flops.total_float_ops  # Prints the "flops" of the model.
 
 
 def processify(func):
-    '''Decorator to run a function as a process.
+    """Decorator to run a function as a process.
     Be sure that every argument and the return value
     is *picklable*.
     The created process is joined, so the code does not
     run in parallel.
-    '''
+    """
 
     def process_func(q, *args, **kwargs):
         try:
             ret = func(q, *args, **kwargs)
         except Exception as e:
             ex_type, ex_value, tb = sys.exc_info()
-            error = ex_type, ex_value, ''.join(traceback.format_tb(tb))
+            error = ex_type, ex_value, "".join(traceback.format_tb(tb))
             ret = None
             q.put((ret, error))
             raise e
@@ -70,20 +73,19 @@ def processify(func):
             error = None
         q.put((ret, error))
 
-
     # register original function with different name
     # in sys.modules so it is pickable
-    process_func.__name__ = func.__name__ + 'processify_func'
+    process_func.__name__ = func.__name__ + "processify_func"
     setattr(sys.modules[__name__], process_func.__name__, process_func)
 
     @wraps(func)
     def wrapper(*args, **kwargs):
-        queue = Queue() # not the same as a Queue.Queue()
+        queue = Queue()  # not the same as a Queue.Queue()
         p = Process(target=process_func, args=[queue] + list(args), kwargs=kwargs)
         p.start()
         return p, queue
-    return wrapper
 
+    return wrapper
 
 
 def _get_cpu_hours_from_per_process_data(json_array):
@@ -94,31 +96,43 @@ def _get_cpu_hours_from_per_process_data(json_array):
             latest_per_pid[pid] = value["user"] + value["system"]
     return sum(latest_per_pid.values())
 
+
 def gather_additional_info(info, logdir):
     df, json_array = load_data_into_frame(logdir)
     cpu_seconds = _get_cpu_hours_from_per_process_data(json_array)
     num_gpus = len(info["gpu_info"])
-    exp_len = datetime.timestamp(info["experiment_end"]) - \
-        datetime.timestamp(info["experiment_start"])
-    exp_len_hours = exp_len/3600.
+    exp_len = datetime.timestamp(info["experiment_end"]) - datetime.timestamp(
+        info["experiment_start"]
+    )
+    exp_len_hours = exp_len / 3600.0
     # integrate power
     # https://electronics.stackexchange.com/questions/237025/converting-watt-values-over-time-to-kwh
     # multiply by carbon intensity to get Kg Carbon eq
     time_differences = df["timestamp"].diff()
-    time_differences[0] = df["timestamp"][0] - \
-        datetime.timestamp(info["experiment_start"])
-    
+    time_differences[0] = df["timestamp"][0] - datetime.timestamp(
+        info["experiment_start"]
+    )
+
     # Add final timestamp and extrapolate last row of power estimates
-    time_differences.loc[len(time_differences)] = datetime.timestamp(info["experiment_end"]) - df["timestamp"][len(df["timestamp"]) - 1]
+    time_differences.loc[len(time_differences)] = (
+        datetime.timestamp(info["experiment_end"])
+        - df["timestamp"][len(df["timestamp"]) - 1]
+    )
 
     # elementwise multiplication and sum
-    time_differences_in_hours = time_differences/3600.
-    power_draw_rapl_kw = df["rapl_estimated_attributable_power_draw"] / 1000.
-    nvidia_power_draw_kw = df["nvidia_estimated_attributable_power_draw"] / 1000.
-    nvidia_power_draw_kw.loc[len(nvidia_power_draw_kw)] = nvidia_power_draw_kw.loc[len(nvidia_power_draw_kw)-1] 
-    power_draw_rapl_kw.loc[len(power_draw_rapl_kw)] = power_draw_rapl_kw.loc[len(power_draw_rapl_kw)-1]
-    gpu_absolute_util = df["average_gpu_estimated_utilization_absolute"] 
-    gpu_absolute_util.loc[len(gpu_absolute_util)] = gpu_absolute_util.loc[len(gpu_absolute_util)-1]
+    time_differences_in_hours = time_differences / 3600.0
+    power_draw_rapl_kw = df["rapl_estimated_attributable_power_draw"] / 1000.0
+    nvidia_power_draw_kw = df["nvidia_estimated_attributable_power_draw"] / 1000.0
+    nvidia_power_draw_kw.loc[len(nvidia_power_draw_kw)] = nvidia_power_draw_kw.loc[
+        len(nvidia_power_draw_kw) - 1
+    ]
+    power_draw_rapl_kw.loc[len(power_draw_rapl_kw)] = power_draw_rapl_kw.loc[
+        len(power_draw_rapl_kw) - 1
+    ]
+    gpu_absolute_util = df["average_gpu_estimated_utilization_absolute"]
+    gpu_absolute_util.loc[len(gpu_absolute_util)] = gpu_absolute_util.loc[
+        len(gpu_absolute_util) - 1
+    ]
     # elementwise multiplication and sum
     kw_hr_nvidia = np.multiply(time_differences_in_hours, nvidia_power_draw_kw)
     kw_hr_rapl = np.multiply(time_differences_in_hours, power_draw_rapl_kw)
@@ -128,36 +142,49 @@ def gather_additional_info(info, logdir):
     realtime_carbon = None
     if "realtime_carbon_intensity" in df:
         realtime_carbon = df["realtime_carbon_intensity"]
-        realtime_carbon.loc[len(realtime_carbon)] = realtime_carbon.loc[len(realtime_carbon)-1]
-        # If we lost some values due to network errors, forward fill the last available value. 
+        realtime_carbon.loc[len(realtime_carbon)] = realtime_carbon.loc[
+            len(realtime_carbon) - 1
+        ]
+        # If we lost some values due to network errors, forward fill the last available value.
         # Backfill in a second pass to get any values that haven't been picked up.
         # Then finally, if any values remain, replace with the region average.
-        realtime_carbon = pd.to_numeric(realtime_carbon, errors='coerce').fillna(method='ffill').fillna(method='bfill').fillna(value=info["region_carbon_intensity_estimate"]["carbonIntensity"])
+        realtime_carbon = (
+            pd.to_numeric(realtime_carbon, errors="coerce")
+            .fillna(method="ffill")
+            .fillna(method="bfill")
+            .fillna(value=info["region_carbon_intensity_estimate"]["carbonIntensity"])
+        )
         try:
-            estimated_carbon_impact_grams_per_timestep = np.multiply(total_power_per_timestep, realtime_carbon)
+            estimated_carbon_impact_grams_per_timestep = np.multiply(
+                total_power_per_timestep, realtime_carbon
+            )
         except:
-            import pdb; pdb.set_trace()
+            import pdb
+
+            pdb.set_trace()
         estimated_carbon_impact_grams = estimated_carbon_impact_grams_per_timestep.sum()
     else:
-        estimated_carbon_impact_grams = total_power * \
-            info["region_carbon_intensity_estimate"]["carbonIntensity"]
-    
+        estimated_carbon_impact_grams = (
+            total_power * info["region_carbon_intensity_estimate"]["carbonIntensity"]
+        )
+
     estimated_carbon_impact_kg = estimated_carbon_impact_grams / 1000.0
     # GPU-hours percent utilization * length of time utilized (assumes absolute utilization)
-    gpu_hours = np.multiply(
-        time_differences_in_hours, gpu_absolute_util).sum() * num_gpus
+    gpu_hours = (
+        np.multiply(time_differences_in_hours, gpu_absolute_util).sum() * num_gpus
+    )
 
-    cpu_hours = cpu_seconds/3600.
+    cpu_hours = cpu_seconds / 3600.0
 
     data = {
-        "cpu_hours" : cpu_hours, 
-        "gpu_hours" : gpu_hours,
-        "estimated_carbon_impact_kg" : estimated_carbon_impact_kg,
-        "total_power" : total_power,
-        "kw_hr_gpu" : kw_hr_nvidia.sum(),
-        "kw_hr_cpu" : kw_hr_rapl.sum(),
-        "exp_len_hours" : exp_len_hours
-     }
+        "cpu_hours": cpu_hours,
+        "gpu_hours": gpu_hours,
+        "estimated_carbon_impact_kg": estimated_carbon_impact_kg,
+        "total_power": total_power,
+        "kw_hr_gpu": kw_hr_nvidia.sum(),
+        "kw_hr_cpu": kw_hr_rapl.sum(),
+        "exp_len_hours": exp_len_hours,
+    }
 
     if realtime_carbon is not None:
         data["average_realtime_carbon_intensity"] = realtime_carbon.mean()
diff --git a/scripts/create-compute-appendix b/scripts/create-compute-appendix
index bc862be469e1f60a1807e958ed3ce7b7e80af475..97c113dd828a9ec4dcb9d9ab5b0b32a7e38b3c94 100644
--- a/scripts/create-compute-appendix
+++ b/scripts/create-compute-appendix
@@ -20,7 +20,6 @@ from deepdiff import DeepDiff  # For Deep Difference of 2 objects
 from jinja2 import Environment, FileSystemLoader
 
 import experiment_impact_tracker
-from experiment_impact_tracker.emissions.constants import PUE
 from experiment_impact_tracker.create_graph_appendix import (
     create_graphs, create_scatterplot_from_df)
 from experiment_impact_tracker.data_utils import (load_data_into_frame,
@@ -28,6 +27,7 @@ from experiment_impact_tracker.data_utils import (load_data_into_frame,
                                                   zip_data_and_info)
 from experiment_impact_tracker.emissions.common import \
     get_realtime_carbon_source
+from experiment_impact_tracker.emissions.constants import PUE
 from experiment_impact_tracker.emissions.get_region_metrics import \
     get_zone_name_by_id
 from experiment_impact_tracker.stats import (get_average_treatment_effect,
diff --git a/scripts/generate-carbon-impact-statement b/scripts/generate-carbon-impact-statement
index 7bbb1ec91b9dc2f718657621394ba6638c9f6d72..eace8619dc8e1e5608f28817eba600a7548eb658 100644
--- a/scripts/generate-carbon-impact-statement
+++ b/scripts/generate-carbon-impact-statement
@@ -20,7 +20,6 @@ from deepdiff import DeepDiff  # For Deep Difference of 2 objects
 from jinja2 import Environment, FileSystemLoader
 
 import experiment_impact_tracker
-from experiment_impact_tracker.emissions.constants import PUE
 from experiment_impact_tracker.create_graph_appendix import (
     create_graphs, create_scatterplot_from_df)
 from experiment_impact_tracker.data_utils import (load_data_into_frame,
@@ -28,6 +27,7 @@ from experiment_impact_tracker.data_utils import (load_data_into_frame,
                                                   zip_data_and_info)
 from experiment_impact_tracker.emissions.common import \
     get_realtime_carbon_source
+from experiment_impact_tracker.emissions.constants import PUE
 from experiment_impact_tracker.emissions.get_region_metrics import \
     get_zone_name_by_id
 from experiment_impact_tracker.stats import (get_average_treatment_effect,
diff --git a/scripts/get-rough-emissions-estimate b/scripts/get-rough-emissions-estimate
index a264c819bc4b66e35a6442967672722f9204ba7f..5ce4bf92454f61424459a5e37fffa93bf5afc27b 100644
--- a/scripts/get-rough-emissions-estimate
+++ b/scripts/get-rough-emissions-estimate
@@ -1,14 +1,15 @@
 #!/usr/bin/env python3
 
 import argparse
+import os
 import sys
 from pprint import pprint
+
+import geocoder
 import pandas as pd
-import os
 from geopy.geocoders import Nominatim
-import geocoder
-import experiment_impact_tracker
 
+import experiment_impact_tracker
 from experiment_impact_tracker.emissions.get_region_metrics import (
     get_current_region_info, get_sorted_region_infos,
     get_zone_information_by_coords)
diff --git a/setup.py b/setup.py
index 3b6f997cef56abfbac42acfcae88116e7dc739ad..2cea83ebef13a4ee6dca9185f225c7edc3661f41 100644
--- a/setup.py
+++ b/setup.py
@@ -1,79 +1,77 @@
-import sys
 import subprocess
-from setuptools import setup, find_packages
+import sys
+from distutils.util import convert_path
 from distutils.version import LooseVersion
 
-
-from distutils.util import convert_path
+from setuptools import find_packages, setup
 
 main_ns = {}
-ver_path = convert_path('experiment_impact_tracker/version.py')
+ver_path = convert_path("experiment_impact_tracker/version.py")
 with open(ver_path) as ver_file:
     exec(ver_file.read(), main_ns)
 
 if sys.version_info.major != 3:
-    print('This Python is only compatible with Python 3, but you are running '
-          'Python {}. The installation will likely fail.'.format(sys.version_info.major))
+    print(
+        "This Python is only compatible with Python 3, but you are running "
+        "Python {}. The installation will likely fail.".format(sys.version_info.major)
+    )
 
-setup(name='experiment_impact_tracker',
-      packages= find_packages(),
-      include_package_data=True,
-      scripts=['scripts/create-compute-appendix','scripts/get-region-emissions-info', 'scripts/lookup-cloud-region-info', 'scripts/generate-carbon-impact-statement', 'scripts/get-rough-emissions-estimate'],
-      install_requires=[
-          'requests',
-          'bs4',
-          'shapely',
-          'scipy',
-          'joblib',
-          'numpy',
-          'pandas>0.25.0',
-          'matplotlib',
-          'py-cpuinfo',
-          'pylatex',
-          'ujson',
-          'geocoder',
-          'deepdiff',
-          'arrow',
-          'bootstrapped',
-          'jinja2',
-          'geopy',
-          'progiter', # tqdm doesn't play well with multi-threading
-                      # and can lead to deadlocks. progiter is single threaded
-                      # so we used it instead in this package
-          'psutil',
-          'seaborn'
-      ], 
-      extras_require={
-        'tests': [
-            'pytest==3.5.1',
-            'pytest-cov',
-            'pytest-env',
-            'pytest-xdist',
-        ],
-        'docs': [
-            'sphinx',
-            'sphinx-autobuild',
-            'sphinx-rtd-theme',
-            'recommonmark'
-        ]
-      },
-      description='A toolkit for tracking energy, carbon, and compute metrics for machine learning (or any other) experiments.',
-      author='Peter Henderson',
-      url='https://github.com/Breakend/experiment-impact-tracker',
-      keywords=["machine learning", "carbon", "energy", "compute"],
-      license="MIT",
-      version=main_ns['__version__'],
-        classifiers=[
-    'Development Status :: 3 - Alpha',      # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
-    'Intended Audience :: Developers',      # Define that your audience are developers
-    'Topic :: Software Development :: Build Tools',
-    'License :: OSI Approved :: MIT License',   # Again, pick a license
-    'Operating System :: POSIX :: Linux',
-    'Programming Language :: Python :: 3.5',
-    'Programming Language :: Python :: 3.6',
-    'Programming Language :: Python :: 3.7',
-  ]
-      )
+setup(
+    name="experiment_impact_tracker",
+    packages=find_packages(),
+    include_package_data=True,
+    scripts=[
+        "scripts/create-compute-appendix",
+        "scripts/get-region-emissions-info",
+        "scripts/lookup-cloud-region-info",
+        "scripts/generate-carbon-impact-statement",
+        "scripts/get-rough-emissions-estimate",
+    ],
+    install_requires=[
+        "requests",
+        "bs4",
+        "shapely",
+        "scipy",
+        "joblib",
+        "numpy",
+        "pandas>0.25.0",
+        "matplotlib",
+        "py-cpuinfo",
+        "pylatex",
+        "ujson",
+        "geocoder",
+        "deepdiff",
+        "arrow",
+        "bootstrapped",
+        "jinja2",
+        "geopy",
+        "progiter",  # tqdm doesn't play well with multi-threading
+        # and can lead to deadlocks. progiter is single threaded
+        # so we used it instead in this package
+        "psutil",
+        "seaborn",
+    ],
+    extras_require={
+        "tests": ["pytest==3.5.1", "pytest-cov", "pytest-env", "pytest-xdist",],
+        "docs": ["sphinx", "sphinx-autobuild", "sphinx-rtd-theme", "recommonmark"],
+    },
+    description="A toolkit for tracking energy, carbon, and compute metrics for machine learning (or any other) experiments.",
+    author="Peter Henderson",
+    url="https://github.com/Breakend/experiment-impact-tracker",
+    keywords=["machine learning", "carbon", "energy", "compute"],
+    license="MIT",
+    version=main_ns["__version__"],
+    classifiers=[
+        "Development Status :: 3 - Alpha",  # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
+        "Intended Audience :: Developers",  # Define that your audience are developers
+        "Topic :: Software Development :: Build Tools",
+        "License :: OSI Approved :: MIT License",  # Again, pick a license
+        "Operating System :: POSIX :: Linux",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+    ],
+)
 
 # python setup.py sdist
 # python setup.py bdist_wheel
diff --git a/test/climatetrackertest.py b/test/climatetrackertest.py
index 1486fb1b43201eec8928ffb2acade02151707b55..d18b6e023f9eb755c9dfcfda5a1fcd95feebd7a2 100644
--- a/test/climatetrackertest.py
+++ b/test/climatetrackertest.py
@@ -1,12 +1,13 @@
 # Code in file tensor/two_layer_net_tensor.py
 import torch
+
 from experiment_impact_tracker.compute_tracker import ImpactTracker
 
-tracker = ImpactTracker('./testlogs/')
+tracker = ImpactTracker("./testlogs/")
 
 tracker.launch_impact_monitor()
-#device = torch.device('cpu')
-device = torch.device('cuda') # Uncomment this to run on GPU
+# device = torch.device('cpu')
+device = torch.device("cuda")  # Comment this out and use the CPU line above to run on CPU
 
 # N is batch size; D_in is input dimension;
 # H is hidden dimension; D_out is output dimension.
@@ -22,25 +23,25 @@ w2 = torch.randn(H, D_out, device=device)
 
 learning_rate = 1e-6
 for t in range(1000):
-  # Forward pass: compute predicted y
-  h = x.mm(w1)
-  h_relu = h.clamp(min=0)
-  y_pred = h_relu.mm(w2)
-
-  # Compute and print loss; loss is a scalar, and is stored in a PyTorch Tensor
-  # of shape (); we can get its value as a Python number with loss.item().
-  loss = (y_pred - y).pow(2).sum()
-  print(t, loss.item())
-
-  # Backprop to compute gradients of w1 and w2 with respect to loss
-  grad_y_pred = 2.0 * (y_pred - y)
-  grad_w2 = h_relu.t().mm(grad_y_pred)
-  grad_h_relu = grad_y_pred.mm(w2.t())
-  grad_h = grad_h_relu.clone()
-  grad_h[h < 0] = 0
-  grad_w1 = x.t().mm(grad_h)
-
-  # Update weights using gradient descent
-  w1 -= learning_rate * grad_w1
-  w2 -= learning_rate * grad_w2
-  tracker.get_latest_info_and_check_for_errors()
+    # Forward pass: compute predicted y
+    h = x.mm(w1)
+    h_relu = h.clamp(min=0)
+    y_pred = h_relu.mm(w2)
+
+    # Compute and print loss; loss is a scalar, and is stored in a PyTorch Tensor
+    # of shape (); we can get its value as a Python number with loss.item().
+    loss = (y_pred - y).pow(2).sum()
+    print(t, loss.item())
+
+    # Backprop to compute gradients of w1 and w2 with respect to loss
+    grad_y_pred = 2.0 * (y_pred - y)
+    grad_w2 = h_relu.t().mm(grad_y_pred)
+    grad_h_relu = grad_y_pred.mm(w2.t())
+    grad_h = grad_h_relu.clone()
+    grad_h[h < 0] = 0
+    grad_w1 = x.t().mm(grad_h)
+
+    # Update weights using gradient descent
+    w1 -= learning_rate * grad_w1
+    w2 -= learning_rate * grad_w2
+    tracker.get_latest_info_and_check_for_errors()
diff --git a/test/climatetrackertestcpuonly.py b/test/climatetrackertestcpuonly.py
index ed537e5acc796f9ca2f0af6caa6db1fc0dd98f52..fc6421902b011c6e2bf4f10683189c0f28290e74 100644
--- a/test/climatetrackertestcpuonly.py
+++ b/test/climatetrackertestcpuonly.py
@@ -1,12 +1,14 @@
 # Code in file tensor/two_layer_net_tensor.py
-import torch
 import time
+
+import torch
+
 from experiment_impact_tracker.compute_tracker import ImpactTracker
 
-tracker = ImpactTracker('./testlogs/')
+tracker = ImpactTracker("./testlogs/")
 
 tracker.launch_impact_monitor()
-device = torch.device('cpu')
+device = torch.device("cpu")
 
 # N is batch size; D_in is input dimension;
 # H is hidden dimension; D_out is output dimension.
@@ -22,25 +24,25 @@ w2 = torch.randn(H, D_out, device=device)
 
 learning_rate = 1e-6
 for t in range(1000):
-  # Forward pass: compute predicted y
-  h = x.mm(w1)
-  h_relu = h.clamp(min=0)
-  y_pred = h_relu.mm(w2)
-
-  # Compute and print loss; loss is a scalar, and is stored in a PyTorch Tensor
-  # of shape (); we can get its value as a Python number with loss.item().
-  loss = (y_pred - y).pow(2).sum()
-  print(t, loss.item())
-
-  # Backprop to compute gradients of w1 and w2 with respect to loss
-  grad_y_pred = 2.0 * (y_pred - y)
-  grad_w2 = h_relu.t().mm(grad_y_pred)
-  grad_h_relu = grad_y_pred.mm(w2.t())
-  grad_h = grad_h_relu.clone()
-  grad_h[h < 0] = 0
-  grad_w1 = x.t().mm(grad_h)
-
-  # Update weights using gradient descent
-  w1 -= learning_rate * grad_w1
-  w2 -= learning_rate * grad_w2
-  tracker.get_latest_info_and_check_for_errors()
+    # Forward pass: compute predicted y
+    h = x.mm(w1)
+    h_relu = h.clamp(min=0)
+    y_pred = h_relu.mm(w2)
+
+    # Compute and print loss; loss is a scalar, and is stored in a PyTorch Tensor
+    # of shape (); we can get its value as a Python number with loss.item().
+    loss = (y_pred - y).pow(2).sum()
+    print(t, loss.item())
+
+    # Backprop to compute gradients of w1 and w2 with respect to loss
+    grad_y_pred = 2.0 * (y_pred - y)
+    grad_w2 = h_relu.t().mm(grad_y_pred)
+    grad_h_relu = grad_y_pred.mm(w2.t())
+    grad_h = grad_h_relu.clone()
+    grad_h[h < 0] = 0
+    grad_w1 = x.t().mm(grad_h)
+
+    # Update weights using gradient descent
+    w1 -= learning_rate * grad_w1
+    w2 -= learning_rate * grad_w2
+    tracker.get_latest_info_and_check_for_errors()