diff --git a/exercises/surface17_depolarizing_optimized.py b/exercises/surface17_depolarizing_optimized.py
index 5be2ebd09b2922c69b6b91e16ed97cceaa2a4762..ebc0b7b64b8ee264c5c6e8a0a0bc77c1c599f54d 100644
--- a/exercises/surface17_depolarizing_optimized.py
+++ b/exercises/surface17_depolarizing_optimized.py
@@ -148,10 +148,10 @@ def main(relative_samples=10000, resolution=16):
     ax.set_title('Pseudothreshold of Surface-17, depolarizing noise')
     ax.set_xscale('log')
     ax.set_yscale('log')
-    ax.plot(pRange, x_failure_rates, '-x', label='logical X error')
-    ax.plot(pRange, y_failure_rates, '-x', label='logical Y error')
-    ax.plot(pRange, z_failure_rates, '-x', label='logical Z error')
-    ax.plot(pRange, total_failure_rates, '-x', label='any logical error')
+    ax.plot(pRange, logical_error_rates[:,0], '-x', label='logical X error')
+    ax.plot(pRange, logical_error_rates[:,1], '-x', label='logical Y error')
+    ax.plot(pRange, logical_error_rates[:,2], '-x', label='logical Z error')
+    ax.plot(pRange, logical_error_rates.sum(axis=1), '-x', label='any logical error')
     ax.plot(pRange, pRange, 'k:', alpha=0.5)
     ax.legend()
     fig.tight_layout()
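
The new indexing above presumes logical_error_rates is a (len(pRange), 3) NumPy array with columns ordered X, Y, Z; that construction is not shown in this hunk, so the sketch below is an assumption about the layout, not the file's actual code.

import numpy as np

# Assumed layout behind the plotting calls: one row per physical error rate,
# columns = (X, Y, Z) logical failure rates.
pRange = np.logspace(-3, 0, 16)
logical_error_rates = np.zeros((len(pRange), 3))

# Example fill for one row from made-up Monte Carlo tallies.
samples = 10000
x_fails, y_fails, z_fails = 12, 3, 9  # hypothetical counts
logical_error_rates[0] = np.array([x_fails, y_fails, z_fails]) / samples

# Columns 0/1/2 give the X/Y/Z curves; the row sum is "any logical error",
# assuming the three failure classes are mutually exclusive per sample.
total_failure_rates = logical_error_rates.sum(axis=1)
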
diff --git a/exercises/surface17_solution.py b/exercises/surface17_solution.py
index a007792474494ccb718263e3b27658a2bd27df25..fc3c5ef3629474433b166c95fb7847fa358cf8df 100644
--- a/exercises/surface17_solution.py
+++ b/exercises/surface17_solution.py
@@ -20,7 +20,7 @@ stabilizerGenerators = [s1,s2,s3,s4]
 def getCommutator(a,b):
 	# For two multiqubit Pauli X- or Z- operators we want to know
 	# whether they commute (0) or anticommute (1). Because single qubit Paulis
-	# anticommute we just sum up (mod 2) the mutual ones in the bitstings 
+	# anticommute, we just sum up (mod 2) the overlapping ones in the bitstrings
 	# that represent the multiqubit operators in symplectic notation
 	return bin(a & b)[2:].count('1')%2
 
@@ -60,16 +60,16 @@ pRange = np.logspace(-3,0,7)
 fail_log = []
 logical_failure_rate = []
 for p in range(len(pRange)):
-	
+
 	fail_log.append([])
 	# sample a reasonable amount of times given the probability of errors
 	for _ in range(int(1000/pRange[p])):
-		
+
 		# start out with clean state and randomly place Z-errors on data qubits
 		configuration = 0
 		for i in range(9):
 			if np.random.random() < pRange[p]: configuration += 2**i
-		
+
 		# read syndrome and apply correction operators according to look up table
 		syndrome = getSyndrome(configuration)
 		correction = getCorrection(syndrome)
@@ -82,13 +82,13 @@ for p in range(len(pRange)):
 			fail_log[-1].append(0)
 		else:
 			fail_log[-1].append(1)
-		
+
 		#print('configuration', format(configuration,'09b'))
 		#print('syndrome', format(syndrome,'04b'))
 		#print('correction', format(correction,'09b'))
 		#print('result', format(after_correction,'09b'))
 		#print('logical X', getCommutator(after_correction, xL))
-	
+
 	# calculate logical failure rate from samples
 	logical_failure_rate.append(np.sum(fail_log[-1])/len(fail_log[-1]))
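
A quick worked example of the symplectic commutator computed by getCommutator in the hunk above; the two operator bitmasks are made up for illustration and do not come from the file.

# Encode the support of two single-type Pauli operators as bitmasks,
# one bit per data qubit (bit i set = the operator acts on qubit i).
a = 0b000000111  # e.g. X on qubits 0, 1, 2
b = 0b000000110  # e.g. Z on qubits 1, 2

# X and Z anticommute on every qubit where both act, so the overall sign is
# (-1)**(size of the overlap): even overlap -> commute, odd -> anticommute.
overlap = bin(a & b)[2:].count('1')  # two overlapping qubits here
print(overlap % 2)                   # 0 -> the two operators commute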
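
The sampling loop draws int(1000/pRange[p]) shots per point so that low error rates still produce enough error events to estimate a failure rate; a small numeric check of that scaling and of the final estimate (the counts below are illustrative, not results from a run).

import numpy as np

pRange = np.logspace(-3, 0, 7)
for p in pRange:
    # Low-p points get many more samples: 1e6 at p = 1e-3 down to 1e3 at p = 1.
    print(f"p = {p:.3g}: {int(1000 / p)} samples")

# The per-point logical failure rate is simply the fraction of failed samples,
# e.g. 37 failures out of 100000 samples (hypothetical numbers):
fails = np.zeros(100000)
fails[:37] = 1
print(np.sum(fails) / len(fails))  # 0.00037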