# surface17_depolarizing.py
# (web-interface residue removed; original listing header kept as a comment)
#!/usr/bin/env python3
"""
Solution for exercise 12.3a, Monte Carlo simulation of surface-17
quantum error correction for depolarizing noise.
This script is much more efficient if the numba package is available.
"""
import numpy as np
from random import random
import matplotlib.pyplot as plt
try:
    from numba import jit, prange
except ImportError:
    # Numba is optional: fall back to a plain-Python no-op decorator and
    # an ordinary range so the script still runs (just much slower).
    prange = range

    def jit(*args, **kwargs):
        """No-op stand-in for numba.jit: return an identity decorator."""
        return lambda func: func
# Logical X and Z operators of the surface-17 code in symplectic notation:
# bit i of each integer corresponds to data qubit i+1.
xL = 0b001001001
zL = 0b000000111
# X stabilizer generators in symplectic notation
sx1 = 0b000000110
sx2 = 0b000011011
sx3 = 0b110110000
sx4 = 0b011000000
# Z stabilizer generators in symplectic notation
sz1 = 0b000001001
sz2 = 0b011011000
sz3 = 0b000110110
sz4 = 0b100100000
# Generator tuples indexed by syndrome-bit position (used in montecarlo).
xstabilizerGenerators = (sx1, sx2, sx3, sx4)
zstabilizerGenerators = (sz1, sz2, sz3, sz4)
# Look-up table mapping 9 bit integers to the parity of the number of bits:
# Definition: parity[x] = 1 if bitstring(x) has odd number of '1's, otherwise 0.
# Stored as bytes so it is a compact constant that numba can index directly.
parity = bytes(bin(x).count('1') % 2 for x in range(2**9))
# Z corrections detected by X stabilizers.
# Minimum-weight decoder look-up table: index with the 4-bit X-stabilizer
# syndrome, read off the Z-correction bitmask to XOR onto the Z-error pattern.
zcorrections = (
    # binary data show which data qubit should be corrected:
    # 987654321 ← data qubit number
    0b000000000, # syndrome: -, correction: -
    0b000000100, # syndrome: 1, correction: 3
    0b000000001, # syndrome: 2, correction: 1 (could also be 4)
    0b000000010, # syndrome: 1,2, correction: 2
    0b100000000, # syndrome: 3, correction: 9 (could also be 6)
    0b100000100, # syndrome: 1,3, correction: 3,9 (could also be 3,6)
    0b000010000, # syndrome: 2,3, correction: 5
    0b100000010, # syndrome: 1,2,3, correction: 2,9 (could also be 2,6 or 3,5)
    0b001000000, # syndrome: 4, correction: 7
    0b001000100, # syndrome: 1,4, correction: 3,7
    0b001000001, # syndrome: 2,4, correction: 1,7 (could also be 4,7)
    0b001000010, # syndrome: 1,2,4, correction: 2,7
    0b010000000, # syndrome: 3,4, correction: 8
    0b010000100, # syndrome: 1,3,4, correction: 3,8
    0b010000001, # syndrome: 2,3,4, correction: 1,8 (could also be 4,8 or 5,7)
    0b010000010, # syndrome: 1,2,3,4, correction: 2,8
)
# X corrections detected by Z stabilizers.
# Minimum-weight decoder look-up table: index with the 4-bit Z-stabilizer
# syndrome, read off the X-correction bitmask to XOR onto the X-error pattern.
xcorrections = (
    # binary data show which data qubit should be corrected:
    # 987654321 ← data qubit number
    0b000000000, # syndrome: -, correction: -
    0b000000001, # syndrome: 1, correction: 1
    0b001000000, # syndrome: 2, correction: 7 (could also be 8)
    0b000001000, # syndrome: 1,2, correction: 4
    0b000000100, # syndrome: 3, correction: 3 (could also be 2)
    0b000000101, # syndrome: 1,3, correction: 1,3 (could also be 1,2)
    0b000010000, # syndrome: 2,3, correction: 5
    0b000001100, # syndrome: 1,2,3, correction: 3,4 (could also be 2,4 or 1,5)
    0b100000000, # syndrome: 4, correction: 9
    0b100000001, # syndrome: 1,4, correction: 1,9
    0b101000000, # syndrome: 2,4, correction: 7,9 (could also be 8,9)
    0b100001000, # syndrome: 1,2,4, correction: 4,9
    0b000100000, # syndrome: 3,4, correction: 6
    0b000100001, # syndrome: 1,3,4, correction: 1,6
    0b001100000, # syndrome: 2,3,4, correction: 6,7 (could also be 6,8 or 5,9)
    0b000101000, # syndrome: 1,2,3,4, correction: 4,6
)
@jit(nopython=True, parallel=True, cache=False)
def montecarlo(p, samples):
    """
    Simulate the surface-17 code under depolarizing noise.

    Each of the 9 data qubits independently suffers an X, Z, or Y (= X and Z)
    error with probability p/3 each, for a total physical error rate of p.
    The stabilizer syndrome is decoded with the minimum-weight look-up tables
    and the residual error is checked against the logical operators.

    Parameters
    ----------
    p : float
        Physical error rate per data qubit (split evenly over X, Y, Z).
    samples : int
        Number of Monte Carlo shots.

    Returns
    -------
    (int, int, int)
        Counts of logical X, Y and Z failures among the samples.
    """
    xfailures = 0
    yfailures = 0
    zfailures = 0
    # Hoist the loop-invariant decision thresholds out of the sampling loops.
    p_third = p / 3
    p_two_thirds = 2 * p / 3
    for _ in prange(samples):
        # Start from a clean state and place X, Z or Y (= XZ) errors
        # independently on each data qubit.
        xerrors = 0
        zerrors = 0
        for i in range(9):
            r = random()
            if r < p_third:
                xerrors |= 1 << i
            elif r < p_two_thirds:
                zerrors |= 1 << i
            elif r < p:
                # Y error: both an X and a Z component on qubit i.
                xerrors |= 1 << i
                zerrors |= 1 << i
        # Read the syndrome: bit i is set when the i-th generator
        # anticommutes with the error (odd symplectic overlap).
        xsyndrome = 0
        zsyndrome = 0
        for i in range(len(xstabilizerGenerators)):
            xsyndrome |= parity[xstabilizerGenerators[i] & zerrors] << i
            zsyndrome |= parity[zstabilizerGenerators[i] & xerrors] << i
        # Apply the look-up-table correction operators for both syndromes.
        zcorrection = zcorrections[xsyndrome]
        xcorrection = xcorrections[zsyndrome]
        x_after_correction = xerrors ^ xcorrection
        z_after_correction = zerrors ^ zcorrection
        # A residual error is logical iff it anticommutes with the conjugate
        # logical operator (logical Z anticommutes with logical X and v.v.).
        x_logical_error = parity[x_after_correction & zL]
        z_logical_error = parity[z_after_correction & xL]
        if x_logical_error and z_logical_error:
            yfailures += 1
        elif x_logical_error:
            xfailures += 1
        elif z_logical_error:
            zfailures += 1
    return xfailures, yfailures, zfailures
def main(relative_samples=1000, resolution=16):
    """
    Simulate and plot logical error rates of surface 17 code with depolarizing
    noise.
    """
    # Physical error rates to Monte Carlo sample, log-spaced in [1e-3, 1].
    pRange = np.logspace(-3, 0, resolution)
    # Take more samples at low p so every point sees enough failures.
    sampleRange = np.int_(relative_samples/pRange)
    x_failure_rates = np.empty_like(pRange)
    y_failure_rates = np.empty_like(pRange)
    z_failure_rates = np.empty_like(pRange)
    # One Monte Carlo batch per physical error rate.
    for i, (p, n) in enumerate(zip(pRange, sampleRange)):
        nx, ny, nz = montecarlo(p, n)
        x_failure_rates[i] = nx / n
        y_failure_rates[i] = ny / n
        z_failure_rates[i] = nz / n
    total_failure_rates = x_failure_rates + y_failure_rates + z_failure_rates
    # Plot the logical error rates against the physical error rate.
    fig, ax = plt.subplots()
    ax.grid()
    ax.set_xlabel('$p$')
    ax.set_ylabel('$p_L$')
    ax.set_title('Pseudothreshold of Surface-17, depolarizing noise')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.plot(pRange, x_failure_rates, '-x', label='logical X error')
    ax.plot(pRange, y_failure_rates, '-x', label='logical Y error')
    ax.plot(pRange, z_failure_rates, '-x', label='logical Z error')
    ax.plot(pRange, total_failure_rates, '-x', label='any logical error')
    # Reference line p_L = p; the pseudothreshold is where curves cross it.
    ax.plot(pRange, pRange, 'k:', alpha=0.5)
    ax.legend()
    fig.tight_layout()
    plt.show()
if __name__ == '__main__':
    # Run the full simulation and show the plot when executed as a script.
    main(1000)