This repository has been archived by the owner on Jun 12, 2023. It is now read-only.

Added new graph decoder and visualization tools for error and decoded graphs #585

Open
wants to merge 36 commits into base: master
Commits (36)
65160aa Added some new features (Jul 9, 2021)
fc95bcf Update test_codes.py (Jul 10, 2021)
85bef04 Create new-graph-decoder-and-visulization-method-e0e2a0792344a3b3.yaml (Jul 10, 2021)
55460d1 added requirements (Jul 10, 2021)
aa869ea Merge branch 'master' of https://github.com/Rahps97/qiskit-ignis (Jul 10, 2021)
75492f5 Revert "added requirements" (Jul 10, 2021)
ac6233e Revert "Create new-graph-decoder-and-visulization-method-e0e2a0792344… (Jul 10, 2021)
cd7a8a4 Revert "Revert "added requirements"" (Jul 10, 2021)
313efc9 Revert "Revert "Create new-graph-decoder-and-visulization-method-e0e2… (Jul 10, 2021)
1e92e98 improved fitters.py (Jul 10, 2021)
6843432 Added scikit-learn as required (Jul 10, 2021)
7f7ab00 Removed scikit learn from extras (Jul 10, 2021)
3f87bc2 Lint corrections (Jul 11, 2021)
93b630e Lint and test update (Jul 11, 2021)
772baab pyqt5 improvement (Jul 11, 2021)
779ae35 Replace mayavi with pyvista (Jul 13, 2021)
0a0fd21 updated requirement-dev.txt (Jul 13, 2021)
64c2ef2 Updated setup.py (Jul 13, 2021)
fd216c7 Made visual libraries optional (Jul 13, 2021)
3fc67dd docs test (Jul 13, 2021)
49abb8f Added epsilon in get_logical_prob function (Jul 13, 2021)
f8fa0fd Some final corrections for visualization function (Jul 14, 2021)
8327e3e lint update (Jul 14, 2021)
7cea165 Replaced nodes to actual nodes in 2d (Jul 14, 2021)
a2f213f Update releasenotes/notes/new-graph-decoder-and-visulization-method-e… (Jul 14, 2021)
21484ea Made another class for visualization and scikit learn is optional (Jul 20, 2021)
392d974 Update constraints.txt (Jul 20, 2021)
d1922e3 changing release notes (Jul 22, 2021)
d28e86c lint corrections (Jul 22, 2021)
b785721 Update requirements-dev.txt (Jul 22, 2021)
e722586 Revert "changing release notes" (Jul 22, 2021)
e24eedd Update new-graph-decoder-and-visulization-method-e0e2a0792344a3b3.yaml (Jul 22, 2021)
c0f12d5 Update visualization.py (Jul 24, 2021)
a76dc15 Merge branch 'Qiskit:master' into master (Jul 24, 2021)
547a6ae Update visualization.py (Sep 14, 2021)
079f1c5 Added feature for figure size (Sep 14, 2021)
2 changes: 1 addition & 1 deletion constraints.txt
@@ -3,4 +3,4 @@ astroid==2.5.6
pywin32==225
setuptools==49.6.0
pyfakefs==4.1.0
cvxpy==1.1.7
cvxpy==1.1.7
1 change: 1 addition & 0 deletions qiskit/ignis/verification/topological_codes/__init__.py
@@ -21,3 +21,4 @@
from .fitters import GraphDecoder
from .fitters import lookuptable_decoding
from .fitters import postselection_decoding
from .visualization import GraphVisualization
169 changes: 164 additions & 5 deletions qiskit/ignis/verification/topological_codes/fitters.py
@@ -24,15 +24,23 @@
import retworkx as rx
import numpy as np

from qiskit.exceptions import QiskitError
from qiskit import QuantumCircuit, execute


try:
from qiskit.providers.aer import Aer
HAS_AER = True
except ImportError:
from qiskit import BasicAer
HAS_AER = False

try:
from sklearn.cluster import DBSCAN
HAS_SCIKIT = True
except ImportError:
HAS_SCIKIT = False
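# Aer and scikit-learn are treated as optional here: the decoder falls back to
# BasicAer when Aer is unavailable, and cluster_decoding raises a QiskitError
# when scikit-learn is missing.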


class GraphDecoder():
"""
@@ -312,7 +320,8 @@ def make_error_graph(self, string, subgraphs=None):
return E

def matching(self, string):
"""
"""Graph theoritical decoder that uses minimum weight matching to decode errors.

Args:
string (str): A string describing the output from the code.

@@ -324,7 +333,6 @@ def matching(self, string):
This function can be run directly, or used indirectly to
calculate a logical error probability with `get_logical_prob`
"""

# this matching algorithm is designed for a single graph
E = self.make_error_graph(string)['0']

@@ -387,19 +395,163 @@ def matching(self, string):

return logical_string

def get_logical_prob(self, results, algorithm='matching'):
def nearest_cluster(self, cluster, graph, target):
"""Find the nearest cluster to the target cluster.

Args:
cluster (dict): Dictionary mapping each cluster label in the
error graph to the list of nodes it contains.

graph (retworkx.PyGraph): Error graph in which the nearest
cluster and node are searched.

target (int or str): Key in `cluster` of the target cluster
for which the nearest cluster is being searched.

Returns:
list: [nearest_outside_node, nearest_cluster], where
nearest_outside_node is the nearest node that does not
belong to the target cluster, and nearest_cluster is the
cluster to which that node belongs.
"""
cluster_graph = rx.PyGraph()
cluster_graph.add_nodes_from(graph.nodes())
cluster_graph.add_edges_from(graph.weighted_edge_list())
for i, __ in enumerate(graph.nodes()):
if __ not in cluster[target]:
cluster_graph.remove_node(i)
edges = rx.max_weight_matching(cluster_graph,
max_cardinality=True, weight_fn=lambda x: x)
remaining_node = list(cluster_graph.node_indexes())
for edge in edges:
remaining_node.remove(edge[0])
remaining_node.remove(edge[1])
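# cluster[target] has odd size, so the max-cardinality matching above leaves a
# node unmatched; remaining_node[0] is that node, and its nearest neighbour
# outside the cluster is looked up below.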
node_neighbours = {}
for edge in graph.weighted_edge_list():
if remaining_node[0] == edge[0]:
node_neighbours[graph[edge[1]]] = {'weight': edge[2]}
if remaining_node[0] == edge[1]:
node_neighbours[graph[edge[0]]] = {'weight': edge[2]}
nearest_neighbours = sorted(node_neighbours.items(),
key=lambda e: e[1]["weight"],
reverse=True)[:len(cluster[target])]
nearest_outside_node = [x[0] for x in nearest_neighbours if x[0] not in
cluster[target]]
for x in cluster.keys():
if nearest_outside_node[0] in cluster[x]:
nearest_cluster = x
return [nearest_outside_node[0], nearest_cluster]

def cluster_decoding(self, string, eps=4):
"""Graph theoritical decoder that uses Clustering and matching to decode errors.

Args:
string (str): A string describing the output from the code.

eps (int): The maximum distance between two samples for one
to be considered as in the neighborhood of the other. This
is not a maximum bound on the distances of points within a
cluster. This is the most important DBSCAN parameter to
choose appropriately for your data set and distance function.
Default value here is 4.

Returns:
list or str: In the trivial case with no non-neutral nodes,
the corrected logical string is returned directly. Otherwise a
list [logical_string, edgelist, neutral_nodelist] is returned,
where logical_string holds the corrected logical values computed
using clustering and matching, edgelist holds the matched edges
and neutral_nodelist the nodes they make neutral.

Raises:
QiskitError: If scikit-learn is not installed.

Additional information:
This function can be run directly, or used indirectly to
calculate a logical error probability with `get_logical_prob`,
which uses only the corrected logical string.
"""
if not HAS_SCIKIT:
raise QiskitError('scikit-learn is required for cluster_decoding; please install it')

graph = self.make_error_graph(string)['0']
logical_nodes = [(0, 0, 0), (0, 1, 0)]
Non_neutral_nodes = list(graph.nodes())
for _ in logical_nodes:
Non_neutral_nodes.remove(_)
# Trivial Case
if len(Non_neutral_nodes) == 0:
logicals = self._separate_string(string)[0]
logical_string = ''
for logical in logicals:
logical_string += logical + ' '
logical_string = logical_string[:-1]
return logical_string
# Cluster Decoder
corrected_logical_string = []
clustering = DBSCAN(eps=eps, min_samples=2,
metric='manhattan').fit(Non_neutral_nodes)
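# As an illustration with hypothetical syndrome nodes: DBSCAN(eps=4,
# min_samples=2, metric='manhattan') on [(1, 0, 2), (1, 0, 3), (1, 2, 7)]
# groups the first two nodes (Manhattan distance 1) into one cluster and
# labels the third node -1 (noise), since its nearest neighbour is 6 away.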
cluster = {_: [] for _ in set(clustering.labels_)}
for _, __ in zip(clustering.labels_, Non_neutral_nodes):
cluster[_].append(__)
# appending logical nodes as separate clusters
cluster['logical_0'] = [logical_nodes[0]]
cluster['logical_1'] = [logical_nodes[1]]
unmatched_node = True
while unmatched_node:
for _ in cluster.keys():
if len(cluster[_]) % 2 != 0 and _ != 'logical_0' and _ != 'logical_1':
s = self.nearest_cluster(cluster, graph, _)
if s[1] == 'logical_0' or s[1] == 'logical_1':
corrected_logical_string.append(s[1][-1])
cluster[_].append(s[0])
else:
cluster[_] = cluster[_] + cluster[s[1]]
cluster.pop(s[1])
break
else:
unmatched_node = False
neutral_nodelist = []
edgelist = []
for _ in cluster.keys():
cluster_graph = rx.PyGraph()
cluster_graph.add_nodes_from(graph.nodes())
cluster_graph.add_edges_from(graph.weighted_edge_list())
for i, __ in enumerate(graph.nodes()):
if __ not in cluster[_]:
cluster_graph.remove_node(i)
edges = [(cluster_graph[x[0]],
cluster_graph[x[1]]) for x in rx.max_weight_matching(
cluster_graph, max_cardinality=True, weight_fn=lambda x: x)]
edgelist = edgelist + edges
neutral_nodelist += [k[0] for k in list(edges)] + [k[1] for k in list(edges)]
# use it to construct and return a corrected logical string
logicals = self._separate_string(string)[0]
for (source, target) in edgelist:
if source[0] == 0 and target[0] != 0:
logicals[source[1]] = str((int(logicals[source[1]]) + 1) % 2)
if target[0] == 0 and source[0] != 0:
logicals[target[1]] = str((int(logicals[target[1]]) + 1) % 2)
logical_string = ''
for logical in logicals:
logical_string += logical + ' '
logical_string = logical_string[:-1]
return [logical_string, edgelist, neutral_nodelist]

def get_logical_prob(self, results, eps=4, algorithm='matching'):
"""Calculate logical probabilty for graph decoders.

Args:
results (dict): A results dictionary, as produced by the
`process_results` method of the code.
algorithm (str): Choice of which decoder to use.

eps (int): Only used if algorithm is set to 'clustering'. The
maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum
bound on the distances of points within a cluster. This is
the most important DBSCAN parameter to choose appropriately
for your data set and distance function. Default value here
is 4.
Returns:
dict: Dictionary of logical error probabilities for
each of the encoded logical states whose results were given in
the input.
"""

logical_prob = {}
for log in results:

@@ -414,6 +566,13 @@ def get_logical_prob(self, results, algorithm='matching'):
corrected_results[corr_str] += results[log][string]
else:
corrected_results[corr_str] = results[log][string]
elif algorithm == 'clustering':
for string in results[log]:
corr_str = self.cluster_decoding(string, eps=eps)[0]
if corr_str in corrected_results:
corrected_results[corr_str] += results[log][string]
else:
corrected_results[corr_str] = results[log][string]
else:
warnings.warn(
"The requested algorithm " +
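A minimal usage sketch of how the new clustering decoder could be exercised, assuming the standard RepetitionCode / GraphDecoder workflow from qiskit.ignis.verification.topological_codes, a qasm_simulator Aer backend and arbitrary illustrative noise parameters; the clustering path additionally requires scikit-learn:

from qiskit import execute, Aer
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error
from qiskit.ignis.verification.topological_codes import RepetitionCode, GraphDecoder

# Distance-3 repetition code with one round of syndrome measurement.
code = RepetitionCode(3, 1)

# Simple noise so that some syndromes are non-trivial (values are arbitrary).
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(pauli_error([('X', 0.05), ('I', 0.95)]), 'measure')
noise_model.add_all_qubit_quantum_error(depolarizing_error(0.05, 2), ['cx'])

# Run both encoded logical states and process the counts into syndrome form.
job = execute(code.get_circuit_list(), Aer.get_backend('qasm_simulator'),
              noise_model=noise_model, shots=1024)
raw_results = {log: job.result().get_counts(log) for log in ['0', '1']}
results = code.process_results(raw_results)

dec = GraphDecoder(code)
p_matching = dec.get_logical_prob(results, algorithm='matching')
p_clustering = dec.get_logical_prob(results, eps=4, algorithm='clustering')  # needs scikit-learn

# cluster_decoding can also be called on a single output string; it returns the
# corrected logical string alone in the trivial case and
# [logical_string, edgelist, neutral_nodelist] otherwise.
decoded = dec.cluster_decoding(next(iter(results['1'])), eps=4)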