diff --git a/Dockerfile b/Dockerfile
index 8f64607a..8e2ac8eb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,5 @@
 FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04
-
 ENV DEBIAN_FRONTEND=noninteractive
-
 RUN yes | apt update
 RUN yes | apt install python3 python3-pip git htop vim
 
diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 00000000..1cace985
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,17 @@
+FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt update -y && \
+    apt install -y python3 python3-pip git htop vim
+
+# Recursively clone the repo (git clone --recursive) before building this image
+WORKDIR /app
+RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
+RUN pip install --no-binary pynauty pynauty
+# Run the commands below after the container starts - the source volume is not mounted at build time
+# RUN cd qtree && pip install .
+# RUN pip install .
+RUN pip install pdbpp
+
+ENTRYPOINT ["bash"]
\ No newline at end of file
diff --git a/dev.sh b/dev.sh
new file mode 100755
index 00000000..4a9b7f05
--- /dev/null
+++ b/dev.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+docker build -f Dockerfile.dev -t dev .
+docker run -v "$(pwd)":/app -it dev
diff --git a/qtensor/Bitstring.py b/qtensor/Bitstring.py
new file mode 100644
index 00000000..92b9b1b9
--- /dev/null
+++ b/qtensor/Bitstring.py
@@ -0,0 +1,52 @@
+import numpy as np
+
+class Bitstring:
+    """A sequence of base-`dim` digits with an optional probability attached."""
+    def __init__(self, bits: 'list[int]', prob=None, dim=2):
+        self._bits = list(bits)
+        self._s = ''.join(str(b) for b in self._bits)
+        self._prob = prob
+        self._dim = dim
+
+    def __iter__(self):
+        for i in self._bits:
+            yield int(i)
+
+    def __repr__(self):
+        return f'<{self._s}>'
+
+    def __len__(self):
+        return len(self._bits)
+
+    @classmethod
+    def str(cls, s: str, **kwargs):
+        return cls([int(_s) for _s in s], **kwargs)
+
+    @classmethod
+    def int(cls, i: int, width, **kwargs):
+        dim = kwargs.get('dim', 2)
+        return cls(list(int(_i) for _i in np.unravel_index(i, [dim]*width)), **kwargs)
+
+    def __add__(self, other: 'Bitstring'):
+        assert self._dim == other._dim
+        if self._prob is not None and other._prob is not None:
+            return Bitstring(self._bits + other._bits, self._prob * other._prob, dim=self._dim)
+        return Bitstring(self._bits + other._bits, dim=self._dim)
+
+    def __iadd__(self, other: 'Bitstring'):
+        assert self._dim == other._dim
+        if self._prob is not None and other._prob is not None:
+            self._prob *= other._prob
+        self._bits += other._bits
+        self._s += other._s
+        return self
+
+    def __eq__(self, other: 'Bitstring'):
+        return self.to_int() == other.to_int()
+
+    def __hash__(self):
+        # keep the hash consistent with __eq__, which only compares to_int()
+        return hash(int(self.to_int()))
+
+    def to_int(self):
+        return np.ravel_multi_index(self._bits, [self._dim]*len(self._bits))
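# Usage sketch for the Bitstring class added above (illustration only, not part
# of the patch). It assumes the class is importable as qtensor.Bitstring.Bitstring,
# matching the new file path.
from qtensor.Bitstring import Bitstring

prefix = Bitstring.str('01', prob=0.25)       # digits from a string
suffix = Bitstring.int(3, width=2, prob=0.5)  # 3 -> <11> for width=2, dim=2
combined = prefix + suffix                    # <0111>, probability 0.25 * 0.5
assert list(combined) == [0, 1, 1, 1]
assert combined.to_int() == 7                 # ravel over [dim] * len(combined)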
diff --git a/qtensor/Simulate.py b/qtensor/Simulate.py
index 0e271bd1..25d6a489 100644
--- a/qtensor/Simulate.py
+++ b/qtensor/Simulate.py
@@ -4,6 +4,8 @@
 from qtensor.optimisation.TensorNet import QtreeTensorNet
 from qtensor.optimisation.Optimizer import DefaultOptimizer, Optimizer
+from qtensor import Bitstring as Bs, TNAdapter, QTensorTNAdapter
+import numpy as np
 from tqdm.auto import tqdm
 from loguru import logger as log
@@ -32,22 +33,10 @@ def __init__(self, backend=NumpyBackend(), optimizer=None, max_tw=None):
             self.optimizer = self.FallbackOptimizer()
         self.max_tw = max_tw
 
-    #-- Internal helpers
-    def _new_circuit(self, qc):
-        self.all_gates = qc
-
-    def _create_buckets(self):
-        self.tn = QtreeTensorNet.from_qtree_gates(self.all_gates,
-                                                  backend=self.backend)
-        self.tn.backend = self.backend
-
     def _set_free_qubits(self, free_final_qubits):
         self.tn.free_vars = [self.tn.bra_vars[i] for i in free_final_qubits]
         self.tn.bra_vars = [var for var in self.tn.bra_vars if var not in self.tn.free_vars]
 
-    def _optimize_buckets(self):
-        self.peo = self.optimize_buckets()
-
     def _reorder_buckets(self):
         """
         Permutes indices in the tensor network and peo
@@ -95,15 +84,27 @@
         return slice_dict
     #--
 
-    def optimize_buckets(self):
-        peo, self.tn = self.optimizer.optimize(self.tn)
-        # print('Treewidth', self.optimizer.treewidth)
-        # print(peo)
-        return peo
+    def optimize_buckets(self, peo=None):
+        if peo is None:
+            self.peo, self.tn = self.optimizer.optimize(self.tn)
+            if self.max_tw:
+                if self.optimizer.treewidth > self.max_tw:
+                    raise ValueError(f'Treewidth {self.optimizer.treewidth} is larger than max_tw={self.max_tw}.')
+        else:
+            self.peo = peo
+
+        all_indices = sum([list(t.indices) for bucket in self.tn.buckets for t in bucket], [])
+        identity_map = {int(v): v for v in all_indices}
+        self.peo = [identity_map[int(i)] for i in self.peo]
+        self._reorder_buckets()
 
-    def prepare_buckets(self, qc, batch_vars=0, peo=None):
-        self._new_circuit(qc)
-        self._create_buckets()
+    def _prepare_state(self, qc, batch_vars=0, tn=None):
+        self.all_gates = qc
+        if tn is None:
+            self.tn = QtreeTensorNet.from_qtree_gates(self.all_gates,
+                                                      backend=self.backend)
+        else:
+            self.tn = tn
+        self.tn.backend = self.backend
         # Collect free qubit variables
         if isinstance(batch_vars, int):
             free_final_qubits = list(range(batch_vars))
@@ -111,43 +112,83 @@
             free_final_qubits = batch_vars
         self._set_free_qubits(free_final_qubits)
 
-        if peo is None:
-            self._optimize_buckets()
-            if self.max_tw:
-                if self.optimizer.treewidth > self.max_tw:
-                    raise ValueError(f'Treewidth {self.optimizer.treewidth} is larger than max_tw={self.max_tw}.')
-        else:
-            self.peo = peo
-
-        all_indices = sum([list(t.indices) for bucket in self.tn.buckets for t in bucket], [])
-        identity_map = {int(v): v for v in all_indices}
-        self.peo = [identity_map[int(i)] for i in self.peo]
+    def contract_tn(self, qc, batch_vars=0, peo=None, tn=None):
+        self._prepare_state(qc, batch_vars, tn)
+        self.optimize_buckets(peo)
+        self.slice()
+
+        result = qtree.optimizer.bucket_elimination(
+            self.buckets, self.backend.process_bucket,
+            n_var_nosum=len(self.tn.free_vars)
+        )
-        self._reorder_buckets()
+        return result
+
+    def slice(self):
         slice_dict = self._get_slice_dict()
-        #log.info('batch slice {}', slice_dict)
+        log.info('batch slice {}', slice_dict)
         sliced_buckets = self.tn.slice(slice_dict)
-        #self.backend.pbar.set_total ( len(sliced_buckets))
         self.buckets = sliced_buckets
-        # print("Buckets:")
-        # print(sliced_buckets)
 
-    def simulate_batch(self, qc, batch_vars=0, peo=None):
-        self.prepare_buckets(qc, batch_vars, peo)
-        result = qtree.optimizer.bucket_elimination(
-            self.buckets, self.backend.process_bucket,
-            n_var_nosum=len(self.tn.free_vars)
-        )
+    def simulate_batch(self, qc, batch_vars=0, peo=None):
+        result = self.contract_tn(qc, batch_vars, peo)
         return self.backend.get_result_data(result).flatten()
 
     def simulate(self, qc):
         return self.simulate_state(qc)
 
     def simulate_state(self, qc, peo=None):
         return self.simulate_batch(qc, peo=peo, batch_vars=0)
+
+    def sample(self, circuit, indices=None):
+        # TODO: can use QTensorTNAdapter in init to avoid this operation again
+        tn_adapter = QTensorTNAdapter.from_qtree_gates(circuit)
+        if indices is None:
+            # default: sample every qubit that appears in the circuit
+            indices = list(range(len(set(sum([g.qubits for g in circuit], tuple())))))
+        return self._sequence_sample(tn_adapter, circuit, indices)
+
+    def _sequence_sample(self, tn: TNAdapter, circuit, indices, batch_size=10, batch_fix_sequence=None, dim=2):
+        K = int(np.ceil(len(indices) / batch_size))
+        if batch_fix_sequence is None:
+            batch_fix_sequence = [1]*K
+
+        cache = {}
+        samples = [Bs.str('', prob=1., dim=dim)]
+        z_0 = None
+        for i in range(K):
+            free_batch_ix = indices[i*batch_size:(i+1)*batch_size]
+            for j in range(len(samples)):
+                bs = samples.pop(0)
+                res = None
+                if len(bs)>0:
+                    # key on (length, value) so prefixes of different lengths don't collide
+                    res = cache.get((len(bs), bs.to_int()))
+                if res is None:
+                    # NOTE: conditioning on the already-fixed prefix (e.g. by slicing tn) is not applied here yet
+                    res = self.contract_tn(circuit, batch_vars=free_batch_ix, tn=tn.qtn)
+                    res = self.backend.get_result_data(res)
+                    res = np.abs(res)**2
+                    if len(bs)>0:
+                        cache[(len(bs), bs.to_int())] = res
+
+                # result is shaped over the free (batch) indices
+                if z_0 is None:
+                    z_0 = res.sum()
+                prob_prev = bs._prob
+                z_n = res.sum()
+                log.debug('bs {}, Sum res {}, prev_Z {}, prob_prev {}',
+                          bs, res.sum(), prob_prev*z_0, prob_prev
+                          )
+                pdist = res.flatten() / z_n
+                log.debug(f'Prob distribution: {pdist.round(4)}')
+                indices_bs = np.arange(len(pdist))
+                batch_ix = np.random.choice(indices_bs, batch_fix_sequence[i], p=pdist)
+                for ix in batch_ix:
+                    _new_s = bs + Bs.int(ix, width=len(free_batch_ix), prob=pdist[ix], dim=dim)
+                    log.trace(f'New sample: {_new_s}')
+                    samples.append(_new_s)
+
+        return samples
+
 
 class CirqSimulator(Simulator):
 
@@ -155,3 +196,17 @@
     def simulate(self, qc, **params):
         sim = cirq.Simulator(**params)
         return sim.simulate(qc)
+
+if __name__=="__main__":
+    import networkx as nx
+
+    G = nx.random_regular_graph(3, 10)
+    gamma, beta = [np.pi/3], [np.pi/2]
+
+    from qtensor import QtreeQAOAComposer, QAOAQtreeSimulator
+    composer = QtreeQAOAComposer(graph=G, gamma=gamma, beta=beta)
+    composer.ansatz_state()
+
+    sim = QAOAQtreeSimulator(composer)
+
+    log.debug('Smoke test: built simulator {}', sim)
\ No newline at end of file
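# Illustration of the batched chain-rule sampling scheme that
# Simulator._sequence_sample above is built around (sketch only, not part of
# the patch). The tensor-network contraction is replaced by a toy joint
# distribution `p_full`, and the prefix cache / probability bookkeeping are
# omitted: each round conditions on the bits fixed so far, forms the marginal
# over the next batch (the analogue of `res.flatten() / z_n`), and draws one
# extension from it, much like np.random.choice(..., p=pdist) in the patch.
import numpy as np

rng = np.random.default_rng(0)
n_bits, batch_size = 6, 2
p_full = rng.random(2 ** n_bits)
p_full /= p_full.sum()                       # toy joint distribution over 6-bit strings

sample = []                                  # bits fixed so far (the Bitstring prefix)
for start in range(0, n_bits, batch_size):
    width = min(batch_size, n_bits - start)
    table = p_full.reshape([2] * n_bits)[tuple(sample)]    # condition on the prefix
    marg = table.reshape([2] * width + [-1]).sum(axis=-1).ravel()
    pdist = marg / marg.sum()                # normalised conditional over the batch
    ix = rng.choice(len(pdist), p=pdist)     # one draw, like batch_fix_sequence=[1]*K
    sample += list(np.unravel_index(ix, [2] * width))
print(sample)                                # one bitstring, drawn batch by batch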
diff --git a/qtensor/TNAdapter.py b/qtensor/TNAdapter.py
new file mode 100644
index 00000000..a7b99666
--- /dev/null
+++ b/qtensor/TNAdapter.py
@@ -0,0 +1,128 @@
+import numpy as np
+import networkx as nx
+from loguru import logger
+import qtree, qtensor
+import sys
+from typing import Iterable, Union
+
+from qtensor.optimisation.TensorNet import circ2buckets_init
+
+logger.remove()
+logger.add(sys.stderr, level='DEBUG')
+
+class ContractionInfo:
+    pass
+
+class TNAdapter:
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
+        return ContractionInfo()
+
+    # Inplace or not?
+    def slice(self, slice_dict: dict) -> 'TNAdapter':
+        ...
+
+    # Inplace or not?
+    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
+        ...
+
+    # require this?
+    def copy(self):
+        pass
+
+    def add_tensor(self, data, indices):
+        pass
+
+def add_random_tn(tn:TNAdapter, graph:nx.Graph, e2i:dict={}, min_ix=0, dim=2):
+    e2i_default = {tuple(sorted(e)):i for i,e in enumerate(graph.edges(), min_ix)}
+    if set(e2i.keys()).intersection(set(e2i_default.keys())):
+        raise ValueError("e2i and e2i_default have common keys")
+    # overwrite default with e2i
+    e2i_default.update(e2i)
+    e2i = e2i_default
+    logger.debug("Indices: {}", e2i)
+    for u in graph:
+        indices = []
+        for v in graph[u]:
+            edge = tuple(sorted((u,v)))
+            indices.append(e2i[edge])
+        #tn.add_tensor(np.random.randn(*[dim]*len(indices)), indices)
+        tn.add_tensor(1+np.random.rand(*[dim]*len(indices)), indices)
+    return list(e2i_default.values())
+
+def test_TNAdapter(cls):
+    for dim in [2,3,4]:
+        tn = cls()
+        logger.debug("Testing dim {}", dim)
+        graph = nx.random_regular_graph(3, 10)
+        indices = add_random_tn(tn, graph, dim=dim)
+        # tn full contraction
+        _c_info = tn.optimize()
+        ref = tn.contract(_c_info)
+        # test slicing
+        for index in indices:
+            values = []
+            for v in range(dim):
+                tn2 = tn.slice({index:v})
+                _c_info = tn2.optimize()
+                logger.debug("Contracting {} with value {}", str(index), v)
+                values.append(tn2.contract(_c_info))
+            logger.debug("Reference: {}, values: {}", ref, values)
+            assert np.allclose(np.sum(values), ref)
+        # test free indices and partial contraction
+        for _ in range(5):
+            # random subset of indices
+            __memory_budget = 2**24 # at most 2**24 entries in the open-index output
+            __max_free = int(np.log(__memory_budget) / np.log(dim))
+            n_free = np.random.randint(1, min(len(indices), __max_free))
+            n_contract = len(indices) - n_free
+            contract = np.random.choice(indices, n_contract, replace=False)
+            logger.debug("Free indices: {}, contract ({}): {}", n_free, n_contract, contract)
+            tn2 = tn.slice({}) # copy
+            # specify the non-contracted (free) indices explicitly
+            _c_info = tn2.optimize(out_indices=[i for i in indices if i not in contract])
+            res = tn2.contract(_c_info)
+            logger.debug("Reference: {}, result shape: {}", ref, res.shape)
+            assert np.allclose(res.sum(), ref)
+
+
+        logger.debug("Testing dim {} finished!\n===", dim)
+
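# Hypothetical dense-numpy adapter (not part of the patch) that follows the
# TNAdapter protocol declared above, only to make the expected semantics of
# add_tensor / optimize / slice / contract concrete for test_TNAdapter. The
# class name NumpyTNAdapter and its internals are illustrative, not an
# existing API; it keeps every tensor dense and contracts with np.einsum, so
# it only works for small networks (at most 26 distinct indices).
import numpy as np

class NumpyTNAdapter(TNAdapter):
    def __init__(self, *args, **kwargs):
        self._tensors = []                    # list of (ndarray, [index, ...]) pairs

    def add_tensor(self, data, indices):
        self._tensors.append((np.asarray(data), list(indices)))

    def slice(self, slice_dict: dict) -> 'NumpyTNAdapter':
        # return a new adapter with the sliced axes fixed and removed
        new = NumpyTNAdapter()
        for data, ixs in self._tensors:
            key = tuple(slice_dict[i] if i in slice_dict else slice(None) for i in ixs)
            new.add_tensor(data[key], [i for i in ixs if i not in slice_dict])
        return new

    def optimize(self, out_indices=[]):
        return list(out_indices)              # "contraction info" is just the open indices

    def contract(self, contraction_info) -> np.ndarray:
        all_ix = sorted({i for _, ixs in self._tensors for i in ixs})
        i2c = {ix: chr(ord('a') + n) for n, ix in enumerate(all_ix)}
        terms = ','.join(''.join(i2c[i] for i in ixs) for _, ixs in self._tensors)
        out = ''.join(i2c[i] for i in sorted(set(contraction_info)))
        return np.einsum(terms + '->' + out,
                         *[d for d, _ in self._tensors], optimize=True)

# test_TNAdapter(NumpyTNAdapter)  # the protocol check above, run on the toy adapter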
+# -- QTensor tensor adapter
+
+class QTensorContractionInfo(ContractionInfo):
+    def __init__(self, peo, width):
+        self.peo = peo
+        self.width = width
+
+    def __repr__(self):
+        return f"QTensorContractionInfo({self.peo}, {self.width})"
+
+class QTensorTNAdapter(TNAdapter):
+    def __init__(self, buckets, data_dict, bra_vars, ket_vars, *args, **kwargs):
+        self._indices_to_vars = {}
+        self.qtn = qtensor.optimisation.QtreeTensorNet(
+            buckets, data_dict, bra_vars, ket_vars
+        )
+
+    @property
+    def _all_indices(self):
+        return set(self._indices_to_vars.keys())
+
+
+    @classmethod
+    def from_qtree_gates(cls, qc, init_state=None, **kwargs):
+        all_gates = qc
+        n_qubits = len(set(sum([g.qubits for g in all_gates], tuple())))
+        qtree_circuit = [[g] for g in qc]
+        if init_state is None:
+            buckets, data_dict, bra_vars, ket_vars = qtree.optimizer.circ2buckets(
+                n_qubits, qtree_circuit)
+        else:
+            buckets, data_dict, bra_vars, ket_vars = circ2buckets_init(
+                n_qubits, qtree_circuit, init_vector=init_state)
+
+        tn = cls(buckets, data_dict, bra_vars, ket_vars, **kwargs)
+        return tn
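# Rough end-to-end sketch of how the new pieces are meant to fit together
# (illustration only, not part of the patch). It assumes qtensor exports
# QtreeQAOAComposer, QtreeSimulator and QTensorTNAdapter, and that the
# composer exposes its gate list as composer.circuit, as in the __main__
# block of Simulate.py above. QTensorTNAdapter.optimize/slice/contract are
# still stubs and Simulator.sample is work in progress, so only construction
# and the sampling entry point are shown.
import networkx as nx
import numpy as np
from qtensor import QtreeQAOAComposer, QtreeSimulator, QTensorTNAdapter

G = nx.random_regular_graph(3, 10)
composer = QtreeQAOAComposer(graph=G, gamma=[np.pi/3], beta=[np.pi/2])
composer.ansatz_state()

# wrap the circuit's tensor network; the underlying QtreeTensorNet sits in .qtn
# (this is what sample() builds internally, per the TODO in Simulate.py)
tn = QTensorTNAdapter.from_qtree_gates(composer.circuit)

sim = QtreeSimulator()
amps = sim.simulate_batch(composer.circuit, batch_vars=2)  # amplitudes over 2 free qubits
samples = sim.sample(composer.circuit)                     # sequential batched sampling (WIP)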