From eeba989bb5f5d4026eae1384919fd9bfbaf1fe29 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 9 Oct 2023 23:00:51 +0000
Subject: [PATCH 01/15] Sketch out conj method

---
 Dockerfile.dev      | 18 +++++++++++
 dev.sh              |  4 +++
 qtensor/Simulate.py | 74 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+)
 create mode 100644 Dockerfile.dev
 create mode 100755 dev.sh

diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 00000000..9f42b3ac
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,18 @@
+FROM nvidia/cuda:11.0.3-runtime-ubuntu20.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt update -y && \
+    apt install -y python3 python3-pip git htop vim
+
+# Make sure you first recursively clone down the git repo before building
+WORKDIR /app
+RUN pip install quimb pyrofiler cartesian-explorer opt_einsum
+RUN pip install --no-binary pynauty pynauty
+# Run the below commands after the container opens - because volume hasn't mounted yet
+# RUN cd qtree && pip install .
+# RUN pip install .
+RUN pip install pdbpp
+RUN pip install tensornetwork
+
+ENTRYPOINT ["bash"]
\ No newline at end of file
diff --git a/dev.sh b/dev.sh
new file mode 100755
index 00000000..729e3c14
--- /dev/null
+++ b/dev.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+docker build -f Dockerfile.dev -t dev .
+docker run -v $(pwd):/app -it dev
\ No newline at end of file
diff --git a/qtensor/Simulate.py b/qtensor/Simulate.py
index 0e271bd1..c2925031 100644
--- a/qtensor/Simulate.py
+++ b/qtensor/Simulate.py
@@ -155,3 +155,77 @@ def simulate(self, qc, **params):
         sim = cirq.Simulator(**params)
         return sim.simulate(qc)
 
+if __name__=="__main__":
+    import networkx as nx
+    import numpy as np
+    import tensornetwork as tn
+
+    G = nx.random_regular_graph(3, 10)
+    gamma, beta = [np.pi/3], [np.pi/2]
+
+    from qtensor import QtreeQAOAComposer, QAOAQtreeSimulator
+    composer = QtreeQAOAComposer(graph=G, gamma=gamma, beta=beta)
+    composer.ansatz_state()
+
+    sim = QAOAQtreeSimulator(composer)
+
+    # now let's run the prepare buckets method to init the tn
+    sim.simulate_batch(composer.circuit)
+    buckets = sim.tn.buckets
+
+    # now let's use these buckets to square the TN
+    def conj(buckets):
+        # turn each bucket into a node
+        nodes = []
+        for bucket in buckets:
+            node = tn.Node(np.array(bucket))
+            nodes.append(node)
+
+        # now for each node, append its conjugate
+        conj_nodes = []
+        for node in nodes:
+            conj = np.conj(node.tensor)
+            conj_node = tn.Node(conj)
+            conj_nodes.append(conj_node)
+
+
+        indices = {}
+
+        for node in nodes:
+            for conj_node in conj_nodes:
+                # check if there is a shared index between a node and a conj_node
+                node_indices = node.get_all_dangling()
+                conj_indices = conj_node.get_all_dangling()
+
+                shared_indices = set(node_indices).intersection(set(conj_indices))
+                if shared_indices:
+                    if node not in indices:
+                        indices[node] = shared_indices
+                    else:
+                        indices[node].update(shared_indices)
+
+                    if conj_node not in indices:
+                        indices[conj_node] = shared_indices
+                    else:
+                        indices[conj_node].update(shared_indices)
+
+        for node, shared_indices in indices.items():
+            for pair_node in indices.keys():
+                if node == pair_node:
+                    continue
+                # if there are shared indices, connect an edge
+                if shared_indices.intersection(set(pair_node.get_all_dangling())):
+                    edge = tn.connect(node, pair_node)
+
+        # TODO: TNAdapter should support tensornetwork.Node
+        # So that we can contract this resulting tensor network directly
+        return []
+
+    tn_with_conj = conj(buckets)
+
+    # TODO: contract or sample using tn_with_conj based on method in other branch
+
+
+
+    log.debug('hello world')
+    import pdb; pdb.set_trace()
\ No newline at end of file

From 69d0341bac82e2055d426a2198f5a76230d64a78 Mon Sep 17 00:00:00 2001
From: Dan Lykov
Date: Wed, 18 Oct 2023 00:03:34 -0500
Subject: [PATCH 02/15] add draft of tn api

---
 scratchpad/tn_api/test_tn_api.py |  51 ++++++++++
 scratchpad/tn_api/tn.py          | 156 +++++++++++++++++++++++++++++++
 2 files changed, 207 insertions(+)
 create mode 100644 scratchpad/tn_api/test_tn_api.py
 create mode 100644 scratchpad/tn_api/tn.py

diff --git a/scratchpad/tn_api/test_tn_api.py b/scratchpad/tn_api/test_tn_api.py
new file mode 100644
index 00000000..a7ec7d0c
--- /dev/null
+++ b/scratchpad/tn_api/test_tn_api.py
@@ -0,0 +1,51 @@
+import random
+import numpy as np
+from tn import TensorNetwork
+from functools import reduce
+
+def test_add_numpy_array():
+    a = TensorNetwork()
+    t = np.random.randn(2, 2)
+    a.add(t)
+    b = TensorNetwork()
+    b.add(a)
+    assert b == a
+
+
+def test_composition():
+    """
+    tensor network adding is associative
+    """
+    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
+    stack = TensorNetwork()
+    # (((0A)B)C)D
+    for tn in tns:
+        stack.add(tn)
+    # A(B(CD))
+    for i in range(len(tns)):
+        l = tns[len(tns)-2-i]
+        r = tns[len(tns)-1-i]
+        l.add(r)
+
+    assert stack == tns[0]
+
+def test_edges_consistent_ports():
+    tns = [TensorNetwork.new_random_cpu(2, 3, 4) for _ in range(5)]
+    tn = TensorNetwork()
+    # (((0A)B)C)D
+    for t in tns:
+        tn.add(t)
+
+    port_data = {}
+    for e in tn._edges:
+        for p in e:
+            port_data[p.tensor_ref] = port_data.get(p.tensor_ref, [])
+            port_data[p.tensor_ref].append(p.ix)
+    for i, t in enumerate(tn._tensors):
+        assert len(t.shape) == len(port_data[i])
+
+
+
+if __name__=="__main__":
+    test_edges_consistent_ports()
+    test_composition()
diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
new file mode 100644
index 00000000..7f69a28b
--- /dev/null
+++ b/scratchpad/tn_api/tn.py
@@ -0,0 +1,156 @@
+import numpy as np
+import math
+from dataclasses import dataclass
+from typing import TypeVar, Generic, Iterable
+
+class Array(np.ndarray):
+    shape: tuple[int]
+
+D = TypeVar('D') # tensor data type (numpy, torch, etc.)
+
+class ContractionInfo:
+    pass
+
+class TensorNetworkIFC(Generic[D]):
+    def __init__(self, *args, **kwargs):
+        ...
+
+    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
+        return ContractionInfo()
+
+    # slice not inplace
+    def slice(self, slice_dict: dict) -> 'TensorNetwork':
+        ...
+
+    # contract to produce a new tensor
+    def contract(self, contraction_info: ContractionInfo) -> D:
+        ...
+
+    #
+    def copy(self):
+        ...
+
+    def add(self, other: "TensorNetworkIFC[D] | D"):
+        ...
+
+
+    @classmethod
+    def new_random_cpu(cls, dims: Iterable[int])-> 'TensorNetworkIFC[D]':
+        ...
+
+    def __eq__(a, b):
+        ...
+
+
+N = TypeVar('N', bound=np.ndarray)
+
+@dataclass
+class Port:
+    tensor_ref: int
+    ix: int
+
+class TensorNetwork(TensorNetworkIFC[np.ndarray]):
+    tensors: Iterable[np.ndarray]
+    shape: tuple
+    edges: tuple[tuple[Port]]
+
+    def __init__(self, *args, **kwargs):
+        self._tensors = []
+        self._edges = tuple()
+        self.shape = tuple()
+
+    # slice not inplace
+    def slice(self, slice_dict: dict) -> 'TensorNetwork':
+        ...
+
+    def copy(self):
+        new = TensorNetwork()
+        new._tensors = self._tensors
+        new._edges = self._edges
+        new.shape = self.shape
+        return new
+
+    def add(self, other: "TensorNetwork | np.ndarray"):
+        if not isinstance(other, TensorNetwork):
+            self._tensors.append(other)
+            self.shape = self.shape + other.shape
+        else:
+            m = len(self._tensors)
+            n = len(self.shape)
+            # -- other's edges tensors will refer to shifted tensor location
+            enew = []
+            for e in other._edges:
+                e_ = []
+                for p in e:
+                    if p.tensor_ref == -1:
+                        e_.append(Port(tensor_ref=-1, ix=p.ix+n))
+                    else:
+                        e_.append(Port(tensor_ref=p.tensor_ref+m, ix=p.ix))
+                enew.append(tuple(e_))
+
+            self._edges += tuple(enew)
+            self._tensors += other._tensors
+            self.shape += other.shape
+
+    # contract to produce a new tensor
+    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
+        raise NotImplementedError()
+
+    def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
+        raise NotImplementedError()
+
+
+    @classmethod
+    def new_random_cpu(cls, count, size, dim: int):
+        out = cls()
+        for i in range(count):
+            t: np.ndarray = np.random.random((dim, )*size)
+            out.add(t)
+        # arbitrary number of output indices
+        out_dims = np.random.randint(low=0, high=len(out.shape))
+        tensor_dims = len(out.shape)
+        out.shape = (dim, )*out_dims
+        # -- random connectivity
+        # A hypergraph can be generated as a partition into
+        # E parts where E is number of edges. The isolated vertices are equivalent
+        # to vertices with 1 edge that contains only them.
+        # arbitrary max number of edges, must be less than total indices
+        edges_cnt = np.random.randint(low=1, high=tensor_dims+out_dims)
+        # a partition can be implemented using a random function
+        partition_fn = lambda : np.random.randint(low=0, high=edges_cnt)
+        partition_dict = {}
+        for t_ref, t in enumerate(out._tensors):
+            for i in range(t.ndim):
+                eix = partition_fn()
+                new_port = Port(tensor_ref=t_ref, ix=i)
+                partition_dict[eix] = partition_dict.get(eix, [])
+                partition_dict[eix].append(new_port)
+
+        # add "self" tensor indices to partition
+        for i in range(len(out.shape)):
+            eix = partition_fn()
+            new_port = Port(tensor_ref=-1, ix=i)
+            partition_dict[eix] = partition_dict.get(eix, [])
+            partition_dict[eix].append(new_port)
+
+        edges = []
+        for i in range(edges_cnt):
+            p = partition_dict.get(i)
+            if p is not None:
+                edges.append(tuple(p))
+        out._edges = tuple(edges)
+        return out
+
+    def __eq__(self, other):
+        if self.shape != other.shape:
+            return False
+        if self._edges != other._edges:
+            return False
+        return all((a==b).all() for a, b in zip(self._tensors, other._tensors))
+
+    def __repr__(self):
+        return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"
+
+
+

From 0d744620ae6eff984c43c68c9148076a722a6e83 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Sat, 21 Oct 2023 10:24:57 +0000
Subject: [PATCH 03/15] Use np sliced buckets method from qtree

---
 scratchpad/tn_api/tn.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 7f69a28b..0acbefe1 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -2,9 +2,10 @@ import numpy as np
 import math
 from dataclasses import dataclass
 from typing import TypeVar, Generic, Iterable
+from qtree import np_framework
 
 class Array(np.ndarray):
-    shape: tuple[int]
+    shape: tuple
 
 D = TypeVar('D') # tensor data type (numpy, torch, etc.)
 
@@ -52,16 +53,21 @@ class Port:
 class TensorNetwork(TensorNetworkIFC[np.ndarray]):
     tensors: Iterable[np.ndarray]
     shape: tuple
-    edges: tuple[tuple[Port]]
+    edges: tuple
 
     def __init__(self, *args, **kwargs):
         self._tensors = []
         self._edges = tuple()
        self.shape = tuple()
+        self.buckets = []
+        self.data_dict = {}
 
     # slice not inplace
     def slice(self, slice_dict: dict) -> 'TensorNetwork':
-        ...
+        tn = self.copy()
+        sliced_buckets = np_framework.get_sliced_np_buckets(self.buckets, self.data_dict, slice_dict)
+        tn.buckets = sliced_buckets
+        return tn
 
     def copy(self):
         new = TensorNetwork()
@@ -153,4 +159,6 @@ def __repr__(self):
         return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"
 
 
-
+if __name__ == "__main__":
+    tn = TensorNetwork.new_random_cpu(2, 3, 4)
+    import pdb; pdb.set_trace()
\ No newline at end of file

From b0645a2656b23916e422992b20bc6ca60b30442f Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Sun, 22 Oct 2023 07:53:07 +0000
Subject: [PATCH 04/15] Update

---
 scratchpad/tn_api/tn.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 0acbefe1..c9ef5d8c 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -2,7 +2,6 @@ import numpy as np
 import math
 from dataclasses import dataclass
 from typing import TypeVar, Generic, Iterable
-from qtree import np_framework
 
 class Array(np.ndarray):
     shape: tuple
@@ -59,14 +58,22 @@ def __init__(self, *args, **kwargs):
         self._tensors = []
         self._edges = tuple()
         self.shape = tuple()
-        self.buckets = []
-        self.data_dict = {}
 
     # slice not inplace
     def slice(self, slice_dict: dict) -> 'TensorNetwork':
         tn = self.copy()
-        sliced_buckets = np_framework.get_sliced_np_buckets(self.buckets, self.data_dict, slice_dict)
-        tn.buckets = sliced_buckets
+        sliced_tns = []
+        for tensor in tn._tensors:
+            slice_bounds = []
+            for idx in range(tensor.ndim):
+                try:
+                    slice_bounds.append(slice_dict[idx])
+                except KeyError:
+                    slice_bounds.append(slice(None))
+
+            sliced_tns.append(tensor[tuple(slice_bounds)])
+
+        tn._tensors = sliced_tns
         return tn
 
     def copy(self):
@@ -161,4 +168,6 @@ def __repr__(self):
 
 if __name__ == "__main__":
     tn = TensorNetwork.new_random_cpu(2, 3, 4)
+    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
+    sliced_tn = tn.slice(slice_dict)
     import pdb; pdb.set_trace()
\ No newline at end of file

From d7e24c22155793c91f841cdc8f43df17de8c67a5 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 30 Oct 2023 16:15:42 +0000
Subject: [PATCH 05/15] Address comments

---
 scratchpad/tn_api/tn.py | 53 +++++++++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 12 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index c9ef5d8c..c5364dfd 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -58,22 +58,50 @@ def __init__(self, *args, **kwargs):
         self._tensors = []
         self._edges = tuple()
         self.shape = tuple()
+        self._indices = {}
 
     # slice not inplace
     def slice(self, slice_dict: dict) -> 'TensorNetwork':
         tn = self.copy()
-        sliced_tns = []
-        for tensor in tn._tensors:
-            slice_bounds = []
-            for idx in range(tensor.ndim):
-                try:
-                    slice_bounds.append(slice_dict[idx])
-                except KeyError:
-                    slice_bounds.append(slice(None))
-
-            sliced_tns.append(tensor[tuple(slice_bounds)])
-
-        tn._tensors = sliced_tns
+
+        # hydrate the index map of edge index to tensor and dimension (via port object)
+        for idx, edge in enumerate(tn._edges):
+            for port in edge:
+                if idx in tn._indices:
+                    tn._indices[idx].append(port)
+                else:
+                    tn._indices[idx] = [port]
+
+        for idx, slice_val in slice_dict.items():
+            # don't care about indices that are not in TN
+            if idx not in tn._indices:
+                continue
+
+            edge = tn._edges.pop(idx)
+            # get all tensors indexed by this edge
+            tensors_to_slice = set(port.tensor_ref for port in edge)
+            # store slice index and value for each tensor
+            local_slices_dict = {}
+            for current_tensor_ref in tensors_to_slice:
+                slice_dict = {}
+                # get all ports for the current tensor
+                current_tensor_ref_ports = [port for port in edge where port.tensor_ref == current_tensor_ref]
+                for current_port in current_tensor_ref_ports:
+                    slice_dict[current_port.ix] = slice_val
+                # store the slice params for this tensor in the local dict
+                local_slices_dict[current_tensor_ref] = slice_dict
+
+            # now use the local slice dict to slice for each tensor
+            for current_tensor_ref, slice_dict in local_slices_dict.items():
+                slice_bounds = []
+                current_tensor = tn._tensors[current_tensor_ref]
+                for idx in range(current_tensor.ndim):
+                    try:
+                        slice_bounds.append(slice_dict[idx])
+                    except KeyError:
+                        slice_bounds.append(slice(None))
+                tn._tensors[current_tensor_ref] = tn._tensors[current_tensor_ref][tuple(slice_bounds)]
+
         return tn
 
     def copy(self):
@@ -81,6 +109,7 @@ def copy(self):
         new._tensors = self._tensors
         new._edges = self._edges
         new.shape = self.shape
+        new.indices = self.indices
         return new
 
     def add(self, other: "TensorNetwork | np.ndarray"):

From 1aafd0520ed6bd2fa5fec51ad74e8ec9e5f978f6 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 6 Nov 2023 10:43:22 +0000
Subject: [PATCH 06/15] Cleanup

---
 scratchpad/tn_api/tn.py | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index c5364dfd..1c7898c7 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -58,23 +58,14 @@ def __init__(self, *args, **kwargs):
         self._tensors = []
         self._edges = tuple()
         self.shape = tuple()
-        self._indices = {}
 
     # slice not inplace
     def slice(self, slice_dict: dict) -> 'TensorNetwork':
         tn = self.copy()
 
-        # hydrate the index map of edge index to tensor and dimension (via port object)
-        for idx, edge in enumerate(tn._edges):
-            for port in edge:
-                if idx in tn._indices:
-                    tn._indices[idx].append(port)
-                else:
-                    tn._indices[idx] = [port]
-
         for idx, slice_val in slice_dict.items():
-            # don't care about indices that are not in TN
-            if idx not in tn._indices:
+            # make sure idx is valid
+            if idx >= len(tn._edges):
                 continue
 
             edge = tn._edges.pop(idx)
@@ -85,7 +76,7 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
         for current_tensor_ref in tensors_to_slice:
             slice_dict = {}
             # get all ports for the current tensor
-            current_tensor_ref_ports = [port for port in edge where port.tensor_ref == current_tensor_ref]
+            current_tensor_ref_ports = [port for port in edge if port.tensor_ref == current_tensor_ref]
             for current_port in current_tensor_ref_ports:
                 slice_dict[current_port.ix] = slice_val

From 3533289262f5712e9903295efc715c6f3cdaca5f Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 6 Nov 2023 14:10:46 +0000
Subject: [PATCH 07/15] progress

---
 scratchpad/tn_api/tn.py | 51 ++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 11 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 1c7898c7..4a11e01f 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -1,15 +1,26 @@
 import numpy as np
 import math
+import string
 from dataclasses import dataclass
-from typing import TypeVar, Generic, Iterable
+from typing import TypeVar, Generic, Iterable, Tuple
 
 class Array(np.ndarray):
     shape: tuple
 
 D = TypeVar('D') # tensor data type (numpy, torch, etc.)
 
+CHARS = string.ascii_lowercase + string.ascii_uppercase
+
+N = TypeVar('N', bound=np.ndarray)
+
+@dataclass
+class Port:
+    tensor_ref: int
+    ix: int
+
+@dataclass
 class ContractionInfo:
-    pass
+    result_indices: Iterable[int]
 
 class TensorNetworkIFC(Generic[D]):
     def __init__(self, *args, **kwargs):
@@ -42,13 +53,6 @@ def __eq__(a, b):
         ...
 
 
-N = TypeVar('N', bound=np.ndarray)
-
-@dataclass
-class Port:
-    tensor_ref: int
-    ix: int
-
 class TensorNetwork(TensorNetworkIFC[np.ndarray]):
     tensors: Iterable[np.ndarray]
     shape: tuple
@@ -100,7 +104,6 @@ def copy(self):
         new._tensors = self._tensors
         new._edges = self._edges
         new.shape = self.shape
-        new.indices = self.indices
         return new
 
     def add(self, other: "TensorNetwork | np.ndarray"):
@@ -127,7 +130,26 @@ def add(self, other: "TensorNetwork | np.ndarray"):
 
     # contract to produce a new tensor
     def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
-        raise NotImplementedError()
+        tensors_to_contract = [self._tensors[port.tensor_ref] for port in self._edges]
+        einsum_expr = self._get_einsum_expr(contraction_info)
+
+        return np.einsum(einsum_expr, *tensors_to_contract)
+
+    # based on implementation in
+    # qtensor/contraction_backends/numpy.py -> get_einsum_expr
+    def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
+        # map each index to a character
+        all_indices = sorted(set(port.ix for port in self._edges))
+        if len(all_indices) > len(CHARS):
+            raise ValueError("too many indices to map to CHARS")
+        index_to_char = {index: CHARS[i] for i, index in enumerate(all_indices)}
+
+        # i think this is missing buckets, comparing to other einsums that look like this
+        # np.einsum('ijk,ijl->jkl', a, b)
+        expr = ''.join(index_to_char[port.ix] for port in self._edges) + '->' + \
+            ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
+
+        return expr
 
     def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
         raise NotImplementedError()
@@ -190,4 +212,11 @@ def __repr__(self):
     tn = TensorNetwork.new_random_cpu(2, 3, 4)
     slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
     sliced_tn = tn.slice(slice_dict)
+
+    random_index_to_contract = np.random.randint(0, len(sliced_tn.shape))
+
+    contraction_info = ContractionInfo(random_index_to_contract)
+
+    contracted_tensor = sliced_tn.contract(contraction_info)
+
     import pdb; pdb.set_trace()
\ No newline at end of file

From ea2a43cc3b4428dfafa72ba24b01a3e4e6e5639f Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 6 Nov 2023 22:52:34 +0000
Subject: [PATCH 08/15] Bugfixes

---
 scratchpad/tn_api/tn.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 4a11e01f..f497ecc4 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -72,7 +72,10 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
             if idx >= len(tn._edges):
                 continue
 
-            edge = tn._edges.pop(idx)
+            edge_list = list(tn._edges)
+            edge = edge_list.pop(idx)
+            # now put the updated edges back on the class
+            tn._edges = tuple(edge_list)
             # get all tensors indexed by this edge
             tensors_to_slice = set(port.tensor_ref for port in edge)
             # store slice index and value for each tensor
@@ -133,24 +136,26 @@ def add(self, other: "TensorNetwork | np.ndarray"):
 
     # contract to produce a new tensor
     def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
-        tensors_to_contract = [self._tensors[port.tensor_ref] for port in self._edges]
+        tensors_to_contract = [self._tensors[port.tensor_ref] for edge in self._edges for port in edge if port.tensor_ref != -1]
         einsum_expr = self._get_einsum_expr(contraction_info)
-
+        import pdb; pdb.set_trace()
         return np.einsum(einsum_expr, *tensors_to_contract)
 
     # based on implementation in
     # qtensor/contraction_backends/numpy.py -> get_einsum_expr
     def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
         # map each index to a character
-        all_indices = sorted(set(port.ix for port in self._edges))
+        all_indices = sorted(set(port.ix for edge in self._edges for port in edge))
+        # TODO: is this the correct mapping?
+
         if len(all_indices) > len(CHARS):
             raise ValueError("too many indices to map to CHARS")
         index_to_char = {index: CHARS[i] for i, index in enumerate(all_indices)}
-
-        # i think this is missing buckets, comparing to other einsums that look like this
+        import pdb; pdb.set_trace()
+        # TODO: i think this is missing buckets?, comparing to other einsums that look like this
         # np.einsum('ijk,ijl->jkl', a, b)
-        expr = ''.join(index_to_char[port.ix] for port in self._edges) + '->' + \
-            ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
+        expr = ''.join(index_to_char[port.ix] for edge in self._edges for port in edge) + '->' + \
+                ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
 
         return expr
 
@@ -213,9 +218,9 @@ def __repr__(self):
     slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
     sliced_tn = tn.slice(slice_dict)
 
-    random_index_to_contract = np.random.randint(0, len(sliced_tn.shape))
+    random_indices_to_contract = (np.random.randint(0, 50), )
 
-    contraction_info = ContractionInfo(random_index_to_contract)
+    contraction_info = ContractionInfo(random_indices_to_contract)
 
     contracted_tensor = sliced_tn.contract(contraction_info)
 

From 0c91a8f954abdd4d95a2075f59ee0aaecc2d7a99 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 13 Nov 2023 13:29:15 +0000
Subject: [PATCH 09/15] Progress

---
 scratchpad/tn_api/tn.py | 80 +++++++++++++++++++++++++++++------------
 1 file changed, 57 insertions(+), 23 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index f497ecc4..58931c2c 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -133,32 +133,68 @@ def add(self, other: "TensorNetwork | np.ndarray"):
 
     # contract to produce a new tensor
     def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
-        tensors_to_contract = [self._tensors[port.tensor_ref] for edge in self._edges for port in edge if port.tensor_ref != -1]
         einsum_expr = self._get_einsum_expr(contraction_info)
         import pdb; pdb.set_trace()
-        return np.einsum(einsum_expr, *tensors_to_contract)
+        return np.einsum(einsum_expr, self._tensors)
+
+
+        # what can I understand next?
+        # if i setup this contract method to work for QTensorNet, it would be pretty similar becuase it also uses np.einsum
+        # other types can also be supported, such as QTensor and Quimb
+        # those would require setting up the optimize method to build the buckets
+        # TODO is read through and understand what it would take to set each of those up
+
     # based on implementation in
     # qtensor/contraction_backends/numpy.py -> get_einsum_expr
     def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
-        # map each index to a character
-        all_indices = sorted(set(port.ix for edge in self._edges for port in edge))
-        # TODO: is this the correct mapping?
-
-        if len(all_indices) > len(CHARS):
-            raise ValueError("too many indices to map to CHARS")
-        index_to_char = {index: CHARS[i] for i, index in enumerate(all_indices)}
-        import pdb; pdb.set_trace()
-        # TODO: i think this is missing buckets?, comparing to other einsums that look like this
-        # np.einsum('ijk,ijl->jkl', a, b)
-        expr = ''.join(index_to_char[port.ix] for edge in self._edges for port in edge) + '->' + \
-                ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
+        # mapping from tensor index to a tuple of edges that preserves ordering
+        # st can lookup tix -> tuple(idx of edges) # this iterable needs to be sorted by of port.ix
+        t_ref_to_edges = {}
+        for t_idx in range(0, len(self._tensors)):
+            connected_edges_dict = {} # use a dict to dedup
+            for edge_index, edge in enumerate(self._edges):
+                for port in edge:
+                    if port.tensor_ref == t_idx:
+                        connected_edges_dict[edge_index] = port.ix
+            # now sort by port ix
+            connected_edges = [(edge_index, port_ix) for edge_index, port_ix in connected_edges_dict.items()]
+            connected_edges_sorted = sorted(connected_edges, key=lambda x: x[1])
+            # extract the ix of the global edge
+            edge_indices_sorted = [edge_index for edge_index, port_ix in connected_edges_sorted]
+            t_ref_to_edges[t_idx] = edge_indices_sorted
+
+        # i:0, j:1, k:2, l:3 -> int is index is self._edges
+        # s[0] = (012), s[1]=(103) where index to s is the index in self._tensors
+        # edge 0 is (Port(t_ref=0, ix=0), Port(t_ref=1, ix=1)) # i
+        # edge 1 is (Port(t_ref=0, ix=1), Port(t_ref=1, ix=0)) # j
+        # edge 2 is (Port(t_ref=0, ix=2)) #k
+        # edge 3 is (Port(t_ref=1, ix=2)) #l
+
+        edge_to_char = {i: CHARS[i] for i in range(0, len(self._edges))}
+        # np.einsum('ijk,jil->jkl', a, b)
+        # expr = ','.join(''.join(index_to_char[port.ix] for edge in self._edges for port in edge) for t in self._tensors) + '->' + \
+        #     ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
+
+        substrs_to_join = []
+        for t_idx, t in enumerate(self._tensors):
+            substr = ''
+            for edge_idx in t_ref_to_edges[t_idx]:
+                substr += edge_to_char[edge_idx]
+            substrs_to_join.append(substr)
+
+        expr = ','.join(substrs_to_join) + '->' + ''.join(edge_to_char[ix] for ix in contraction_info.result_indices)
+
+
+        # expr = ','.join(''.join(index_to_char[edge_idx] for edge_idx in t_ref_to_edges[t]) for t in t_ref_to_edges.keys()) + '->' + \
+        #     ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
+
+        # expr = ','.join(''.join(index_to_char[self._edges[edge_index][i].ix] for i in range(len(self._edges[edge_index])) if self._edges[edge_index][i].tensor_ref == t_idx) for t_idx in range(len(self._tensors))) + '->' + \
+        #     ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
         return expr
 
     def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
         raise NotImplementedError()
 
-
     @classmethod
     def new_random_cpu(cls, count, size, dim: int):
@@ -212,16 +248,14 @@ def __repr__(self):
         return f"TensorNetwork({self.shape})<{self._tensors}, {self._edges}>"
 
 
-
 if __name__ == "__main__":
     tn = TensorNetwork.new_random_cpu(2, 3, 4)
-    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
-    sliced_tn = tn.slice(slice_dict)
-
-    random_indices_to_contract = (np.random.randint(0, 50), )
+    # slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
+    # sliced_tn = tn.slice(slice_dict)
 
+    # can also do "contract all except..." by knowing indices of edges in tn
+    random_indices_to_contract = (np.random.randint(0, len(tn._edges)), )
     contraction_info = ContractionInfo(random_indices_to_contract)
 
-    contracted_tensor = sliced_tn.contract(contraction_info)
-
+    contracted_tensor = tn.contract(contraction_info)
     import pdb; pdb.set_trace()
\ No newline at end of file

From dbeee3d7415e85a025965df97f0cbb8285a6672e Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 13 Nov 2023 14:30:56 +0000
Subject: [PATCH 10/15] Progress

---
 scratchpad/tn_api/tn.py | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 58931c2c..a32431cc 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -143,6 +143,22 @@ def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
         # other types can also be supported, such as QTensor and Quimb
         # those would require setting up the optimize method to build the buckets
         # TODO is read through and understand what it would take to set each of those up
+
+
+    def _get_random_indices_to_contract(self, count=2):
+        import random
+        tn_copy = self.copy()
+        indices_to_contract = []
+        counter = 0
+        edges_with_indices = [idx for idx, port in enumerate(list(tn_copy._edges))]
+
+        while counter < count:
+            random_element = random.choice(edges_with_indices)
+            edges_with_indices.remove(random_element)
+            indices_to_contract.append(random_element)
+            counter += 1
+
+        return sorted(indices_to_contract)
 
     # based on implementation in
     # qtensor/contraction_backends/numpy.py -> get_einsum_expr
@@ -184,13 +200,6 @@ def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
         expr = ','.join(substrs_to_join) + '->' + ''.join(edge_to_char[ix] for ix in contraction_info.result_indices)
-
-
-        # expr = ','.join(''.join(index_to_char[edge_idx] for edge_idx in t_ref_to_edges[t]) for t in t_ref_to_edges.keys()) + '->' + \
-        #     ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
-
-        # expr = ','.join(''.join(index_to_char[self._edges[edge_index][i].ix] for i in range(len(self._edges[edge_index])) if self._edges[edge_index][i].tensor_ref == t_idx) for t_idx in range(len(self._tensors))) + '->' + \
-        #     ''.join(index_to_char[ix] for ix in contraction_info.result_indices)
         return expr
 
     def optimize(self, out_indices: Iterable = []) -> ContractionInfo:
@@ -249,13 +258,17 @@ def __repr__(self):
 if __name__ == "__main__":
-    tn = TensorNetwork.new_random_cpu(2, 3, 4)
-    # slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
-    # sliced_tn = tn.slice(slice_dict)
+    dim = 3
+    tn = TensorNetwork.new_random_cpu(2, dim, 4)
+    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
+    sliced_tn = tn.slice(slice_dict)
 
     # can also do "contract all except..." by knowing indices of edges in tn
-    random_indices_to_contract = (np.random.randint(0, len(tn._edges)), )
-    contraction_info = ContractionInfo(random_indices_to_contract)
+    # generate random indices to contract
+
+    random_indices_to_contract = tn._get_random_indices_to_contract(2)
+    # random_indices_to_contract = (np.random.randint(0, len(tn._edges)), np.random.randint(0, len(tn._edges)), np.random.randint(0, len(tn._edges)),)
+    contraction_info = ContractionInfo(tuple(random_indices_to_contract))
 
     contracted_tensor = tn.contract(contraction_info)
     import pdb; pdb.set_trace()
\ No newline at end of file

From 1d7271bf69de91f6e940b5f7528fe47db4d2e613 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 13 Nov 2023 14:31:20 +0000
Subject: [PATCH 11/15] Notes

---
 scratchpad/tn_api/tn.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index a32431cc..e27bb21f 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -263,6 +263,9 @@ def __repr__(self):
     slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
     sliced_tn = tn.slice(slice_dict)
 
+    # Where did I leave off?
+    # Having trouble verifying tests, perhaps logic is incorrect but it makes sense to me
+
     # can also do "contract all except..." by knowing indices of edges in tn
     # generate random indices to contract

From 002628318aeea5a10a5842aaf0d1c62fcbf9d15c Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 13 Nov 2023 23:27:36 +0000
Subject: [PATCH 12/15] Cleanup

---
 scratchpad/tn_api/tn.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index e27bb21f..2ed98c6a 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -134,17 +134,12 @@ def add(self, other: "TensorNetwork | np.ndarray"):
     # contract to produce a new tensor
     def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
         einsum_expr = self._get_einsum_expr(contraction_info)
+        print(einsum_expr)
+        print(len(self._tensors))
         import pdb; pdb.set_trace()
         return np.einsum(einsum_expr, self._tensors)
-
-
-        # what can I understand next?
-        # if i setup this contract method to work for QTensorNet, it would be pretty similar becuase it also uses np.einsum
-        # other types can also be supported, such as QTensor and Quimb
-        # those would require setting up the optimize method to build the buckets
-        # TODO is read through and understand what it would take to set each of those up
 
     def _get_random_indices_to_contract(self, count=2):
         import random
         tn_copy = self.copy()
         indices_to_contract = []
@@ -152,7 +147,7 @@ def _get_random_indices_to_contract(self, count=2):
         counter = 0
         edges_with_indices = [idx for idx, port in enumerate(list(tn_copy._edges))]
 
-        while counter < count:
+        while counter < count and len(edges_with_indices) > 0:
             random_element = random.choice(edges_with_indices)
             edges_with_indices.remove(random_element)
             indices_to_contract.append(random_element)
@@ -162,8 +157,7 @@ def _get_random_indices_to_contract(self, count=2):
 
         return sorted(indices_to_contract)
 
-    # based on implementation in
-    # qtensor/contraction_backends/numpy.py -> get_einsum_expr
+    # for reference, see qtensor/contraction_backends/numpy.py -> get_einsum_expr
     def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
         # mapping from tensor index to a tuple of edges that preserves ordering
         # st can lookup tix -> tuple(idx of edges) # this iterable needs to be sorted by of port.ix
@@ -195,6 +189,9 @@ def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
                 substr += edge_to_char[edge_idx]
             substrs_to_join.append(substr)
 
+        for ix in contraction_info.result_indices:
+            if ix not in edge_to_char:
+                raise ValueError("result expects invalid indices")
+
         expr = ','.join(substrs_to_join) + '->' + ''.join(edge_to_char[ix] for ix in contraction_info.result_indices)
         return expr
@@ -268,7 +266,7 @@ def __repr__(self):
     # generate random indices to contract
 
     random_indices_to_contract = tn._get_random_indices_to_contract(2)
-    # random_indices_to_contract = (np.random.randint(0, len(tn._edges)), np.random.randint(0, len(tn._edges)), np.random.randint(0, len(tn._edges)),)
+
     contraction_info = ContractionInfo(tuple(random_indices_to_contract))
 
     contracted_tensor = tn.contract(contraction_info)

From b48ea1867debbc3ec4e32b75aa87f654a0bf6c6c Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Tue, 14 Nov 2023 00:24:49 +0000
Subject: [PATCH 13/15] Progress after meeting

---
 scratchpad/tn_api/tn.py | 64 +++++++++++++++++++++++++++++------------
 1 file changed, 42 insertions(+), 22 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 2ed98c6a..71146b7d 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -81,7 +81,7 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
             # store slice index and value for each tensor
             local_slices_dict = {}
             for current_tensor_ref in tensors_to_slice:
-                slice_dict = {}
+                slice_dict = {} # TODO: make sure this handles the case with multiple ports pointing to the same tensor
                 # get all ports for the current tensor
                 current_tensor_ref_ports = [port for port in edge if port.tensor_ref == current_tensor_ref]
                 for current_port in current_tensor_ref_ports:
@@ -130,15 +130,7 @@ def add(self, other: "TensorNetwork | np.ndarray"):
             self._edges += tuple(enew)
             self._tensors += other._tensors
             self.shape += other.shape
 
-    # contract to produce a new tensor
-    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
-        einsum_expr = self._get_einsum_expr(contraction_info)
-        print(einsum_expr)
-        print(len(self._tensors))
-        import pdb; pdb.set_trace()
-        return np.einsum(einsum_expr, self._tensors)
-
     def _get_random_indices_to_contract(self, count=2):
         import random
         tn_copy = self.copy()
@@ -154,6 +146,18 @@ def _get_random_indices_to_contract(self, count=2):
             counter += 1
 
         return sorted(indices_to_contract)
+
+    # contract to produce a new tensor
+    def contract(self, contraction_info: ContractionInfo) -> np.ndarray:
+        einsum_expr = self._get_einsum_expr(contraction_info)
+        print(einsum_expr)
+        print([t.shape for t in self._tensors])
+        print(self._edges)
+        print(len(self._tensors))
+        try:
+            return np.einsum(einsum_expr, *self._tensors)
+        except:
+            import pdb; pdb.set_trace()
 
     # for reference, see qtensor/contraction_backends/numpy.py -> get_einsum_expr
     def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
@@ -166,16 +170,16 @@ def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
         # mapping from tensor index to a tuple of edges that preserves ordering
         # st can lookup tix -> tuple(idx of edges) # this iterable needs to be sorted by of port.ix
         t_ref_to_edges = {}
+        # TODO: can do this in a single loop by looping over edges and looking up
         for t_idx in range(0, len(self._tensors)):
-            connected_edges_dict = {} # use a dict to dedup
+            connected_edges = []
             for edge_index, edge in enumerate(self._edges):
                 for port in edge:
                     if port.tensor_ref == t_idx:
-                        connected_edges_dict[edge_index] = port.ix
+                        connected_edges.append((edge_index, port.ix))
             # now sort by port ix
-            connected_edges = [(edge_index, port_ix) for edge_index, port_ix in connected_edges_dict.items()]
             connected_edges_sorted = sorted(connected_edges, key=lambda x: x[1])
             # extract the ix of the global edge
             edge_indices_sorted = [edge_index for edge_index, port_ix in connected_edges_sorted]
@@ -190,6 +194,7 @@ def _get_einsum_expr(self, contraction_info: ContractionInfo) -> str:
         # edge 2 is (Port(t_ref=0, ix=2)) #k
         # edge 3 is (Port(t_ref=1, ix=2)) #l
 
+        # TODO: don't need this dict, use chars instead
         edge_to_char = {i: CHARS[i] for i in range(0, len(self._edges))}
         # np.einsum('ijk,jil->jkl', a, b)
         # expr = ','.join(''.join(index_to_char[port.ix] for edge in self._edges for port in edge) for t in self._tensors) + '->' + \
@@ -236,12 +241,13 @@ def new_random_cpu(cls, count, size, dim: int):
                 partition_dict[eix] = partition_dict.get(eix, [])
                 partition_dict[eix].append(new_port)
 
         # add "self" tensor indices to partition
-        for i in range(len(out.shape)):
-            eix = partition_fn()
-            new_port = Port(tensor_ref=-1, ix=i)
-            partition_dict[eix] = partition_dict.get(eix, [])
-            partition_dict[eix].append(new_port)
+        # commented out to debug einsum err
+        # for i in range(len(out.shape)):
+        #     eix = partition_fn()
+        #     new_port = Port(tensor_ref=-1, ix=i)
+        #     partition_dict[eix] = partition_dict.get(eix, [])
+        #     partition_dict[eix].append(new_port)
 
         edges = []
         for i in range(edges_cnt):
@@ -267,8 +273,9 @@ def __repr__(self):
 if __name__ == "__main__":
     dim = 3
     tn = TensorNetwork.new_random_cpu(2, dim, 4)
-    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
-    sliced_tn = tn.slice(slice_dict)
+    # slice_dict = {0: slice(0, 2), 1: slice(1, 3)} # TODO: this shouldn't affect original tn
+    # sliced_tn = tn.slice(slice_dict) # TODO: go through debugger here to make sure that certain edges of the same port aren't being skipped
+    # TODO: st i can run contract on a sliced tn without it breaking
 
     # Where did I leave off?
     # Having trouble verifying tests, perhaps logic is incorrect but it makes sense to me
@@ -277,6 +284,18 @@ def __repr__(self):
     random_indices_to_contract = tn._get_random_indices_to_contract(2)
 
     contraction_info = ContractionInfo(tuple(random_indices_to_contract))
-    contracted_tensor = tn.contract(contraction_info)
-    import pdb; pdb.set_trace()
\ No newline at end of file
+    contracted_tensor = tn.contract(contraction_info)
+    print("success")
+    # import pdb; pdb.set_trace()
+
+"""
+dae,dca->be
+[(4, 4, 4), (4, 4, 4)]
+((Port(tensor_ref=0, ix=1), Port(tensor_ref=1, ix=2)), (Port(tensor_ref=-1, ix=2), Port(tensor_ref=-1, ix=3)), (Port(tensor_ref=1, ix=1),), (Port(tensor_ref=0, ix=0), Port(tensor_ref=1, ix=0)), (Port(tensor_ref=0, ix=2), Port(tensor_ref=-1, ix=1)), (Port(tensor_ref=-1, ix=0),))
+2
+--Return--
+[1] > /app/scratchpad/tn_api/tn.py(160)contract()->None
+-> import pdb; pdb.set_trace()
+(Pdb++) np.einsum(einsum_expr, *self._tensors)
+*** ValueError: einstein sum subscripts string included output subscript 'b' which never appeared in an input
+"""
\ No newline at end of file

From efbab98f786481b29eb69f4d3d0c50536034fa05 Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 4 Dec 2023 12:03:46 +0000
Subject: [PATCH 14/15] Fix copy so that it is not in place

---
 scratchpad/tn_api/tn.py | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 71146b7d..9def0acd 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -104,9 +104,9 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
 
     def copy(self):
         new = TensorNetwork()
-        new._tensors = self._tensors
-        new._edges = self._edges
-        new.shape = self.shape
+        new._tensors = self._tensors[:]
+        new._edges = self._edges[:]
+        new.shape = self.shape[:]
         return new
 
     def add(self, other: "TensorNetwork | np.ndarray"):
@@ -154,6 +154,7 @@ def _get_random_indices_to_contract(self, count=2):
         print(einsum_expr)
         print([t.shape for t in self._tensors])
         print(self._edges)
         print(len(self._tensors))
+        import pdb; pdb.set_trace()
         try:
             return np.einsum(einsum_expr, *self._tensors)
         except:
@@ -236,6 +237,7 @@ def new_random_cpu(cls, count, size, dim: int):
 
         # add "self" tensor indices to partition
         # commented out to debug einsum err
+        # TODO: need to fix this einsum issue
         # for i in range(len(out.shape)):
         #     eix = partition_fn()
         #     new_port = Port(tensor_ref=-1, ix=i)
@@ -268,8 +270,8 @@ def __repr__(self):
     dim = 3
     tn = TensorNetwork.new_random_cpu(2, dim, 4)
-    # slice_dict = {0: slice(0, 2), 1: slice(1, 3)} # TODO: this shouldn't affect original tn
-    # sliced_tn = tn.slice(slice_dict) # TODO: go through debugger here to make sure that certain edges of the same port aren't being skipped
+    slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
+    sliced_tn = tn.slice(slice_dict) # TODO: go through debugger here to make sure that certain edges of the same port aren't being skipped
     # TODO: st i can run contract on a sliced tn without it breaking
 
     # Where did I leave off?
     # Having trouble verifying tests, perhaps logic is incorrect but it makes sense to me
@@ -279,10 +281,10 @@ def __repr__(self):
     random_indices_to_contract = tn._get_random_indices_to_contract(2)
 
     contraction_info = ContractionInfo(tuple(random_indices_to_contract))
-
+    import pdb; pdb.set_trace()
     contracted_tensor = tn.contract(contraction_info)
     print("success")
-    # import pdb; pdb.set_trace()
+    import pdb; pdb.set_trace()
 
 """

From 1631afbc874a7baa417c895c9fa9f02d7806cd5f Mon Sep 17 00:00:00 2001
From: Dallon Asnes <30935722+dallonasnes@users.noreply.github.com>
Date: Mon, 4 Dec 2023 12:45:52 +0000
Subject: [PATCH 15/15] Stuck

---
 scratchpad/tn_api/tn.py | 52 ++++++++++++++++++++++++++++++-----------
 1 file changed, 39 insertions(+), 13 deletions(-)

diff --git a/scratchpad/tn_api/tn.py b/scratchpad/tn_api/tn.py
index 9def0acd..aed4ceca 100644
--- a/scratchpad/tn_api/tn.py
+++ b/scratchpad/tn_api/tn.py
@@ -66,7 +66,7 @@ def __init__(self, *args, **kwargs):
     # slice not inplace
     def slice(self, slice_dict: dict) -> 'TensorNetwork':
         tn = self.copy()
-
+        new_edge_list = []
         for idx, slice_val in slice_dict.items():
             # make sure idx is valid
             if idx >= len(tn._edges):
@@ -75,7 +75,7 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
             edge_list = list(tn._edges)
             edge = edge_list.pop(idx)
             # now put the updated edges back on the class
-            tn._edges = tuple(edge_list)
+            tn._edges = tuple(edge_list) # TODO @dallon - is this the issue, that i'm getting rid of all of the edges?
             # get all tensors indexed by this edge
             tensors_to_slice = set(port.tensor_ref for port in edge)
             # store slice index and value for each tensor
@@ -98,7 +98,17 @@ def slice(self, slice_dict: dict) -> 'TensorNetwork':
                         slice_bounds.append(slice_dict[idx])
                     except KeyError:
                         slice_bounds.append(slice(None))
-                tn._tensors[current_tensor_ref] = tn._tensors[current_tensor_ref][tuple(slice_bounds)]
+                sliced_tensor = tn._tensors[current_tensor_ref][tuple(slice_bounds)]
+                tn._tensors[current_tensor_ref] = sliced_tensor
+
+                # TODO: this is just a guess - but i am adding the ports from the popped edge back to the list of slices
+                # for port in edge:
+                #     if port.ix in slice_dict and port.tensor_ref == current_tensor_ref:
+                #         new_edge_list.append(Port(tensor_ref=current_tensor_ref, ix=port.ix))
+
+                # # update the tensor network's edges with the new edges
+                # tn._edges = tuple(new_edge_list)
+
         return tn
 
     def copy(self):
@@ -154,11 +164,17 @@ def _get_random_indices_to_contract(self, count=2):
         einsum_expr = self._get_einsum_expr(contraction_info)
         print(einsum_expr)
         print([t.shape for t in self._tensors])
         print(self._edges)
         print(len(self._tensors))
-        import pdb; pdb.set_trace()
         try:
             return np.einsum(einsum_expr, *self._tensors)
-        except:
+        except Exception as e:
+            print(e)
             import pdb; pdb.set_trace()
+            keep_going = True
+            while keep_going:
+                einsum_expr = self._get_einsum_expr(contraction_info)
+                res = np.einsum(einsum_expr, *self._tensors)
+                import pdb; pdb.set_trace()
 
     # for reference, see qtensor/contraction_backends/numpy.py -> get_einsum_expr
@@ -282,21 +298,26 @@ def __repr__(self):
 if __name__ == "__main__":
     dim = 3
     tn = TensorNetwork.new_random_cpu(2, dim, 4)
     slice_dict = {0: slice(0, 2), 1: slice(1, 3)}
-    sliced_tn = tn.slice(slice_dict) # TODO: go through debugger here to make sure that certain edges of the same port aren't being skipped
+    sliced_tn = tn.slice(slice_dict)
+    print(len(sliced_tn._edges))
+    import pdb; pdb.set_trace()
+    # TODO: go through slice method debugger here to make sure that certain edges of the same port aren't being skipped
     # TODO: st i can run contract on a sliced tn without it breaking
 
-    # Where did I leave off?
-    # Having trouble verifying tests, perhaps logic is incorrect but it makes sense to me
+    """
+    # TODO: issue is that slicing results in no edges
+    ,-> #einsum expression
+    [(2, 2, 2), (2, 2, 2)] #tensor shapes
+    () #edges
+    2 #tensors count
+    """
 
     # can also do "contract all except..." by knowing indices of edges in tn
     # generate random indices to contract
-    random_indices_to_contract = tn._get_random_indices_to_contract(2)
+    random_indices_to_contract = sliced_tn._get_random_indices_to_contract(2)
 
     contraction_info = ContractionInfo(tuple(random_indices_to_contract))
-    import pdb; pdb.set_trace()
-    contracted_tensor = tn.contract(contraction_info)
-    print("success")
+    print("starting not sliced tensor")
+    contracted_tensor_not_slice = tn.contract(contraction_info)
+    print("finished not sliced tensor")
+    print("starting sliced tensor")
+    contracted_tensor = sliced_tn.contract(contraction_info)
+    print("finished sliced tensor")
     import pdb; pdb.set_trace()
 
 """
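Appendix: standalone sketches of the ideas worked on in this series. These are illustrative, hedged examples added alongside the patches, not code from the branch itself; all names in them are made up for the example.

The conj sketch in PATCH 01 builds a network together with its elementwise conjugate so the two can be contracted against each other. A minimal numpy-only model of that idea: contracting a state tensor with its own conjugate over all open indices yields the squared norm <psi|psi>.

import numpy as np

psi = np.random.rand(2, 2, 2) + 1j * np.random.rand(2, 2, 2)
# pair every open index of psi with the same index of conj(psi)
norm_sq = np.einsum('ijk,ijk->', psi, np.conj(psi))
assert np.isclose(norm_sq.imag, 0.0)   # a squared norm is real
assert np.isclose(norm_sq.real, np.vdot(psi, psi).real)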
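The new_random_cpu comment in PATCH 02 describes generating random connectivity as a partition: every (tensor, axis) "port" is assigned to one of E parts by a random function, and each non-empty part becomes a hyperedge. A standalone sketch of that partition idea, assuming two rank-3 tensors as in new_random_cpu(2, 3, ...):

import numpy as np

rng = np.random.default_rng(0)
tensor_ndims = [3, 3]
ports = [(t_ref, ax) for t_ref, nd in enumerate(tensor_ndims) for ax in range(nd)]
E = 4  # number of parts, i.e. candidate hyperedges
partition = {}
for port in ports:
    # the random partition function assigns every port to exactly one part
    partition.setdefault(int(rng.integers(E)), []).append(port)
edges = [tuple(p) for p in partition.values()]
# every port lands in exactly one hyperedge, so a port-count check like
# test_edges_consistent_ports holds by construction
assert sum(len(e) for e in edges) == len(ports)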
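The slice method iterated on in PATCHES 04-06 and 15 implements the standard slicing trick: restricting a shared index on every tensor it touches turns one contraction into several smaller ones whose results combine back into the full answer. A toy check of that identity with two matrices (illustrative, not the class API):

import numpy as np

a = np.random.rand(3, 4)   # indices i, j
b = np.random.rand(4, 5)   # indices j, k
full = np.einsum('ij,jk->ik', a, b)
# slice the shared index j on every tensor it touches, then sum the parts
parts = sum(
    np.einsum('ij,jk->ik', a[:, j:j+1], b[j:j+1, :])
    for j in range(a.shape[1])
)
assert np.allclose(full, parts)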
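The einsum-expression construction that _get_einsum_expr converges on in PATCHES 09-13 can also be modeled standalone: one subscript letter per edge, each tensor's subscripts are its incident edges ordered by port index, and output subscripts may only name edges that touch at least one real tensor. The ValueError recorded in the PATCH 13 docstring ("output subscript 'b' which never appeared in an input") is exactly what happens when a result index names an edge whose only ports have tensor_ref == -1. A sketch under those assumptions:

import string
import numpy as np

tensors = [np.random.rand(2, 2, 2), np.random.rand(2, 2, 2)]
# edges[e] lists (tensor_ref, axis) ports, mirroring the Port dataclass
edges = [
    [(0, 0), (1, 1)],   # shared edge 'a'
    [(0, 1), (1, 0)],   # shared edge 'b'
    [(0, 2)],           # free axis of tensor 0 -> 'c'
    [(1, 2)],           # free axis of tensor 1 -> 'd'
]
result_edges = [2, 3]   # valid output: both edges touch a real tensor

subscripts = []
for t_ref in range(len(tensors)):
    # collect (axis, edge) pairs incident to this tensor, ordered by axis
    incident = sorted(
        (ax, e_ix)
        for e_ix, edge in enumerate(edges)
        for ref, ax in edge
        if ref == t_ref
    )
    subscripts.append(''.join(string.ascii_lowercase[e] for _, e in incident))

expr = ','.join(subscripts) + '->' + ''.join(string.ascii_lowercase[e] for e in result_edges)
print(expr)                      # abc,bad->cd
out = np.einsum(expr, *tensors)  # shared edges summed, free edges kept
assert out.shape == (2, 2)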