diff --git a/doc/excuter/op-mem-cuda/list.md b/doc/excuter/op-mem-cuda/list.md
index 9913a248..1d63988d 100644
--- a/doc/excuter/op-mem-cuda/list.md
+++ b/doc/excuter/op-mem-cuda/list.md
@@ -16,5 +16,6 @@
 | newtensor | none | newtensor(vector shape)->(tensor tensor1) | T1 = zeros(shape) | newtensor(vector shape)->(tensor tensor1) |
 | newtensor | none | newtensor(var shape)->(tensor tensor1) | T1 = zeros(shape) | newtensor(var shape)->(tensor tensor1) |
 | vecset | none | vecset(vector value)->(vector name) | shape = [3 4 5] | vecset(vector value)->(vector name) |
+| matmul | cublas | matmul(tensor A, tensor B)->(tensor C) | T3=T1 @ T2 | matmul(tensor A, tensor B)->(tensor C) |
 | sub | miaobyte | sub(tensor A, tensor B)->(tensor C) | T3=T1-T2 | sub(tensor A, tensor B)->(tensor C) |
 | argset | none | argset(var value)->(var name) | var argname = argvalue | argset(var value)->(var name) |
diff --git a/doc/excuter/op-mem-ompsimd/list.md b/doc/excuter/op-mem-ompsimd/list.md
index f10183f4..0b7d18d3 100644
--- a/doc/excuter/op-mem-ompsimd/list.md
+++ b/doc/excuter/op-mem-ompsimd/list.md
@@ -17,5 +17,7 @@
 | newtensor | none | newtensor(vector shape)->(tensor tensor1) | T1 =Tensor(shape=[...]) | newtensor(vector shape)->(tensor tensor1) |
 | newtensor | none | newtensor(var shape)->(tensor tensor1) | T1 =Tensor(shape=[...]) | newtensor(var shape)->(tensor tensor1) |
 | vecset | none | vecset(vector value)->(vector name) | shape = [3 4 5] | vecset(vector value)->(vector name) |
+| matmul | cblas | matmul(tensor A, tensor B)->(tensor C) | T3=T1 @ T2 | matmul(tensor A, tensor B)->(tensor C) |
+| matmul | miaobyte | matmul(tensor A, tensor B)->(tensor C) | T3=T1 @ T2 | matmul(tensor A, tensor B)->(tensor C) |
 | sub | miaobyte | sub(tensor a, tensor b)->(tensor c) | T3=T1-T2 | sub(tensor a, tensor b)->(tensor c) |
 | argset | none | argset(var value)->(var name) | var argname = argvalue | argset(var value)->(var name) |
diff --git a/excuter/cpp-common/src/deepx/tensorfunc/matmul.hpp b/excuter/cpp-common/src/deepx/tensorfunc/matmul.hpp
index 7e95f24c..ca844f4b 100644
--- a/excuter/cpp-common/src/deepx/tensorfunc/matmul.hpp
+++ b/excuter/cpp-common/src/deepx/tensorfunc/matmul.hpp
@@ -3,7 +3,7 @@
 
 #include "deepx/tensor.hpp"
 #include "deepx/tensorfunc/authors.hpp"
-
+#include "stdutil/error.hpp"
 namespace deepx::tensorfunc
 {
     bool check_matmul_shape(const Shape &a, const Shape &b)
@@ -29,7 +29,10 @@ namespace deepx::tensorfunc
     template <typename Author, typename T>
     struct matmulDispatcher
     {
-        static void matmul(const Tensor<T> &A, const Tensor<T> &B, Tensor<T> &C) = delete;
+        static void matmul(const Tensor<T> &A, const Tensor<T> &B, Tensor<T> &C)
+        {
+            throw NotImplementError("matmul");
+        }
     };
 
     template <typename Author, typename T>
diff --git a/excuter/op-mem-cuda/src/client/tfs.cpp b/excuter/op-mem-cuda/src/client/tfs.cpp
index cfcbec3b..fa83af24 100644
--- a/excuter/op-mem-cuda/src/client/tfs.cpp
+++ b/excuter/op-mem-cuda/src/client/tfs.cpp
@@ -4,6 +4,7 @@
 #include "deepx/tf/print.hpp"
 #include "deepx/tf/init.hpp"
 #include "deepx/tf/elementwise_basic.hpp"
+#include "deepx/tf/matmul.hpp"
 #include "deepx/dtype.hpp"
 #include "deepx/tf/tffactory.hpp"
 #include "deepx/tensorfunc/authors.hpp"
@@ -173,12 +174,19 @@ namespace deepx::tf
         //     opfactory.add_op(Powscalar_miaobyte<float>());
         //     opfactory.add_op(Powscalar_miaobyte<double>());
     }
-    // // matmul
-    // void register_matmul(OpFactory &opfactory)
-    // {
-    //     opfactory.add_op(MatMul<float>());
-    //     opfactory.add_op(MatMul<double>());
-    // }
+    // matmul
+    void register_matmul(TfFactory &tffactory)
+    {
+        tffactory.add_tf(std::make_shared<MatMul<tensorfunc::cublas>>(vector<Param>(
+            {
+                Param("A", DataCategory::Tensor, Precision::Any),
+                Param("B", DataCategory::Tensor, Precision::Any),
+            }),
+            vector<Param>(
+            {
+                Param("C", DataCategory::Tensor, Precision::Any),
+            })));
+    }
     // // changeshape
     void register_changeshape(TfFactory &tffactory)
     {
@@ -207,7 +215,7 @@
         register_init(tffactory);
         register_util(tffactory);
         register_elementwise(tffactory);
-        // register_matmul(opfactory);
+        register_matmul(tffactory);
         register_changeshape(tffactory);
         // register_reduce(opfactory);
         return 0;
diff --git a/excuter/op-mem-cuda/src/deepx/tf/matmul.hpp b/excuter/op-mem-cuda/src/deepx/tf/matmul.hpp
new file mode 100644
index 00000000..76c444fd
--- /dev/null
+++ b/excuter/op-mem-cuda/src/deepx/tf/matmul.hpp
@@ -0,0 +1,88 @@
+#ifndef DEEPX_TF_MATMUL_HPP
+#define DEEPX_TF_MATMUL_HPP
+
+#include <cuda_fp16.h>
+#include <cuda_bf16.h>
+
+#include "deepx/tf/tf.hpp"
+#include "deepx/dtype.hpp"
+#include "deepx/dtype_cuda.hpp"
+#include "deepx/tensorfunc/matmul_cublas.hpp"
+
+namespace deepx::tf
+{
+    template <typename Author>
+    class MatMul : public TF
+    {
+    public:
+        MatMul(const vector<Param> &args, const vector<Param> &returns)
+        {
+            this->name = "matmul";
+            this->author = Author::name();
+            this->args = args;
+            this->returns = returns;
+        }
+
+        MatMul(string text)
+        {
+            this->parse(text);
+            this->author = Author::name();
+            if (this->name != "matmul")
+            {
+                throw std::runtime_error("Invalid name: " + this->name);
+            }
+        }
+        string math_formula() const override
+        {
+            return "T3=T1 @ T2";
+        }
+        shared_ptr<TF> clone() const override
+        {
+            return make_shared<MatMul<Author>>(*this);
+        }
+        int run(shared_ptr<MemBase> mem, string &error) override
+        {
+            Precision a_type = mem->gettensor(this->args[0].textvalue).get()->shape.dtype;
+            Precision b_type = mem->gettensor(this->args[1].textvalue).get()->shape.dtype;
+            Precision c_type = mem->gettensor(this->returns[0].textvalue).get()->shape.dtype;
+            if (a_type != b_type || a_type != c_type)
+            {
+                error = "Type mismatch: " + precision_str(a_type) + " != " + precision_str(b_type) + " != " + precision_str(c_type);
+                return 1;
+            }
+            switch (a_type)
+            {
+            case Precision::Float64:
+                tensorfunc::matmul<Author>(*mem->gettensor<double>(this->args[0].textvalue), *mem->gettensor<double>(this->args[1].textvalue), *mem->gettensor<double>(this->returns[0].textvalue));
+                break;
+            case Precision::Float32:
+                tensorfunc::matmul<Author>(*mem->gettensor<float>(this->args[0].textvalue), *mem->gettensor<float>(this->args[1].textvalue), *mem->gettensor<float>(this->returns[0].textvalue));
+                break;
+            case Precision::Float16:
+                tensorfunc::matmul<Author>(*mem->gettensor<half>(this->args[0].textvalue), *mem->gettensor<half>(this->args[1].textvalue), *mem->gettensor<half>(this->returns[0].textvalue));
+                break;
+            case Precision::BFloat16:
+                tensorfunc::matmul<Author>(*mem->gettensor<nv_bfloat16>(this->args[0].textvalue), *mem->gettensor<nv_bfloat16>(this->args[1].textvalue), *mem->gettensor<nv_bfloat16>(this->returns[0].textvalue));
+                break;
+            case Precision::Int64:
+                tensorfunc::matmul<Author>(*mem->gettensor<int64_t>(this->args[0].textvalue), *mem->gettensor<int64_t>(this->args[1].textvalue), *mem->gettensor<int64_t>(this->returns[0].textvalue));
+                break;
+            case Precision::Int32:
+                tensorfunc::matmul<Author>(*mem->gettensor<int32_t>(this->args[0].textvalue), *mem->gettensor<int32_t>(this->args[1].textvalue), *mem->gettensor<int32_t>(this->returns[0].textvalue));
+                break;
+            case Precision::Int16:
+                tensorfunc::matmul<Author>(*mem->gettensor<int16_t>(this->args[0].textvalue), *mem->gettensor<int16_t>(this->args[1].textvalue), *mem->gettensor<int16_t>(this->returns[0].textvalue));
+                break;
+            case Precision::Int8:
+                tensorfunc::matmul<Author>(*mem->gettensor<int8_t>(this->args[0].textvalue), *mem->gettensor<int8_t>(this->args[1].textvalue), *mem->gettensor<int8_t>(this->returns[0].textvalue));
+                break;
+            default:
dtype: " + precision_str(a_type); + return 1; + } + return 0; + } + }; +} + +#endif diff --git a/excuter/op-mem-ompsimd/src/client/tfs.cpp b/excuter/op-mem-ompsimd/src/client/tfs.cpp index b2de5145..4eab0c4d 100644 --- a/excuter/op-mem-ompsimd/src/client/tfs.cpp +++ b/excuter/op-mem-ompsimd/src/client/tfs.cpp @@ -8,7 +8,7 @@ #include "deepx/tf/changeshape.hpp" #include "deepx/tf/elementwise.hpp" #include "deepx/tf/tffactory.hpp" - +#include "deepx/tf/matmul.hpp" #include "deepx/tensorfunc/authors.hpp" namespace deepx::tf { @@ -186,12 +186,28 @@ namespace deepx::tf // opfactory.add_op(Powscalar_miaobyte()); // opfactory.add_op(Powscalar_miaobyte()); } - // // matmul - // void register_matmul(OpFactory &opfactory) - // { - // opfactory.add_op(MatMul()); - // opfactory.add_op(MatMul()); - // } + // matmul + void register_matmul(TfFactory &tffactory) + { + tffactory.add_tf(std::make_shared>(vector( + { + Param("A", DataCategory::Tensor, Precision::Any), + Param("B", DataCategory::Tensor, Precision::Any), + }), + vector( + { + Param("C", DataCategory::Tensor, Precision::Any), + }))); + tffactory.add_tf(std::make_shared>(vector( + { + Param("A", DataCategory::Tensor, Precision::Float64|Precision::Float32), + Param("B", DataCategory::Tensor, Precision::Float64|Precision::Float32), + }), + vector( + { + Param("C", DataCategory::Tensor, Precision::Float64|Precision::Float32), + }))); + } // // changeshape void register_changeshape(TfFactory &tffactory) { @@ -220,7 +236,7 @@ namespace deepx::tf register_init(tffactory); register_util(tffactory); register_elementwise(tffactory); - // register_matmul(opfactory); + register_matmul(tffactory); register_changeshape(tffactory); // register_reduce(opfactory); return 0; diff --git a/excuter/op-mem-ompsimd/src/deepx/tensorfunc/matmul_cblas.hpp b/excuter/op-mem-ompsimd/src/deepx/tensorfunc/matmul_cblas.hpp index 8f6401c7..1e1371f6 100644 --- a/excuter/op-mem-ompsimd/src/deepx/tensorfunc/matmul_cblas.hpp +++ b/excuter/op-mem-ompsimd/src/deepx/tensorfunc/matmul_cblas.hpp @@ -1,5 +1,5 @@ -#ifndef DEEPX_TENSORFUNC_MATMUL_HPP -#define DEEPX_TENSORFUNC_MATMUL_HPP +#ifndef DEEPX_TENSORFUNC_MATMUL_CBLAS_HPP +#define DEEPX_TENSORFUNC_MATMUL_CBLAS_HPP #include // 如果使用 OpenBLAS #include "deepx/tensor.hpp" @@ -64,7 +64,7 @@ namespace deepx::tensorfunc { static void matmul(const Tensor &a, const Tensor &b, Tensor &c) { - if (!check_shape(a.shape, b.shape)) + if (!check_matmul_shape(a.shape, b.shape)) { throw std::invalid_argument("a.shape could matmul with b.shape"); } @@ -150,7 +150,7 @@ namespace deepx::tensorfunc { static void matmuladd(const Tensor &a, const Tensor &b, const float &alpha, const float &beta, Tensor &c) { - if (!check_shape(a.shape, b.shape)) + if (!check_matmul_shape(a.shape, b.shape)) { throw std::invalid_argument("a.shape could matmul with b.shape"); } @@ -208,7 +208,7 @@ namespace deepx::tensorfunc { static void matmuladd(const Tensor &a, const Tensor &b, const double &alpha, const double &beta, Tensor &c) { - if (!check_shape(a.shape, b.shape)) + if (!check_matmul_shape(a.shape, b.shape)) { throw std::invalid_argument("a.shape could matmul with b.shape"); } @@ -261,4 +261,4 @@ namespace deepx::tensorfunc } }; } -#endif // DEEPX_TENSORFUNC_MATMUL_HPP \ No newline at end of file +#endif // DEEPX_TENSORFUNC_MATMUL_CBLAS_HPP \ No newline at end of file diff --git a/excuter/op-mem-ompsimd/src/deepx/tf/matmul.hpp b/excuter/op-mem-ompsimd/src/deepx/tf/matmul.hpp new file mode 100644 index 00000000..77d61208 --- /dev/null +++ 
b/excuter/op-mem-ompsimd/src/deepx/tf/matmul.hpp @@ -0,0 +1,80 @@ +#ifndef DEEPX_TF_MATMUL_HPP +#define DEEPX_TF_MATMUL_HPP + +#include "deepx/tf/tf.hpp" +#include "deepx/dtype.hpp" +#include "deepx/dtype_ompsimd.hpp" +#include "deepx/tensorfunc/matmul.hpp" +#include "deepx/tensorfunc/matmul_cblas.hpp" +#include "deepx/tensorfunc/matmul_miaobyte.hpp" +namespace deepx::tf +{ + template + class MatMul : public TF + { + public: + MatMul(const vector &args, const vector &returns) + { + this->name = "matmul"; + this->author = Author::name(); + this->args = args; + this->returns = returns; + } + + MatMul(string text) + { + this->parse(text); + this->author = Author::name(); + if (this->name != "matmul") + { + throw std::runtime_error("Invalid name: " + this->name); + } + } + string math_formula() const override + { + return "T3=T1 @ T2"; + } + shared_ptr clone() const override + { + return make_shared>(*this); + } + int run(shared_ptr mem, string &error) override + { + Precision a_type = mem->gettensor(this->args[0].textvalue).get()->shape.dtype; + Precision b_type = mem->gettensor(this->args[1].textvalue).get()->shape.dtype; + Precision c_type = mem->gettensor(this->returns[0].textvalue).get()->shape.dtype; + if (a_type != b_type || a_type != c_type) + { + error = "Type mismatch: " + precision_str(a_type) + " != " + precision_str(b_type) + " != " + precision_str(c_type); + return 1; + } + switch (a_type) + { + case Precision::Float64: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + case Precision::Float32: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + case Precision::Int64: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + case Precision::Int32: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + case Precision::Int16: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + case Precision::Int8: + tensorfunc::matmul(*mem->gettensor(this->args[0].textvalue), *mem->gettensor(this->args[1].textvalue), *mem->gettensor(this->returns[0].textvalue)); + break; + default: + error = "Unsupported dtype: " + precision_str(a_type); + return 1; + } + return 0; + } + }; +} + +#endif diff --git a/front/py/deepx/nn/functional/elementwise.py b/front/py/deepx/nn/functional/elementwise.py index cd12979c..ecf3c0c3 100644 --- a/front/py/deepx/nn/functional/elementwise.py +++ b/front/py/deepx/nn/functional/elementwise.py @@ -1,13 +1,13 @@ from typing import Optional, Union from deepx import Tensor from deepx.autograd import Graph,DataNode,OpNode -from deepx.nn import DeepxIR +from deepx.nn import DeepxIR,Param from deepx.scheduler import send from .changeshape import broadcast_shape def _A_elementwiseop_C( a:Tensor, op:str=None, - out:Union[Tensor,str]="")->Tensor: + out:Union[Tensor,str]="",author='miaobyte')->Tensor: g=a.graph opnode = g.add_op(op) @@ -20,7 +20,7 @@ def _A_elementwiseop_C( outtensor=out outtensor.node.add_input(opnode) if g.eager: - ir=DeepxIR(op, a.dtype, [a.node.name], [outtensor.node.name]) + ir=DeepxIR(op, [a.node.name], 
+        ir=DeepxIR(op, [a.node.name], [outtensor.node.name],author)
         send(ir)
     return outtensor
 
@@ -28,7 +28,7 @@ def _A_B_elementwiseop_C(
     a:Tensor,
     b: Tensor,
     op:str=None,
-    out:Union[Tensor,str]="")->Tensor:
+    out:Union[Tensor,str]="",author='miaobyte')->Tensor:
     g=a.graph
     if g is None:
         g=b.graph
@@ -53,14 +53,14 @@
         outtensor=out
     outtensor.node.add_input(opnode)
     if g.eager:
-        ir=DeepxIR(op, A.dtype, [A.node.name, B.node.name], [outtensor.node.name])
+        ir=DeepxIR(op, [A.node.name, B.node.name], [outtensor.node.name],author)
         send(ir)
     return outtensor
 def _A_b_elementwiseop_C(
     a:Tensor,
     b: Union[ float, int] ,
     op:str=None,
-    out:Union[Tensor,str]="")->Tensor:
+    out:Union[Tensor,str]="",author='miaobyte')->Tensor:
     g=a.graph
     opnode = g.add_op(op)
     opnode.add_input(a.node)
@@ -74,14 +74,14 @@
         outtensor=out
     outtensor.node.add_input(opnode)
     if g.eager:
-        ir=DeepxIR(op, a.dtype, [a.node.name,b], [outtensor.node.name])
+        ir=DeepxIR(op, [a.node.name,b], [outtensor.node.name],author)
         send(ir)
     return outtensor
 def _a_B_elementwiseop_C(
     a: Union[ float, int] ,
     b: Tensor,
     op:str=None,
-    out:Union[Tensor,str]="")->Tensor:
+    out:Union[Tensor,str]="",author='miaobyte')->Tensor:
     g=b.graph
     opnode = g.add_op(op)
     opnode.add_input(g.add_var("",a))
@@ -95,7 +95,7 @@
         outtensor=out
     outtensor.node.add_input(opnode)
     if g.eager:
-        ir=DeepxIR(op, b.dtype, [a,b.node.name], [outtensor.node.name])
+        ir=DeepxIR(op, [a,b.node.name], [outtensor.node.name],author)
         send(ir)
     return outtensor
 
@@ -106,7 +106,7 @@
 def add(
     a:Tensor,
     b: Optional[Union[Tensor, float, int]] = None,
-    out:Union[Tensor,str]='')->Tensor:
+    out:Union[Tensor,str]='',author='miaobyte')->Tensor:
     if isinstance(b,Tensor):
-        return _A_B_elementwiseop_C(a,b,"add",out)
+        return _A_B_elementwiseop_C(a,b,"add",out,author)
     else:
@@ -120,7 +120,7 @@
 def sub(
     a:Tensor,
     b: Optional[Union[Tensor, float, int]] = None,
-    out:Union[Tensor,str]='')->Tensor:
+    out:Union[Tensor,str]='',author='miaobyte')->Tensor:
     if isinstance(b,Tensor):
-        return _A_B_elementwiseop_C(a,b,"sub",out)
+        return _A_B_elementwiseop_C(a,b,"sub",out,author)
     else:
@@ -133,7 +133,7 @@
 def mul(
     a:Tensor,
     b: Optional[Union[Tensor, float, int]] = None,
-    out:Union[Tensor,str]='')->Tensor:
+    out:Union[Tensor,str]='',author='miaobyte')->Tensor:
     if isinstance(b,Tensor):
-        return _A_B_elementwiseop_C(a,b,"mul",out)
+        return _A_B_elementwiseop_C(a,b,"mul",out,author)
     else:
@@ -147,7 +147,7 @@
 def div(
     a: Optional[Union[Tensor, float, int]] = None,
     b: Optional[Union[Tensor, float, int]] = None,
-    out:Union[Tensor,str]='')->Tensor:
+    out:Union[Tensor,str]='',author='miaobyte')->Tensor:
     if isinstance(b,Tensor) and isinstance(a,Tensor):
-        return _A_B_elementwiseop_C(a,b,"div",out)
+        return _A_B_elementwiseop_C(a,b,"div",out,author)
     else:
diff --git a/front/py/deepx/nn/functional/init.py b/front/py/deepx/nn/functional/init.py
index 182e696d..bb5b8f40 100644
--- a/front/py/deepx/nn/functional/init.py
+++ b/front/py/deepx/nn/functional/init.py
@@ -3,19 +3,19 @@
 from deepx import Tensor
 from deepx.autograd.graph import OpNode
-from deepx.nn.deepxir import DeepxIR
+from deepx.nn.deepxir import DeepxIR,Param
 from deepx.scheduler import send
 
 OpNode.register("constant")
 def constant(t:Tensor, value:Optional[Union[
-    float,int]]=None) -> Tensor:
+    float,int]]=None,author='miaobyte') -> Tensor:
     opnode = t.graph.add_op("constant")
     argnode=t.graph.add_var('',value)
     opnode.add_input(argnode)
     t.node.add_input(opnode)
     if t.graph.eager:
-        ir=DeepxIR("constant", t.dtype, [value], [t.node.name])
+        ir=DeepxIR("constant", [Param(t.node.name, 'tensor', t.dtype),Param(value)], [],author)
         send(ir)
     return t
 
@@ -39,7 +39,7 @@
 def ones(*size, dtype=None, device=None, name:Union[str]='')->Tensor:
     return full(*size, value=1, dtype=dtype, device=device,name=name)
 
-def arange(start=0, end=None, step=1,dtype=None, device=None,name:Union[Tensor,str]='')->Tensor:
+def arange(start=0, end=None, step=1,dtype=None, device=None,name:Union[Tensor,str]='',author='miaobyte')->Tensor:
     outtensor=None
     if isinstance(name,str):
         shape=[end-start]
@@ -49,12 +49,12 @@
         outtensor=name
     g=outtensor.graph
     if g.eager:
-        ir=DeepxIR("arange", outtensor.dtype, [start,step], [outtensor.node.name])
+        ir=DeepxIR("arange", [outtensor.node.name,start,step], [],author)
         send(ir)
     return outtensor
 
 OpNode.register("uniform")
-def uniform(t:Tensor,low=0, high=1,seed:int=0)->Tensor:
+def uniform(t:Tensor,low=0, high=1,seed:int=0,author='miaobyte')->Tensor:
     if low >= high:
         raise ValueError(f"low({low}) must be less than high({high})")
     if t is None:
@@ -68,7 +68,7 @@
     opnode.add_input(g.add_var('',seed))
     t.node.add_input(opnode)
     if t.graph.eager:
-        ir=DeepxIR("uniform", t.dtype, [low, high,seed], [t.node.name])
+        ir=DeepxIR("uniform", [t.node.name,low, high,seed], [],author)
         send(ir)
     return t
diff --git a/front/py/deepx/nn/functional/matmul.py b/front/py/deepx/nn/functional/matmul.py
index 0609cb0d..4a788f9f 100644
--- a/front/py/deepx/nn/functional/matmul.py
+++ b/front/py/deepx/nn/functional/matmul.py
@@ -11,7 +11,8 @@
 def matmul(
     a:Tensor,
     b: Tensor,
-    out:Union[Tensor,str]='')->Tensor:
+    out:Union[Tensor,str]='',
+    author:str='cublas')->Tensor:
     opnode = a.graph.add_op("matmul")
     opnode.add_input(a.node)
     opnode.add_input(b.node)
@@ -25,6 +26,6 @@
         outtensor=out
     outtensor.node.add_input(opnode)
     if a.graph.eager:
-        ir=DeepxIR("matmul", a.dtype, [a.node.name,b.node.name], [outtensor.node.name])
+        ir=DeepxIR("matmul", [a.node.name,b.node.name], [outtensor.node.name], author=author)
         send(ir)
     return outtensor
diff --git a/front/py/deepx/scheduler/client/udpconn.py b/front/py/deepx/scheduler/client/udpconn.py
index a25b0963..6a12c26a 100644
--- a/front/py/deepx/scheduler/client/udpconn.py
+++ b/front/py/deepx/scheduler/client/udpconn.py
@@ -3,7 +3,7 @@
 import select
 
 class UDPConn:
-    def __init__(self, endpoint: str = "localhost:8080"):
+    def __init__(self, endpoint: str = "localhost:9090"):
         # parse the endpoint
         self._host, port_str = endpoint.split(':')
         self._port = int(port_str)
diff --git a/front/py/examples/2_ir/1_init_zeroones.dot b/front/py/examples/2_ir/1_init_zeroones.dot
index a78b90cc..33d54af8 100644
--- a/front/py/examples/2_ir/1_init_zeroones.dot
+++ b/front/py/examples/2_ir/1_init_zeroones.dot
@@ -2,55 +2,55 @@ digraph {
     rankdir=TB
     node [shape=record]
-    134462353640752 [label="t1
+    132815942520016 [label="t1
 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled]
-    134460378946608 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled]
-    134460378946848 [label="var_1
+    132813646230768 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled]
+    132814271881056 [label="var_1
 0" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled]
-    134460377149792 [label="t2
+    132813645298272 [label="t2
 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled]
fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377150032 [label="var_2 + 132813645298464 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645298080 [label="var_2 1" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377150272 [label=add color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377150464 [label="t3 + 132813645298704 [label=add color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645298512 [label="t3 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377150560 [label="t4 + 132813645298800 [label="t4 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377150800 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377150752 [label="var_3 + 132813645299136 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645299088 [label="var_3 0.5" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377150896 [label=add color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377151040 [label="t5 + 132813645298944 [label=add color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645299424 [label="t5 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377151472 [label="tensor_6 + 132813645299664 [label="tensor_6 (3, 4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377151568 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377151520 [label="var_4 + 132813645293616 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645293664 [label="var_4 0" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377151712 [label=uniform color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] - 134460377151232 [label="var_5 + 132813645293280 [label=uniform color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled] + 132813645299616 [label="var_5 -0.5477225575051661" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377151904 [label="var_6 + 132813645293568 [label="var_6 0.5477225575051661" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460377151856 [label="var_7 + 132813645293424 [label="var_7 0" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled] - 134460378946608 -> 134462353640752 [arrowsize=0.8 color=gray40 penwidth=1.2] - 134460378946848 -> 134460378946608 [arrowsize=0.8 color=gray40 penwidth=1.2] - 134460377149936 -> 134460377149792 [arrowsize=0.8 color=gray40 penwidth=1.2] - 134460377150032 -> 134460377149936 [arrowsize=0.8 color=gray40 penwidth=1.2] - 134462353640752 -> 134460377150272 [arrowsize=0.8 color=gray40 penwidth=1.2] - 
-    134460377149792 -> 134460377150272 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150272 -> 134460377150464 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150800 -> 134460377150560 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150752 -> 134460377150800 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150560 -> 134460377150896 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150464 -> 134460377150896 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377150896 -> 134460377151040 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151568 -> 134460377151472 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151712 -> 134460377151472 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151520 -> 134460377151568 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151232 -> 134460377151712 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151904 -> 134460377151712 [arrowsize=0.8 color=gray40 penwidth=1.2]
-    134460377151856 -> 134460377151712 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813646230768 -> 132815942520016 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132814271881056 -> 132813646230768 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298464 -> 132813645298272 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298080 -> 132813645298464 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132815942520016 -> 132813645298704 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298272 -> 132813645298704 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298704 -> 132813645298512 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645299136 -> 132813645298800 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645299088 -> 132813645299136 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298800 -> 132813645298944 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298512 -> 132813645298944 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645298944 -> 132813645299424 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645293616 -> 132813645299664 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645293280 -> 132813645299664 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645293664 -> 132813645293616 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645299616 -> 132813645293280 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645293568 -> 132813645293280 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    132813645293424 -> 132813645293280 [arrowsize=0.8 color=gray40 penwidth=1.2]
 }
diff --git a/front/py/examples/2_ir/1_init_zeroones.dot.svg b/front/py/examples/2_ir/1_init_zeroones.dot.svg
index ffd2506d..474bbbd8 100644
--- a/front/py/examples/2_ir/1_init_zeroones.dot.svg
+++ b/front/py/examples/2_ir/1_init_zeroones.dot.svg
@@ -9,244 +9,244 @@
 [SVG hunk elided: Graphviz re-render of 1_init_zeroones.dot; only the auto-generated node IDs changed, matching the .dot diff above.]
diff --git a/front/py/examples/2_ir/3_matmul.dot b/front/py/examples/2_ir/3_matmul.dot
new file mode 100644
index 00000000..c8e3c65f
--- /dev/null
+++ b/front/py/examples/2_ir/3_matmul.dot
@@ -0,0 +1,25 @@
+// Computational Graph
+digraph {
+    rankdir=TB
+    node [shape=record]
+    135175655853216 [label="t1
+(3, 4)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled]
+    135173962560752 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled]
+    135173963166624 [label="var_1
+1" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled]
+    135173961432896 [label="t2
+(4, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled]
+    135173961432704 [label=constant color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled]
+    135173961432224 [label="var_2
+1" color=orange fillcolor=moccasin fontname="Sans-Serif" labeljust=l shape=box style=filled]
+    135173961432464 [label=matmul color=darkslategray fillcolor=lightgray fontname="Courier Bold" labeljust=l shape=box style=filled]
+    135173961432416 [label="tensor_3
+(3, 5)" color=skyblue fillcolor=aliceblue fontname="Sans-Serif" labeljust=l shape=box style=filled]
+    135173962560752 -> 135175655853216 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135173963166624 -> 135173962560752 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135173961432704 -> 135173961432896 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135173961432224 -> 135173961432704 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135175655853216 -> 135173961432464 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135173961432896 -> 135173961432464 [arrowsize=0.8 color=gray40 penwidth=1.2]
+    135173961432464 -> 135173961432416 [arrowsize=0.8 color=gray40 penwidth=1.2]
+}
diff --git a/front/py/examples/2_ir/3_matmul.dot.svg b/front/py/examples/2_ir/3_matmul.dot.svg
new file mode 100644
index 00000000..c3cffc0b
--- /dev/null
+++ b/front/py/examples/2_ir/3_matmul.dot.svg
@@ -0,0 +1,108 @@
 [SVG body elided: Graphviz render of 3_matmul.dot; t1 (3, 4) and t2 (4, 5) flow through the matmul node into tensor_3 (3, 5).]
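
For reviewers who want to exercise the new op end to end, here is a minimal front-end sketch. It uses only calls visible in this diff (`full` from `deepx/nn/functional/init.py`, `matmul` with the new `author` keyword) and assumes an executor is already running behind the scheduler's UDP endpoint, which this PR moves to localhost:9090; the tensor names and the expected result are illustrative, not verified output.

```python
# Sketch only: mirrors front/py/examples/2_ir/3_matmul.dot.
# Assumes a running excuter (op-mem-ompsimd or op-mem-cuda) reachable
# through the scheduler's UDP endpoint (localhost:9090 after this PR).
from deepx.nn.functional import full, matmul

t1 = full(3, 4, value=1, name="t1")  # (3, 4) tensor of ones, as in the example graph
t2 = full(4, 5, value=1, name="t2")  # (4, 5) tensor of ones

# author selects the registered kernel: "cublas" on op-mem-cuda,
# "cblas" or "miaobyte" on op-mem-ompsimd.
t3 = matmul(t1, t2, out="t3", author="cblas")
# In eager mode the DeepxIR is sent immediately; t3 should come back
# as a (3, 5) tensor whose entries are all 4.
```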