test_conv.py
import unittest
import numpy as np
from tinygrad.tensor import Tensor, Device
from tinygrad.helpers import Context

class TestConv(unittest.TestCase):
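  # all-ones conv: each interior output sums 3*3*12 ones = 108; padded corners and edges sum fewer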
  def test_simple(self):
    x = Tensor.ones(1,12,128,256).contiguous().realize()
    w = Tensor.ones(32,12,3,3).contiguous().realize()
    ret = x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
    # it's not 108 around the padding
    assert (ret[:, :, 1:-1, 1:-1] == 108).all()
    assert ret[0,0,0,0] == 48
    assert ret[0,0,0,1] == 72
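
  # a strided, padded conv on random input should simply realize without error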
  def test_simple_rand(self):
    x = Tensor.rand(1,12,128,256)
    w = Tensor.rand(32,12,3,3)
    x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
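
  # a 1x1 conv with an identity (eye) weight should pass channels through unchanged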
  def test_many_simple(self):
    x = Tensor(np.arange(8*2*8).reshape(1,8,2,8).astype(np.float32))
    #w = Tensor(np.arange(8*8*1*1).reshape(8,8,1,1).astype(np.float32))
    w = Tensor.eye(8).reshape((8,8,1,1))
    ret = x.conv2d(w, stride=(1,2), padding=(0,0)).numpy()
    print(ret)
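
  # the two identical reshape chains should be deduplicated by the lazy cache, not built twice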
  def test_lazycache(self):
    Tensor.no_grad = True
    x = Tensor.rand(1, 32)
    y = Tensor.rand(32)
    out = x + y.reshape((1,32,1)).reshape((1,32)) + y.reshape((1,32,1)).reshape((1,32))
    out.numpy()
    Tensor.no_grad = False
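
  # identity conv plus a per-channel bias, applied twice with a relu in between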
  def test_simple_biased(self):
    C = 8
    x = Tensor.rand(1,C,5,5)
    w = Tensor.eye(C).reshape((C,C,1,1))
    b = Tensor(np.arange(C).astype(np.float32))
    ret = Tensor.conv2d(x,w,b).relu().conv2d(w,b)
    print(ret.numpy())
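
  # two consumers of the same conv output should reuse its buffer instead of rerunning the conv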
  def test_two_binops_no_rerun(self):
    Tensor.no_grad = True
    x = Tensor.randn(1,12,128,256)
    w = Tensor.randn(32,12,3,3)
    out = x.conv2d(w, stride=(2,2), padding=(1,1))
    r1, r2 = out.relu(), (out-1)
    np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
    np.testing.assert_allclose(r2.numpy(), out.numpy() - 1)
    Tensor.no_grad = False
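
  # relu and elu both read the same conv output; neither realization should trigger a recompute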
  def test_two_overlapping_binops_no_rerun(self):
    Tensor.no_grad = True
    x = Tensor.randn(1,12,128,256)
    w = Tensor.randn(32,12,3,3)
    out = x.conv2d(w, stride=(2,2), padding=(1,1))
    r1, r2 = out.relu(), out.elu()
    np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
    np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
    Tensor.no_grad = False
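
  # same as above, but with Winograd convolution enabled via the WINO context flag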
  @unittest.skipIf(Device.DEFAULT != "TORCH", "Takes too long to compile for Compiled backends")
  def test_two_overlapping_binops_no_rerun_wino(self):
    Tensor.no_grad = True
    with Context(WINO=1):
      x = Tensor.randn(1,4,16,16)
      w = Tensor.randn(6,4,3,3)
      out = x.conv2d(w, padding=(1,1))
      r1, r2 = out.relu(), out.elu()
      np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
      np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
    Tensor.no_grad = False
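
  # a depthwise-separable-style stack: strided conv, grouped (depthwise) conv, pointwise 1x1 conv, each with elu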
  def test_first_three(self):
    Tensor.no_grad = True
    x = Tensor.rand(1,12,128,256)
    w = Tensor.rand(32,12,3,3)
    x = x.conv2d(w, stride=(2,2), padding=(1,1)).elu()
    w = Tensor.rand(32,1,3,3)
    x = x.conv2d(w, padding=(1,1), groups=32).elu()
    w = Tensor.rand(16,32,1,1)
    x = x.conv2d(w).elu()
    x = x.numpy()
    print(x.shape)
    Tensor.no_grad = False
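
  # elu between a strided conv and a depthwise (groups=32) conv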
  def test_elu(self):
    Tensor.no_grad = True
    x = Tensor.rand(1,12,128,256)
    w = Tensor.rand(32,12,3,3)
    x = x.conv2d(w, stride=(2,2), padding=(1,1))
    x = x.elu()
    w = Tensor.rand(32,1,3,3)
    x = x.conv2d(w, padding=(1,1), groups=32)
    x.numpy()
    Tensor.no_grad = False
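
  # a full reduction followed by an elementwise relu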
  def test_reduce_relu(self):
    Tensor.no_grad = True
    x = Tensor.rand(1,12,128,256)
    x = x.sum(keepdim=True).relu()
    x.numpy()
    Tensor.no_grad = False
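
  # an nn.Conv2d layer (which carries a bias) feeding a grouped conv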
  def test_bias(self):
    Tensor.no_grad = True
    from tinygrad.nn import Conv2d
    x = Tensor.rand(1,12,128,256)
    c = Conv2d(12, 32, 3)
    x = c(x).relu()
    w = Tensor.uniform(32, 1, 3, 3)
    x = x.conv2d(w, groups=32)
    x.numpy()
    Tensor.no_grad = False
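
  # adding a lazy relu result to another tensor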
  def test_multiadd(self):
    w = Tensor.rand(32)
    x = Tensor.rand(32).relu()
    (w+x).numpy()
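
  # reshape between a conv and in-place adds, then reshape back; the data should survive the reorder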
  def test_reorder(self):
    x = Tensor.rand(1,12,128,256)
    w = Tensor.rand(12,12,3,3)
    x = x.conv2d(w, padding=(1,1))
    print(x.shape)
    x = x.reshape((1, 12, 256, 128))
    x += 1
    x += 1
    x = x.reshape((1, 12, 128, 256))
    x.numpy()

if __name__ == '__main__':
  unittest.main()