-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathgpu_example.py
More file actions
116 lines (76 loc) · 2.58 KB
/
gpu_example.py
File metadata and controls
116 lines (76 loc) · 2.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
import torch
from torch.autograd import Variable
import poly
import numpy as np
import time
# Pin torch's CPU intra-op parallelism to four threads so the CPU timings
# below are reproducible (module-level side effect on all torch CPU ops).
torch.set_num_threads(4)
def gpu_test():
    """Sanity-check poly.legendre on the GPU against numpy's reference.

    Builds the degree-30 Legendre pseudo-Vandermonde matrix for 100 points
    in [-1, 1] with poly.legendre on a CUDA tensor, then compares it against
    numpy.polynomial.legendre.legvander computed on the same points.

    Raises:
        AssertionError: if the Frobenius-norm difference exceeds 1e-5.
    """
    dtype = torch.cuda.FloatTensor
    order = 30
    x = torch.linspace(-1, 1, 100).type(dtype)
    vand_torch = poly.legendre(x, order)
    print("torch done")
    # `.data` is a legacy Variable-era accessor and is unnecessary on plain
    # tensors; `.cpu().numpy()` is the supported conversion path.
    x_np = x.cpu().numpy()
    vand = np.polynomial.legendre.legvander(x_np, order)
    difference = np.linalg.norm(vand - vand_torch.cpu().numpy())
    print("difference = ", difference)
    assert difference < 1e-5, "pytorch and numpy.legendre not same"
def gpu_time(N, order):
    """Build an order-`order` Legendre Vandermonde for N points on the GPU.

    Returns the matrix produced by poly.legendre for N equally spaced
    points in [-1, 1], evaluated on a CUDA float tensor.
    """
    grid = torch.linspace(-1, 1, N).type(torch.cuda.FloatTensor)
    return poly.legendre(grid, order)
# def gpu_time_nd(order, dim, x):
# dtype = torch.cuda.FloatTensor
# ptorch = MultiLegendre(dim, order)
# vand_torch = ptorch(Variable(x.type(dtype)))
# return vand_torch
def cpu_time(N, order):
    """Build an order-`order` Legendre Vandermonde for N points on the CPU.

    Returns the matrix produced by poly.legendre for N equally spaced
    points in [-1, 1], evaluated on a CPU float tensor.
    """
    grid = torch.linspace(-1, 1, N).type(torch.FloatTensor)
    return poly.legendre(grid, order)
# def cpu_time_nd(order, dim, x):
# dtype = torch.FloatTensor
# ptorch = MultiLegendre(dim, order)
# vand_torch = ptorch(Variable(x.type(dtype)))
# return vand_torch
def numpy_time(N, order):
    """Return numpy's Legendre pseudo-Vandermonde matrix.

    Evaluates Legendre polynomials up to degree `order` at N equally
    spaced points in [-1, 1]; the result has shape (N, order + 1).
    """
    sample_points = np.linspace(-1.0, 1.0, N)
    return np.polynomial.legendre.legvander(sample_points, order)
if __name__ == "__main__":
    # Verify GPU and numpy agree before benchmarking.
    gpu_test()
    N = 1000000
    order = 200
    print("Univariate ")
    # time.clock() was removed in Python 3.8; time.perf_counter() is the
    # recommended monotonic timer for benchmarking.
    start = time.perf_counter()
    gpu_vand = gpu_time(N, order)
    # CUDA kernels launch asynchronously: block until the device is done so
    # the elapsed time measures the computation, not just the kernel launch.
    torch.cuda.synchronize()
    end = time.perf_counter()
    print("GPU Elapsed time = ", end - start)
    start = time.perf_counter()
    cpu_vand = cpu_time(N, order)
    end = time.perf_counter()
    print("CPU Elapsed time = ", end - start)
    start = time.perf_counter()
    numpy_vand = numpy_time(N, order)
    end = time.perf_counter()
    print("Numpy Elapsed time = ", end - start)
    # Relative Frobenius-norm difference between GPU and CPU results.
    diff = torch.norm(gpu_vand.cpu() - cpu_vand) / torch.norm(cpu_vand)
    print("diff = ", diff)
    # print("Multivariate (warning on CPU takes almost a minute )")
    # dim = 10
    # order = 5
    # N = 100000
    # x = torch.rand(N, dim) * 2.0 - 1.0
    # start = time.perf_counter()
    # gpu_vand = gpu_time_nd(order, dim, x)
    # end = time.perf_counter()
    # print("Multivariate GPU Elapsed time = ", end - start)
    # start = time.perf_counter()
    # cpu_vand = cpu_time_nd(order, dim, x)
    # end = time.perf_counter()
    # print("Multivariate CPU Elapsed time = ", end - start)
    # diff = torch.norm(gpu_vand.cpu() - cpu_vand) / torch.norm(cpu_vand)
    # print("diff = ", diff)