forked from pytorch/pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathStorage.cpp
More file actions
47 lines (40 loc) · 1.02 KB
/
Storage.cpp
File metadata and controls
47 lines (40 loc) · 1.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
#define __STDC_FORMAT_MACROS
#include "torch/csrc/python_headers.h"
#ifdef _MSC_VER
#include <Windows.h>
#endif
#include <structmember.h>
#define THP_HOST_HALF
#include <stdbool.h>
#include <TH/TH.h>
// See Note [TH abstraction violation]
// - Used to get at the allocator associated with a storage
#include <TH/THStorageFunctions.hpp>
#include <libshm.h>
#include "THP.h"
#include "copy_utils.h"
#include "DynamicTypes.h"
#ifdef USE_CUDA
#include <THC/THCStorage.hpp>
#endif
#include "generic/Storage.cpp"
#include <TH/THGenerateAllTypes.h>
#include "generic/Storage.cpp"
#include <TH/THGenerateHalfType.h>
// NB: If you ever divest libtorch of USE_CUDA, you'll have to virtualize
// the CUDA call.
// Release the THStorage held by this smart pointer, dispatching on the
// storage's device: CPU storages are freed directly; CUDA storages are
// freed only when built with USE_CUDA, otherwise this is a hard error.
template<>
void THPPointer<THStorage>::free() {
  // Nothing to release for a null pointer.
  if (!ptr) {
    return;
  }
  const auto device = ptr->data_ptr().device();
  if (device.is_cpu()) {
    THStorage_free(ptr);
    return;
  }
  // Only CPU and CUDA storages are expected to reach this point.
  AT_ASSERT(device.is_cuda());
#ifdef USE_CUDA
  THStorage_free(ptr);
#else
  AT_ERROR("Cannot free THCStorage when not built with CUDA");
#endif
}