Onehot.cpp (forked from pytorch/pytorch)
#include <ATen/ATen.h>

namespace at { namespace native {

Tensor one_hot(const Tensor &self, int64_t num_classes) {
  TORCH_CHECK(self.dtype() == kLong, "one_hot is only applicable to index tensors.");
  auto shape = self.sizes().vec();

  // An empty tensor can be converted to a one-hot representation,
  // but shape inference is not possible.
  if (self.numel() == 0) {
    if (num_classes <= 0) {
      TORCH_CHECK(false, "Cannot infer the total number of classes from an empty tensor.");
    } else {
      shape.push_back(num_classes);
      return at::empty(shape, self.options());
    }
  }
  // non-empty tensor
  if (self.device().type() != at::kCUDA) {
    // on CPU, validate eagerly; on CUDA, rely on the device-side assert thrown by scatter
    TORCH_CHECK(self.min().item().toLong() >= 0, "Class values must be non-negative.");
  }
  if (num_classes == -1) {
    num_classes = self.max().item().toLong() + 1;
  } else {
    if (self.device().type() != at::kCUDA) {
      // on CPU, check here; on CUDA, rely on device-side asserts from scatter to avoid a sync
      TORCH_CHECK(num_classes > self.max().item().toLong(),
          "Class values must be smaller than num_classes.");
    } else {
      // on CUDA, only assert that num_classes is at least 1
      TORCH_CHECK(num_classes >= 1, "num_classes should be positive");
    }
  }
  shape.push_back(num_classes);
  Tensor ret = at::zeros(shape, self.options());
  // write a 1 at each index given by self along the appended class dimension
  ret.scatter_(-1, self.unsqueeze(-1), 1);
  return ret;
}

} // namespace native
} // namespace at
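
For reference, a minimal caller-side sketch of the function above. It assumes a standard libtorch/ATen build to compile and link against; the tensor values and the main() harness are illustrative, not part of this file.

// usage_sketch.cpp -- illustrative only; assumes linking against libtorch/ATen.
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // one_hot requires an index tensor of dtype kLong (see the check above).
  at::Tensor idx = at::tensor({0, 2, 1}, at::kLong);

  // num_classes == -1 infers the class count from the data: max() + 1 == 3.
  at::Tensor inferred = at::one_hot(idx, /*num_classes=*/-1);  // shape [3, 3]

  // An explicit num_classes appends a trailing dimension of that size.
  at::Tensor widened = at::one_hot(idx, /*num_classes=*/5);    // shape [3, 5]

  std::cout << inferred << "\n" << widened << std::endl;
  return 0;
}

The scatter_ call in one_hot is what produces the 1s: each value in self selects a position along the new trailing dimension of the zero-initialized result.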