#pragma once
#include <c10/core/Allocator.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/InferenceMode.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/ScalarType.h>
#include <c10/core/ScalarTypeToTypeMeta.h>
#include <c10/core/Storage.h>
#include <c10/core/SymBool.h>
#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/SymbolicShapeMeta.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/core/impl/PyObjectSlot.h>
#include <c10/core/impl/SizesAndStrides.h>
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/Flags.h>
#include <c10/util/Optional.h>
#include <c10/util/accumulate.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/irange.h>
#include <c10/util/safe_numerics.h>
#include <c10/util/typeid.h>
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
// A global boolean variable to control whether we free memory when a Tensor
// is shrunk to a smaller size. When memory is kept, a Tensor always retains
// the memory allocated for the largest capacity it has been resized to so
// far.
//
// This parameter is respected by "upper-case" methods which call Resize()
// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_
// or ShrinkTo, both of which guarantee never to free memory.
C10_DECLARE_bool(caffe2_keep_on_shrink);
// Since we can have high variance in blob memory allocated across different
// inputs in the same run, we will shrink the blob only if the memory gain
// is larger than this flag in bytes. This only applies to functions which
// respect caffe2_keep_on_shrink.
C10_DECLARE_int64(caffe2_max_keep_on_shrink_memory);
namespace at {
class Tensor;
class TensorBase;
} // namespace at
namespace c10 {
/**
* A utility function to convert vector<int> to vector<int64_t>.
*/
inline std::vector<int64_t> ToVectorint64_t(const ArrayRef<int>& src) {
return std::vector<int64_t>(src.begin(), src.end());
}
/**
* Return product of all dimensions starting from k
*/
inline int64_t size_from_dim_(int k, IntArrayRef dims) {
int64_t r = 1;
for (const auto i : c10::irange(k, dims.size())) {
r *= dims[i];
}
return r;
}
// Product of all dims up to k (not including dims[k])
inline int64_t size_to_dim_(int k, IntArrayRef dims) {
TORCH_CHECK(k >= 0 && static_cast<size_t>(k) <= dims.size());
int64_t r = 1;
for (const auto i : c10::irange(k)) {
r *= dims[i];
}
return r;
}
// Product of all dims between k and l (not including dims[k] and dims[l])
inline int64_t size_between_dim_(int k, int l, IntArrayRef dims) {
TORCH_CHECK((unsigned)l < dims.size() && (unsigned)k < dims.size());
int64_t r = 1;
if (k < l) {
for (int i = k + 1; i < l; ++i) {
r *= dims[i];
}
} else {
for (int i = l + 1; i < k; ++i) {
r *= dims[i];
}
}
return r;
}
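// Illustrative example of the three helpers above (values follow directly
// from the definitions): given dims = {2, 3, 4},
//   size_from_dim_(1, dims)       == 12  // 3 * 4
//   size_to_dim_(2, dims)         == 6   // 2 * 3
//   size_between_dim_(0, 2, dims) == 3   // only dims[1]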
// Wrap around axis_index if it is negative, s.t., -1 is the last dim
inline int canonical_axis_index_(int axis_index, int ndims) {
TORCH_CHECK(axis_index >= -ndims);
TORCH_CHECK(axis_index < ndims);
if (axis_index < 0) {
return axis_index + ndims;
}
return axis_index;
}
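// For example, with ndims == 4: canonical_axis_index_(-1, 4) == 3 and
// canonical_axis_index_(2, 4) == 2; out-of-range inputs such as -5 or 4
// fail the TORCH_CHECKs above.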
using PlacementDtor = void (*)(void*, size_t);
/*
* A context that calls an extra placement deleter during
* destruction.
*
* It accepts an already constructed DataPtr and stores it as a member;
* during destruction, we call the extra deleter on the underlying
* data pointer before the DataPtr itself is destructed.
* `data_ptr_` owns the memory.
*/
struct C10_API PlacementDeleteContext {
DataPtr data_ptr_;
PlacementDtor placement_dtor_;
size_t size_;
PlacementDeleteContext(
DataPtr&& data_ptr,
PlacementDtor placement_dtor,
size_t size)
: data_ptr_(std::move(data_ptr)),
placement_dtor_(placement_dtor),
size_(size) {}
static DataPtr makeDataPtr(
DataPtr&& data_ptr,
PlacementDtor placement_dtor,
size_t size,
Device device);
~PlacementDeleteContext() {
placement_dtor_(data_ptr_.get(), size_);
// original memory will be freed when data_ptr_ is destructed
}
};
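// A minimal sketch of how a placement deleter plugs in (illustrative only;
// `MyElem`, `raw_ptr`, `numel` and `device` below are hypothetical):
//
//   void destroy_my_elems(void* data, size_t numel) {
//     auto* p = static_cast<MyElem*>(data);
//     for (size_t i = 0; i < numel; ++i) {
//       p[i].~MyElem(); // run destructors before the allocation is freed
//     }
//   }
//
//   DataPtr wrapped = PlacementDeleteContext::makeDataPtr(
//       std::move(raw_ptr), destroy_my_elems, numel, device);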
struct C10_API AutogradMetaInterface {
virtual void set_requires_grad(
bool requires_grad,
at::TensorImpl* self_impl) = 0;
virtual bool requires_grad() const = 0;
virtual at::Tensor& mutable_grad() = 0;
virtual const at::Tensor& grad() const = 0;
virtual const at::Tensor& fw_grad(uint64_t level, const at::TensorBase& self)
const = 0;
virtual void set_fw_grad(
const at::TensorBase& new_grad,
const at::TensorBase& self,
uint64_t level,
bool is_inplace_op) = 0;
virtual ~AutogradMetaInterface();
};
namespace impl {
// Unfortunately, the definition of AutogradMeta lives in a separate
// compilation unit from TensorImpl (libtorch.so versus libc10.so),
// which means that we cannot construct an AutogradMeta from TensorImpl,
// not even from the cpp file. So we have to indirect it through a factory
// function which will be initialized when we load libtorch.so.
struct C10_API AutogradMetaFactory {
virtual ~AutogradMetaFactory() = default;
virtual std::unique_ptr<AutogradMetaInterface> make() const = 0;
// This method is a workaround: we don't have access to Tensor (as
// opposed to TensorImpl) here, since it is not defined in this header.
virtual const at::Tensor& undefined_tensor() const = 0;
};
C10_API void SetAutogradMetaFactory(AutogradMetaFactory* factory);
C10_API AutogradMetaFactory* GetAutogradMetaFactory();
struct C10_API AutogradMetaFactoryRegisterer {
explicit AutogradMetaFactoryRegisterer(AutogradMetaFactory* factory) {
SetAutogradMetaFactory(factory);
}
};
} // namespace impl
struct C10_API NamedTensorMetaInterface {
virtual ~NamedTensorMetaInterface() = default;
virtual std::unique_ptr<NamedTensorMetaInterface> clone() const {
TORCH_INTERNAL_ASSERT(
false, "Not implemented: NamedTensorMetaInterface::clone");
};
virtual int64_t slow_dim() const {
TORCH_INTERNAL_ASSERT(
false, "Not implemented: NamedTensorMetaInterface::slow_dim");
};
};
// For ease of copy pasting
#if 0
is_contiguous
is_channels_last_contiguous
is_channels_last_3d_contiguous
is_channels_last
is_channels_last_3d
is_non_overlapping_and_dense
#endif
/**
* This structure is intended to hold additional metadata for a specific
* device backend.
**/
struct C10_API BackendMeta : intrusive_ptr_target {
~BackendMeta() override = default;
virtual intrusive_ptr<BackendMeta> clone(
const intrusive_ptr<BackendMeta>& ptr) const {
return ptr;
}
};
struct C10_API ExtraMeta {
std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta_ = nullptr;
std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta_ = nullptr;
intrusive_ptr<c10::BackendMeta> backend_meta_ = nullptr;
c10::optional<std::string> custom_data_ptr_error_msg_ = c10::nullopt;
c10::optional<std::string> custom_storage_error_msg_ = c10::nullopt;
ExtraMeta() = default;
ExtraMeta(const ExtraMeta& other) {
if (other.symbolic_shape_meta_) {
symbolic_shape_meta_ =
std::make_unique<c10::SymbolicShapeMeta>(*other.symbolic_shape_meta_);
}
if (other.named_tensor_meta_) {
named_tensor_meta_ = other.named_tensor_meta_->clone();
}
if (other.backend_meta_) {
backend_meta_ = other.backend_meta_->clone(other.backend_meta_);
}
if (other.custom_data_ptr_error_msg_) {
custom_data_ptr_error_msg_ = other.custom_data_ptr_error_msg_;
}
if (other.custom_storage_error_msg_) {
custom_storage_error_msg_ = other.custom_storage_error_msg_;
}
}
ExtraMeta(
std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta,
std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta,
intrusive_ptr<c10::BackendMeta> backend_meta,
c10::optional<std::string> custom_data_ptr_error_msg = c10::nullopt,
c10::optional<std::string> custom_storage_access_error_msg = c10::nullopt)
: symbolic_shape_meta_(std::move(symbolic_shape_meta)),
named_tensor_meta_(std::move(named_tensor_meta)),
backend_meta_(std::move(backend_meta)),
custom_data_ptr_error_msg_(std::move(custom_data_ptr_error_msg)),
custom_storage_error_msg_(std::move(custom_storage_access_error_msg)) {}
std::unique_ptr<ExtraMeta> clone() const {
return std::make_unique<ExtraMeta>(*this);
}
};
// NOTE [ Version Counter Sharing ]
//
// Every Tensor has a version counter. Version counters are incremented whenever
// the data or size of a tensor changes through in-place Variable operations.
// Version counters are used to detect modifications to saved variables which
// would result in incorrect gradient calculations. Version counters may be
// shared between Variables:
//
// 1. A view shares the version counter of the base Variable,
// 2. `x.detach()` shares the version counter of `x`,
// 3. Unpacked saved variables share the version counter of the source.
//
// Version counters are not shared in these scenarios:
//
// 1. When we replace a `Variable`'s underlying `Tensor` by calling
// `set_data(...)`,
// 2. `x.data` does not share the version counter of `x`. (See discussion at
// https://github.com/pytorch/pytorch/issues/5396)
//
// Question: Why do we put the version counter in TensorImpl instead of
// AutogradMeta?
//
// Answer: After the Variable/Tensor merge, a tensor will not have AutogradMeta
// when its `requires_grad_` is false, but when we use this tensor in the
// forward pass of a function that requires saving this tensor for backward, we
// need to keep track of this tensor's version to make sure it's always valid in
// the autograd graph.
//
// To achieve this goal, we put the version counter in TensorImpl instead of
// AutogradMeta, and have it always be available. This allows us to have the
// optimization of not carrying AutogradMeta when a tensor doesn't require
// gradient.
//
// A hypothetical alternative way to achieve this goal is to initialize
// AutogradMeta and create the version counter for the non-requires-grad tensor
// only when it's saved for backward. However, since saving a tensor for
// backward happens in the forward pass, and our invariant is that forward pass
// needs to be thread-safe, lazy-initializing AutogradMeta when saving a tensor
// can introduce race conditions when we are running the forward pass in
// multi-thread scenarios, thus making the forward pass not thread-safe anymore,
// which breaks the invariant.
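// As a concrete illustration of the rules above (Python-level, for
// exposition only): an in-place op on a base tensor bumps the counter that
// the base shares with its views and with `base.detach()`, which is how
// autograd detects that a saved tensor was modified; `x.data` and tensors
// swapped in via `set_data(...)` get an independent counter and are not
// covered by this check.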
struct C10_API VariableVersion {
private:
struct VersionCounter : intrusive_ptr_target {
VersionCounter(uint32_t version) : version_(version) {}
std::atomic<uint32_t> version_;
};
c10::intrusive_ptr<VersionCounter> version_counter_;
public:
// Note [Disabled VariableVersion]
// The VariableVersion struct holds an intrusive_ptr to a VersionCounter
// struct with an atomic variable. Thus `VariableVersion(/*version=*/0)` is
// not as cheap as one might expect. Since constructing a VariableVersion
// with version 0 is not always necessary, we add a cheap constructor which
// doesn't allocate the intrusive_ptr.
// Example use cases are:
// - Inference tensors don't track the version counter, so they'll always
// have a disabled VariableVersion.
// - In the SavedVariable class we override version_counter_ inside its
// constructor, so that we can use the cheap constructor there.
enum Disabled { DISABLED };
// It's okay to return true even for an inference tensor, which
// doesn't have the version counter enabled.
// We want to be permissive here since in many cases (e.g. make_variable)
// we can std::move a TensorImpl if there are no other uses, which saves us
// an additional TensorImpl allocation.
bool unique() const {
return version_counter_ ? 1 == version_counter_.use_count() : true;
}
// NOTE: As of C++11 and 14, default-constructing a std::atomic variable
// leaves it in a persistently undefined state. See
// https://cplusplus.github.io/LWG/issue2334.
VariableVersion(uint32_t version)
: version_counter_(c10::make_intrusive<VersionCounter>(version)) {}
VariableVersion(Disabled = DISABLED) {}
bool enabled() const {
return version_counter_;
}
// Note [Inplace update inference tensor]
// 1. Inplace updates to an inference tensor are forbidden in normal mode.
// For example:
// inference_tensor.copy_(normal_tensor_requires_grad)
// This inplace update would make inference_tensor have requires_grad=True
// and a grad_fn. This is bad because views of `inference_tensor`
// created in InferenceMode won't be able to know the grad_fn since
// their ViewMeta was not recorded. To match the NoGradMode behavior
// that "an inplace update to a view created in NoGradMode raises an error",
// we simply ban inplace updates to inference tensors, since we can't tell
// if an inference tensor is a view created in InferenceMode.
//
// Note that views of a normal tensor created in InferenceMode have proper
// ViewMeta, so they are aware of the grad_fn correctly.
//
// 2. Inplace updates to an inference tensor in inference mode don't bump
// the version counter.
// * Either they don't call bump() at all, because the ADInplaceOrView
// kernel is skipped,
// - e.g. inference_tensor.add_(1)
// * or bump() is a no-op for inference tensors.
// - e.g. inference_tensor.add_(normal_tensor)
void bump() {
// TODO: Replace the link to the documentation once it's available.
TORCH_CHECK(
version_counter_ || InferenceMode::is_enabled(),
"Inplace update to inference tensor outside InferenceMode is not allowed."
"You can make a clone to get a normal tensor before doing inplace update."
"See https://github.com/pytorch/rfcs/pull/17 for more details.");
if (version_counter_) {
++version_counter_->version_;
}
}
void set_version(int64_t i) {
TORCH_CHECK(
version_counter_,
"Tried to call torch.autograd._unsafe_set_version() on a tensor "
"that does not have a version counter. Was it created in inference mode?");
TORCH_CHECK(i >= 0, "Cannot set a version_counter to a value below 0: ", i);
version_counter_->version_ = i;
}
// Inference tensor doesn't have version counter so it shouldn't be
// accessed.
uint32_t current_version() const {
TORCH_CHECK(
version_counter_, "Inference tensors do not track version counter.");
return version_counter_->version_;
}
};
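// Illustrative usage of VariableVersion (sketch only):
//
//   VariableVersion vv(/*version=*/0);  // allocates a VersionCounter
//   vv.bump();                          // stored version becomes 1
//   uint32_t v = vv.current_version();  // == 1
//
//   VariableVersion off;                // DISABLED: no counter allocated
//   bool e = off.enabled();             // false; off.bump() outside
//                                       // InferenceMode would throw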
// Forward declaration of TensorImpl needed for forward declaration of
// C10_TensorImpl_Size_Check_Dummy_Class
struct C10_API TensorImpl;
/**
* NOTE: Some TensorImpl methods are small and not overridden in the
* PyTorch codebase itself, but may theoretically need to be
* overridden by third-party TensorImpl subclasses. This macro allows
* users that need maximum performance and don't need these extension
* points to disable them with a build-time flag. (In particular,
* XLA's XLATensorImpl currently overrides these methods, so we can't
* enable this flag by default.)
*/
#ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY
#define TENSORIMPL_MAYBE_VIRTUAL
#else
#define TENSORIMPL_MAYBE_VIRTUAL virtual
#endif
/**
* The low-level representation of a tensor, which contains a pointer
* to a storage (which contains the actual data) and metadata (e.g., sizes and
* strides) describing this particular view of the data as a tensor.
*
* Some basic characteristics about our in-memory representation of
* tensors:
*
* - It contains a pointer to a storage struct (Storage/StorageImpl)
* which contains the pointer to the actual data and records the
* data type and device of the view. This allows multiple tensors
* to alias the same underlying data, which allows us to efficiently
* implement differing *views* on a tensor.
*
* - The tensor struct itself records view-specific metadata about
* the tensor, e.g., sizes, strides and offset into storage.
* Each view of a storage can have a different size or offset.
*
* - This class is intrusively refcounted. It is refcounted so that
* we can support prompt deallocation of large tensors; it is
* intrusively refcounted so that we can still perform reference
* counted operations on raw pointers, which is often more convenient
* when passing tensors across language boundaries.
*
* - For backwards-compatibility reasons, a tensor may be in an
* uninitialized state. A tensor may be uninitialized in the following
* two ways:
*
* - A tensor may be DTYPE UNINITIALIZED. A tensor of this
* form has an uninitialized dtype. This situation most
* frequently arises when a user writes Tensor x(CPU). The dtype
* is subsequently initialized when mutable_data<T>() is
* invoked for the first time.
*
* - A tensor may be STORAGE UNINITIALIZED. A tensor of this form
* has non-zero size, but has a storage with a null data pointer.
* This situation most frequently arises when a user calls
* Resize() or FreeMemory(). This is because Caffe2 historically
* does lazy allocation: allocation of data doesn't occur until
* mutable_data<T>() is invoked. A tensor with zero size is
* always storage initialized, because no allocation is necessary
* in this case.
*
* All combinations of these two uninitialized states are possible.
* Consider the following transcript in idiomatic Caffe2 API:
*
* Tensor x(CPU); // x is storage-initialized, dtype-UNINITIALIZED
* x.Resize(4); // x is storage-UNINITIALIZED, dtype-UNINITIALIZED
* x.mutable_data<float>(); // x is storage-initialized, dtype-initialized
* x.FreeMemory(); // x is storage-UNINITIALIZED, dtype-initialized.
*
* All other fields on tensor are always initialized. In particular,
* size is always valid. (Historically, a tensor declared as Tensor x(CPU)
* also had uninitialized size, encoded as numel == -1, but we have now
* decided to default to zero size, resulting in numel == 0).
*
* Uninitialized storages MUST be uniquely owned, to keep our model
* simple. Thus, we will reject operations which could cause an
* uninitialized storage to become shared (or a shared storage to
* become uninitialized, e.g., from FreeMemory).
*
* In practice, tensors which are storage-UNINITIALIZED and
* dtype-UNINITIALIZED are *extremely* ephemeral: essentially,
* after you do a Resize(), you basically always call mutable_data()
* immediately afterwards. Most functions are not designed to
* work if given a storage-UNINITIALIZED, dtype-UNINITIALIZED tensor.
*
* We intend to eliminate all uninitialized states, so that every
* tensor is fully initialized in all fields. Please do not write new code
* that depends on these uninitialized states.
*/
struct C10_API TensorImpl : public c10::intrusive_ptr_target {
TensorImpl() = delete;
~TensorImpl() override;
// Note [Enum ImplType]
// This enum is temporary. In the followup refactor we should
// think about how to specialize TensorImpl creation for view
// tensors. Currently we only special-case its key_set_, but
// there's also potential to share version_counter_ directly,
// without creating it first and then overriding it in as_view.
enum ImplType { VIEW };
/**
* Construct a 1-dim 0-size tensor backed by the given storage.
*/
TensorImpl(
Storage&& storage,
DispatchKeySet,
const caffe2::TypeMeta data_type);
// See Note [Enum ImplType]
TensorImpl(
ImplType,
Storage&& storage,
DispatchKeySet,
const caffe2::TypeMeta data_type);
/**
* Construct a 1-dim 0 size tensor that doesn't have a storage.
*/
TensorImpl(
DispatchKeySet,
const caffe2::TypeMeta data_type,
c10::optional<c10::Device> device_opt);
// Legacy constructors so I don't have to go update call sites.
// TODO: When Variable is added, delete these constructors
TensorImpl(
Storage&& storage,
DispatchKey dispatch_key,
const caffe2::TypeMeta data_type)
: TensorImpl(
std::move(storage),
DispatchKeySet(dispatch_key),
data_type) {}
TensorImpl(
DispatchKey dispatch_key,
const caffe2::TypeMeta data_type,
c10::optional<c10::Device> device_opt)
: TensorImpl(DispatchKeySet(dispatch_key), data_type, device_opt) {}
private:
// This constructor is private, because the data_type is redundant with
// storage. Still, we pass it in separately because it's easier to write
// the initializer list if we're not worried about storage being moved out
// from under us.
TensorImpl(
Storage&& storage,
DispatchKeySet,
const caffe2::TypeMeta data_type,
c10::optional<c10::Device>);
public:
TensorImpl(const TensorImpl&) = delete;
TensorImpl& operator=(const TensorImpl&) = delete;
TensorImpl(TensorImpl&&) = delete;
TensorImpl& operator=(TensorImpl&&) = delete;
/**
* Release (decref) storage, and any other external allocations. This
* override is for `intrusive_ptr_target` and is used to implement weak
* tensors.
*/
void release_resources() override;
public:
/**
* Return the DispatchKeySet corresponding to this Tensor, specifying
* all of the DispatchKeys that this Tensor identifies as. This is the
* information used to dispatch operations on this tensor.
*/
DispatchKeySet key_set() const {
return key_set_;
}
private:
[[noreturn]] void throw_cannot_call_with_symbolic(const char* meth) const;
// NOTE: The general recipe for customizable methods is that the fastpath
// function (e.g., sizes()) does an unlikely policy test, and if it doesn't
// trigger, it runs the fast path implementation with no checks, going
// directly to on-TensorImpl fields. In particular, you never need to
// check ExtraMeta if the policy doesn't trigger, as a non-trivial ExtraMeta
// implies the policy will always match.
//
// The default implementations of methods are "safe": they do extra tests
// to make sure the internal state is consistent regardless of whether you
// are using symbolic shapes or not. If you don't want the tests, directly
// override the custom method (e.g., sizes_custom()) to get your preferred
// behavior.
public:
/**
* Return a reference to the sizes of this tensor. This reference remains
* valid as long as the tensor is live and not resized.
*/
IntArrayRef sizes() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return sizes_custom();
}
return sizes_and_strides_.sizes_arrayref();
}
SymIntArrayRef sym_sizes() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return sym_sizes_custom();
}
// Sizes guaranteed to be non-negative, so unchecked cast is OK
return c10::fromIntArrayRefKnownNonNegative(
sizes_and_strides_.sizes_arrayref());
}
IntArrayRef sizes_default() const {
if (C10_UNLIKELY(has_symbolic_sizes_strides_)) {
throw_cannot_call_with_symbolic("sizes");
}
return sizes_and_strides_.sizes_arrayref();
}
SymIntArrayRef sym_sizes_default() const {
if (has_symbolic_sizes_strides_) {
return symbolic_shape_meta().sizes_;
} else {
// Sizes guaranteed to be non-negative, so unchecked cast is OK
return c10::fromIntArrayRefKnownNonNegative(sizes_default());
}
}
// From https://stackoverflow.com/a/3057522/23845
// TODO: does C++14 have a stdlib template for this?
template <typename T>
struct identity {
typedef T type;
};
template <typename T>
ArrayRef<T> generic_sizes() {
return _generic_sizes(identity<T>());
}
ArrayRef<int64_t> _generic_sizes(identity<int64_t>) {
return sizes();
}
ArrayRef<c10::SymInt> _generic_sizes(identity<c10::SymInt>) {
return sym_sizes();
}
template <typename T>
ArrayRef<T> generic_strides() {
return _generic_strides(identity<T>());
}
ArrayRef<int64_t> _generic_strides(identity<int64_t>) {
return strides();
}
ArrayRef<c10::SymInt> _generic_strides(identity<c10::SymInt>) {
return sym_strides();
}
template <typename T>
T generic_storage_offset() {
return _generic_storage_offset(identity<T>());
}
int64_t _generic_storage_offset(identity<int64_t>) {
return storage_offset();
}
c10::SymInt _generic_storage_offset(identity<c10::SymInt>) {
return sym_storage_offset();
}
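// These identity-dispatch templates let callers be generic over concrete
// vs. symbolic shapes; e.g. (illustrative, `impl` being a TensorImpl*):
//   ArrayRef<int64_t>     s  = impl->generic_sizes<int64_t>();     // sizes()
//   ArrayRef<c10::SymInt> ss = impl->generic_sizes<c10::SymInt>(); // sym_sizes()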
/**
* The number of elements in a tensor.
*
* WARNING: Previously, if you were using the Caffe2 API, you could
* test numel() == -1 to see if a tensor was uninitialized. This
* is no longer true; numel always accurately reports the product
* of sizes of a tensor.
*/
int64_t numel() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return numel_custom();
}
return numel_;
}
c10::SymInt sym_numel() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return sym_numel_custom();
}
return c10::SymInt(SymInt::UNCHECKED, numel_);
}
int64_t numel_default() const {
if (C10_UNLIKELY(has_symbolic_sizes_strides_)) {
throw_cannot_call_with_symbolic("numel");
}
return numel_;
}
c10::SymInt sym_numel_default() const {
if (has_symbolic_sizes_strides_) {
return symbolic_shape_meta().numel();
} else {
return c10::SymInt(SymInt::UNCHECKED, numel_);
}
}
/**
* Return the number of dimensions of this tensor. Note that 0-dimension
* represents a Tensor that is a Scalar, e.g., one that has a single element.
*/
int64_t dim() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return dim_custom();
}
return static_cast<int64_t>(sizes_and_strides_.size());
}
int64_t dim_default() const {
if (has_symbolic_sizes_strides_) {
return static_cast<int64_t>(symbolic_shape_meta().sizes_.size());
} else {
return static_cast<int64_t>(sizes_and_strides_.size());
}
}
/**
* Return the offset in number of elements into the storage that this
* tensor points to. Most tensors have storage_offset() == 0, but,
* for example, an index into a tensor will have a non-zero storage_offset().
*
* WARNING: This is NOT computed in bytes.
*/
int64_t storage_offset() const {
// TODO: maybe this should be toggled by strides
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return storage_offset_custom();
}
return storage_offset_;
}
c10::SymInt sym_storage_offset() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return sym_storage_offset_custom();
}
return c10::SymInt(SymInt::UNCHECKED, storage_offset_);
}
int64_t storage_offset_default() const {
if (C10_UNLIKELY(has_symbolic_sizes_strides_)) {
throw_cannot_call_with_symbolic("storage_offset");
}
return storage_offset_;
}
c10::SymInt sym_storage_offset_default() const {
if (has_symbolic_sizes_strides_) {
return symbolic_shape_meta().storage_offset_;
} else {
return c10::SymInt(SymInt::UNCHECKED, storage_offset_);
}
}
/**
* Return a reference to the strides of this tensor. This reference remains
* valid as long as the tensor is live and not restrided.
*/
IntArrayRef strides() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
return strides_custom();
}
return sizes_and_strides_.strides_arrayref();
}
c10::SymIntArrayRef sym_strides() const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
return sym_strides_custom();
}
return c10::fromIntArrayRefKnownNonNegative(strides_default());
}
IntArrayRef strides_default() const {
if (C10_UNLIKELY(has_symbolic_sizes_strides_)) {
throw_cannot_call_with_symbolic("strides");
}
return sizes_and_strides_.strides_arrayref();
}
c10::SymIntArrayRef sym_strides_default() const {
if (has_symbolic_sizes_strides_) {
return symbolic_shape_meta().strides_;
} else {
return c10::fromIntArrayRefKnownNonNegative(strides_default());
}
}
/**
* Whether or not a tensor is laid out in contiguous memory.
*
* Tensors with non-trivial strides are not contiguous. See
* compute_contiguous() for the exact definition of whether or not
* a tensor is contiguous.
*/
bool is_contiguous(
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
return is_contiguous_custom(memory_format);
}
return is_contiguous_default(memory_format);
}
// These are factored into separate functions in case subclasses
// want to use them
bool is_contiguous_default(at::MemoryFormat memory_format) const {
if (has_symbolic_sizes_strides_) {
if (memory_format == at::MemoryFormat::ChannelsLast) {
return symbolic_shape_meta().is_channels_last_contiguous().guard_bool(
__FILE__, __LINE__);
} else if (memory_format == at::MemoryFormat::ChannelsLast3d) {
return symbolic_shape_meta()
.is_channels_last_3d_contiguous()
.guard_bool(__FILE__, __LINE__);
}
return symbolic_shape_meta().is_contiguous().guard_bool(
__FILE__, __LINE__);
}
if (memory_format == at::MemoryFormat::ChannelsLast) {
return is_channels_last_contiguous_;
} else if (memory_format == at::MemoryFormat::ChannelsLast3d) {
return is_channels_last_3d_contiguous_;
}
return is_contiguous_;
}
bool is_strides_like_default(at::MemoryFormat memory_format) const {
if (has_symbolic_sizes_strides_) {
if (memory_format == at::MemoryFormat::ChannelsLast) {
return symbolic_shape_meta().is_channels_last().guard_bool(
__FILE__, __LINE__);
} else if (memory_format == at::MemoryFormat::ChannelsLast3d) {
return symbolic_shape_meta().is_channels_last_3d().guard_bool(
__FILE__, __LINE__);
} else {
return false;
}
}
if (memory_format == at::MemoryFormat::ChannelsLast) {
return is_channels_last_;
} else if (memory_format == at::MemoryFormat::ChannelsLast3d) {
return is_channels_last_3d_;
} else {
return false;
}
}
bool is_non_overlapping_and_dense_default() const {
if (has_symbolic_sizes_strides_) {
return symbolic_shape_meta().is_non_overlapping_and_dense().guard_bool(
__FILE__, __LINE__);
} else {
return is_non_overlapping_and_dense_;
}
}
// NB: these dim accessor functions don't have _default(), as you can use
// sizes_default/strides_default
/**
* Return the size of a tensor at some dimension, wrapping the dimension if
* necessary.
*
* NOTE: if you know wrapping is unnecessary, do sizes()[d] instead; it will
* be faster
*/
int64_t size(int64_t d) const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return size_custom(d);
}
d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false);
return sizes_and_strides_.size_at_unchecked(d);
}
c10::SymInt sym_size(int64_t d) const {
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) {
return sym_size_custom(d);
}
d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false);
const auto sizes = this->sym_sizes();
return sizes[d];
}
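// For example, for a tensor with sizes {2, 3, 5}:
//   size(-1) == 5  // -1 wraps around to the last dimension
//   size(1)  == 3  // no wrapping needed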
/**
* Return the stride of a tensor at some dimension, wrapping the dimension
* if necessary.
*
* NOTE: if you know wrapping is unnecessary, do strides()[d] instead; it
* will be faster
*/
int64_t stride(int64_t d) const {
d = maybe_wrap_dim(d, dim(), false);
if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) {
// TODO: provide stride_custom, symmetrically with size_custom.
// There is presently no user for it; only NestedTensor is using
// size_custom overrideability
return strides_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds)
}
// Intentionally don't call default, which also handles symbolic
return sizes_and_strides_.stride_at_unchecked(d);
}
enum class SizesStridesPolicy : uint8_t {
// Default behavior, e.g., dense tensor.
//
// Can override: nothing
Default = 0,
// Customizable strides behavior, e.g., sparse tensor,
// mkldnn tensor.
//
// Can override: strides(), is_contiguous()
CustomStrides = 1,
// Customizable sizes behavior, e.g., nested tensor
//
// Can override: strides(), is_contiguous(), sizes(), dim(), numel()
CustomSizes = 2
};
protected:
inline bool matches_policy(SizesStridesPolicy policy) const {
return sizes_strides_policy_ >= static_cast<uint8_t>(policy);
}
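// Note that the comparison above is ">=", so a policy of CustomSizes (2)
// also satisfies a CustomStrides (1) query: customizing sizes implies the
// strides-related entry points are customizable as well.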
inline bool matches_custom(SizesStridesPolicy policy) const {
return custom_sizes_strides_ >= static_cast<uint8_t>(policy);
}
inline bool matches_python_custom(SizesStridesPolicy policy) const {
auto r = python_custom_sizes_strides_ >= static_cast<uint8_t>(policy);
if (r) {
TORCH_INTERNAL_ASSERT(is_python_dispatch())
}
return r;
}
/**
* Customization points for the functions above. sizes_strides_policy_
* must be set to enable these.
*
* NB: dim is overrideable separately from sizes because it is possible
* for a tensor to have rank, but not well defined sizes.
*/
// sizes_strides_policy_ >= CustomStrides
virtual bool is_contiguous_custom(at::MemoryFormat memory_format) const;
virtual bool is_strides_like_custom(at::MemoryFormat memory_format) const;
virtual bool is_non_overlapping_and_dense_custom() const;
// sizes_strides_policy_ >= CustomSizes
// Currently this method only exists to be overridden by subclasses such as
// NestedTensorImpl.
virtual int64_t size_custom(int64_t d) const {
// TODO: We could add support to Python dispatch here.
// TODO: We could call into aten::size.int instead of
// sizes_custom()[d] and enable use of the dispatcher.
d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false);
return sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds)
}
virtual c10::SymInt sym_size_custom(int64_t d) const {
// TODO: We could add support to Python dispatch here.
// TODO: We could call into aten::size.int instead of
// sym_sizes_custom()[d] and enable use of the dispatcher.
d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false);
return sym_sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds)
}
virtual IntArrayRef sizes_custom() const;
virtual IntArrayRef strides_custom() const;
virtual int64_t numel_custom() const;
virtual int64_t storage_offset_custom() const;
virtual int64_t dim_custom() const;
virtual Device device_custom() const;
virtual Layout layout_custom() const;
virtual c10::SymIntArrayRef sym_sizes_custom() const;
virtual c10::SymIntArrayRef sym_strides_custom() const;
virtual c10::SymInt sym_numel_custom() const;
virtual c10::SymInt sym_storage_offset_custom() const;
public:
/**
* True if this tensor has storage. See storage() for details.
*/
#ifdef DEBUG