Skip to content

Commit

Permalink
Add variable_factories.h to cppdocs (pytorch#14381)
Browse files — browse the repository at this point in the history
Summary:
This will document `torch::from_blob` and such.

soumith ezyang
Pull Request resolved: pytorch#14381

Differential Revision: D13216560

Pulled By: goldsborough

fbshipit-source-id: 112f60e45e4d38a8a9983fa71e9cc56bc1a73465
  • Loading branch information
goldsborough authored and facebook-github-bot committed Nov 27, 2018
1 parent c19af59 commit 49fe678
Show file tree
Hide file tree
Showing 5 changed files with 74 additions and 25 deletions.
5 changes: 4 additions & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,10 @@ matrix:
install: pip install mypy mypy-extensions
script: mypy @mypy-files.txt
- env: CPP_DOC_CHECK
install: sudo apt-get install -y doxygen
python: "3.6"
install:
- sudo apt-get install -y doxygen
- pip install -r requirements.txt
script: cd docs/cpp/source && ./check-doxygen.sh
- env: CLANG_TIDY
python: "3.6"
Expand Down
35 changes: 18 additions & 17 deletions docs/cpp/source/Doxyfile
Original file line number Diff line number Diff line change
Expand Up @@ -29,36 +29,37 @@ HTML_OUTPUT = doxygen_html
# {repo_root}/docs/cpp/source/../../.. -> {repo_root}
STRIP_FROM_PATH = ../../..
# What folders / files Doxygen should process.
INPUT = ../../../torch/csrc/api/include \
../../../torch/csrc/api/src \
../../../torch/csrc/jit/custom_operator.h \
../../../torch/csrc/jit/import.h \
../../../torch/csrc/jit/ivalue.h \
../../../torch/csrc/jit/script/module.h \
../../../aten/src/ATen/ATen.h \
INPUT = ../../../aten/src/ATen/ATen.h \
../../../aten/src/ATen/Backend.h \
../../../aten/src/ATen/DeviceGuard.h \
../../../aten/src/ATen/Layout.h \
../../../aten/src/ATen/OptionsGuard.h \
../../../aten/src/ATen/Scalar.h \
../../../aten/src/ATen/TensorOptions.h \
../../../aten/src/ATen/core/Half.h \
../../../aten/src/ATen/core/ScalarType.h \
../../../aten/src/ATen/core/Tensor.h \
../../../aten/src/ATen/cuda/CUDAContext.h \
../../../aten/src/ATen/cuda/CUDAGuard.h \
../../../aten/src/ATen/cuda/CUDAStream.h \
../../../aten/src/ATen/cuda/CUDAContext.h \
../../../aten/src/ATen/cudnn/Descriptors.h \
../../../aten/src/ATen/cudnn/Handles.h \
../../../aten/src/ATen/cudnn/Types.h \
../../../aten/src/ATen/cudnn/Utils.h \
../../../aten/src/ATen/DeviceGuard.h \
../../../aten/src/ATen/Layout.h \
../../../aten/src/ATen/mkl/Descriptors.h \
../../../c10/util/Optional.h \
../../../c10/util/Exception.h \
../../../c10/util/ArrayRef.h \
../../../aten/src/ATen/OptionsGuard.h \
../../../aten/src/ATen/Scalar.h \
../../../aten/src/ATen/TensorOptions.h \
../../../build/aten/src/ATen/Functions.h \
../../../c10/Device.h \
../../../c10/DeviceType.h \
../../../build/aten/src/ATen/Functions.h
../../../c10/util/ArrayRef.h \
../../../c10/util/Exception.h \
../../../c10/util/Optional.h \
../../../torch/csrc/api/include \
../../../torch/csrc/api/src \
../../../torch/csrc/autograd/generated/variable_factories.h \
../../../torch/csrc/jit/custom_operator.h \
../../../torch/csrc/jit/import.h \
../../../torch/csrc/jit/ivalue.h \
../../../torch/csrc/jit/script/module.h
# Don't include .cpp files!
FILE_PATTERNS = *.h
# If you need this to be YES, exhale will probably break.
Expand Down
21 changes: 20 additions & 1 deletion docs/cpp/source/check-doxygen.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,26 @@ ignore_warning() {
mv temp.txt doxygen-log.txt
}

pushd "$(dirname "$0")/../../.."

cp aten/src/ATen/common_with_cwrap.py tools/shared/cwrap_common.py
cp torch/_utils_internal.py tools/shared

python aten/src/ATen/gen.py \
-s aten/src/ATen \
-d build/aten/src/ATen \
aten/src/ATen/Declarations.cwrap \
aten/src/THNN/generic/THNN.h \
aten/src/THCUNN/generic/THCUNN.h \
aten/src/ATen/nn.yaml \
aten/src/ATen/native/native_functions.yaml

python tools/setup_helpers/generate_code.py \
--declarations-path build/aten/src/ATen/Declarations.yaml \
--nn-path aten/src

popd

# Run doxygen and log all output.
doxygen 2> original-doxygen-log.txt
cp original-doxygen-log.txt doxygen-log.txt
Expand All @@ -19,7 +39,6 @@ cat original-doxygen-log.txt

# Filter out some warnings.
ignore_warning "warning: no uniquely matching class member found for"
ignore_warning "warning:.*\.\./\.\./\.\./build/aten.*"

# Count the number of remaining warnings.
warnings="$(grep 'warning:' doxygen-log.txt | wc -l)"
Expand Down
1 change: 1 addition & 0 deletions docs/cpp/source/notes/tensor_basics.rst
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,7 @@ to a single element in a larger ``Tensor``. They can be used anywhere a
which reduce the dimensions of a ``Tensor``.

.. code-block:: cpp
torch::Tensor two = torch::rand({10, 20});
two[1][2] = 4;
// ^^^^^^ <- zero-dimensional Tensor
37 changes: 31 additions & 6 deletions tools/autograd/templates/variable_factories.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,22 +39,37 @@ namespace torch {
AT_FORALL_SCALAR_TYPES_EXCEPT_HALF(TENSOR)
#undef TENSOR

/// A generic deleter function.
using Deleter = std::function<void(void*)>;

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `deleter` function (a
/// `std::function<void(void*)>`) will be called on the `data` when the Tensor
/// data would normally be deallocated. The `TensorOptions` specify additional
/// configuration options for the returned tensor, such as what type to
/// interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntList sizes,
at::IntList strides,
const std::function<void(void*)>& deleter,
const at::TensorOptions& options = {}) {
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor =
at::from_blob(data, sizes, strides, deleter, options.is_variable(false));
return autograd::make_variable(tensor, options.requires_grad());
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `TensorOptions`
/// specify additional configuration options for the returned tensor, such as
/// what type to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntList sizes,
at::IntList strides,
const at::TensorOptions& options = {}) {
const at::TensorOptions& options = at::TensorOptions()) {
return torch::from_blob(
data,
sizes,
Expand All @@ -63,20 +78,30 @@ inline at::Tensor from_blob(
options);
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The `deleter`
/// function (a `std::function<void(void*)>`) will be called on the `data` when
/// the Tensor data would normally be deallocated. The `TensorOptions` specify
/// additional configuration options for the returned tensor, such as what type
/// to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntList sizes,
const std::function<void(void*)>& deleter,
const at::TensorOptions& options = {}) {
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor =
at::from_blob(data, sizes, deleter, options.is_variable(false));
return autograd::make_variable(tensor, options.requires_grad());
}

/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The
/// `TensorOptions` specify additional configuration options for the returned
/// tensor, such as what type to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntList sizes,
const at::TensorOptions& options = {}) {
const at::TensorOptions& options = at::TensorOptions()) {
return torch::from_blob(data, sizes, /*deleter=*/[](void*) {}, options);
}

Expand Down

0 comments on commit 49fe678

Please sign in to comment.