Add clipgrad doc and contiguous #6130

Merged · 15 commits · Sep 2, 2021
2 changes: 2 additions & 0 deletions python/oneflow/__init__.py
@@ -303,6 +303,7 @@ def atexit_hook(hook):
 from oneflow.nn.modules.tensor_buffer import (
     tensor_buffer_to_tensor_op as tensor_buffer_to_tensor,
 )
+from oneflow.nn.modules.as_tensor import as_tensor
 from oneflow.nn.modules.tensor_buffer import tensor_to_tensor_buffer
 from oneflow.nn.modules.tile import tile_op as tile
 from oneflow.nn.modules.to import to_op as to
@@ -334,6 +335,7 @@ def atexit_hook(hook):
     zeros_initializer,
 )
 
+
 from . import (
     autograd,
     distributed,
6 changes: 6 additions & 0 deletions python/oneflow/framework/tensor.py
@@ -160,6 +160,11 @@ def _ne(self, other):
     return self.ne(other)
 
 
+def _contiguous(self):
+    # TODO: support stride mechanism
+    return self
+
+
 def _getstate(self):
     assert self.is_local, "Only support local tensor to pickle"
     return {"data": self.numpy(), "dtype": self.dtype}
@@ -443,6 +448,7 @@ def RegisterMethods():
     Tensor.copy_ = _copy
     Tensor.get_device = _get_device
     Tensor._meta_repr = _meta_repr
+    Tensor.contiguous = _contiguous
 
 
 def register_tensor_op(op_name):
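Since the new _contiguous method just returns self (real stride support is still a TODO per the comment), calling contiguous() is a no-op for now. A minimal sketch of what that means for callers, assuming a stock constructor such as flow.ones:

import oneflow as flow

x = flow.ones(2, 3)
y = x.contiguous()  # no stride mechanism yet, so this simply returns self
assert y is x       # the very same tensor object comes back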
22 changes: 22 additions & 0 deletions python/oneflow/nn/modules/as_tensor.py
@@ -0,0 +1,22 @@
+"""
+Copyright 2020 The OneFlow Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import oneflow as flow
+
+
+def as_tensor(*args, **kwargs):
+    # TODO: support construct tensor with sharing memory with data
+    return flow.tensor(*args, **kwargs)
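Together with the __init__.py change above, this exposes flow.as_tensor at the top level. Because it currently forwards to flow.tensor, it copies its input instead of sharing memory with it (the TODO tracks the torch.as_tensor-style sharing). A minimal sketch of the resulting behavior:

import numpy as np
import oneflow as flow

data = np.array([1.0, 2.0, 3.0], dtype=np.float32)
t = flow.as_tensor(data)  # forwards to flow.tensor, so the data is copied
data[0] = 100.0           # mutating the array leaves the tensor untouched
print(t[0].numpy())       # 1.0, not 100.0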
4 changes: 2 additions & 2 deletions python/oneflow/nn/utils/clip_grad.py
@@ -59,7 +59,7 @@ def clip_grad_norm_(
 
     >>> import oneflow as flow
     >>> import numpy as np
-    >>> x1 = flow.Tensor(np.array([[2, 3, 4], [1.5, 2.6, 3.7]]).astype(np.float32), requires_grad=True)
+    >>> x1 = flow.tensor(np.array([[2, 3, 4], [1.5, 2.6, 3.7]]).astype(np.float32), requires_grad=True)
     >>> m1 = flow.nn.ReLU()
     >>> out1 = m1(x1)
     >>> out1 = out1.sum()
@@ -70,7 +70,7 @@ def clip_grad_norm_(
     >>> x1.grad
     tensor([[0.1000, 0.1000, 0.1000],
            [0.1000, 0.1000, 0.1000]], dtype=oneflow.float32)
-    >>> x2 = flow.Tensor(np.array([[-2, -3, -4], [2.5, 0, 3.2]]).astype(np.float32), requires_grad=True)
+    >>> x2 = flow.tensor(np.array([[-2, -3, -4], [2.5, 0, 3.2]]).astype(np.float32), requires_grad=True)
     >>> out2 = flow.atan(x2)
     >>> out2 = out2.sum()
     >>> out2.backward()
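The doc change swaps the legacy flow.Tensor constructor for the flow.tensor factory in the doctest, matching the API the x1/x2 examples recommend. A minimal end-to-end sketch of the documented flow, assuming the PyTorch-style clip_grad_norm_(parameters, max_norm, norm_type=2.0) signature suggested by the doctest:

import numpy as np
import oneflow as flow

x = flow.tensor(np.array([[2.0, 3.0, 4.0]], dtype=np.float32), requires_grad=True)
out = flow.nn.ReLU()(x).sum()
out.backward()

# Rescale x.grad in place so its total 2-norm does not exceed 0.5.
flow.nn.utils.clip_grad_norm_([x], 0.5)
print(x.grad)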