Skip to content

Commit

Permalink
remove is_npu_pinned_place (#53391)
Browse files Browse the repository at this point in the history
  • Loading branch information
jjyaoao authored Apr 28, 2023
1 parent 9c40653 commit 4ccbcce
Show file tree
Hide file tree
Showing 6 changed files with 2 additions and 12 deletions.
1 change: 0 additions & 1 deletion paddle/fluid/eager/amp_auto_cast.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ static inline bool NeedCast(const paddle::Tensor& tensor,
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_npu_pinned_place(place) ||
paddle::platform::is_custom_place(place)) {
// CudaPinnedPlace is added for varbase created by dataloader
if ((data_type == phi::DataType::FLOAT32 ||
Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/eager/amp_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,6 @@ inline phi::DataType GetDtypeWithPlace(
is_right_place = (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_npu_pinned_place(place) ||
paddle::platform::is_custom_place(place));
if (is_right_place) {
break;
Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/eager/eager_amp_auto_cast.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ static inline bool NeedCast(const paddle::Tensor& tensor,
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_npu_pinned_place(place) ||
paddle::platform::is_custom_place(place) ||
paddle::platform::is_cpu_place(place)) {
// CudaPinnedPlace is added for varbase created by dataloader
Expand Down
3 changes: 1 addition & 2 deletions paddle/fluid/imperative/amp_auto_cast.cc
Original file line number Diff line number Diff line change
Expand Up @@ -243,8 +243,7 @@ inline bool NeedCast(const std::shared_ptr<VarType>& var) {
if (paddle::platform::is_gpu_place(place) ||
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_custom_place(place) ||
paddle::platform::is_npu_pinned_place(place)) {
paddle::platform::is_custom_place(place)) {
// CudaPinnedPlace is added for varbase created by dataloader
if (data_type == paddle::framework::proto::VarType::FP32 ||
data_type == paddle::framework::proto::VarType::FP16 ||
Expand Down
7 changes: 1 addition & 6 deletions paddle/fluid/platform/place.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,10 +45,6 @@ bool is_cuda_pinned_place(const Place &p) {
return p.GetType() == phi::AllocationType::GPUPINNED;
}

// Returns true when `p` is NPU pinned-memory (AllocationType::NPUPINNED).
bool is_npu_pinned_place(const Place &p) {
  return p.GetType() == phi::AllocationType::NPUPINNED;
}

// Returns true when `p` is a custom-device allocation (AllocationType::CUSTOM).
bool is_custom_place(const Place &p) {
  return p.GetType() == phi::AllocationType::CUSTOM;
}
Expand All @@ -64,8 +60,7 @@ bool places_are_same_class(const Place &p1, const Place &p2) {

bool is_same_place(const Place &p1, const Place &p2) {
if (places_are_same_class(p1, p2)) {
if (is_cpu_place(p1) || is_cuda_pinned_place(p1) ||
is_npu_pinned_place(p1)) {
if (is_cpu_place(p1) || is_cuda_pinned_place(p1)) {
return true;
} else if (is_xpu_place(p1)) {
return p1 == p2;
Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/platform/place.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ bool is_xpu_place(const Place &);
bool is_ipu_place(const Place &);
bool is_cpu_place(const Place &);
bool is_cuda_pinned_place(const Place &);
bool is_npu_pinned_place(const Place &);
bool is_custom_place(const Place &p);
bool places_are_same_class(const Place &, const Place &);
bool is_same_place(const Place &, const Place &);
Expand Down

0 comments on commit 4ccbcce

Please sign in to comment.