Skip to content

Commit

Permalink
use C10_API in libc10.so (pytorch#94171)
Browse files Browse the repository at this point in the history
MSVC emits several C4273 warnings when compiling c10. I think the offending files should use C10_API instead of TORCH_API. If the tests pass, the changes should be safe.

Pull Request resolved: pytorch#94171
Approved by: https://github.com/Skylion007
  • Loading branch information
cyyever authored and pytorchmergebot committed Feb 6, 2023
1 parent a07d129 commit 3c6bc58
Show file tree
Hide file tree
Showing 7 changed files with 20 additions and 20 deletions.
2 changes: 1 addition & 1 deletion c10/core/GeneratorImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {

namespace detail {

TORCH_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);

} // namespace detail

Expand Down
8 changes: 4 additions & 4 deletions c10/core/GradMode.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,14 @@

namespace c10 {

struct TORCH_API GradMode {
struct C10_API GradMode {
static bool is_enabled();
static void set_enabled(bool enabled);
};

// A RAII, thread local (!) guard that enables or disables grad mode upon
// construction, and sets it back to the original value upon destruction.
struct TORCH_API AutoGradMode {
struct C10_API AutoGradMode {
AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
GradMode::set_enabled(enabled);
}
Expand All @@ -24,13 +24,13 @@ struct TORCH_API AutoGradMode {

// A RAII, thread local (!) guard that stops future operations from building
// gradients.
struct TORCH_API NoGradGuard : public AutoGradMode {
struct C10_API NoGradGuard : public AutoGradMode {
NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
};

// A RAII, thread local (!) guard that enables or disables forward grad mode
// upon construction, and sets it back to the original value upon destruction.
struct TORCH_API AutoFwGradMode {
struct C10_API AutoFwGradMode {
AutoFwGradMode(bool enabled)
: prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
AutogradState::get_tls_state().set_fw_grad_mode(enabled);
Expand Down
2 changes: 1 addition & 1 deletion c10/core/InferenceMode.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ namespace c10 {

// A RAII, thread local (!) guard that enables or disables inference mode upon
// construction, and sets it back to the original value upon destruction.
struct TORCH_API InferenceMode {
struct C10_API InferenceMode {
// Note [Expected TLS state in InferenceMode]:
// InferenceMode: ADInplaceOrView not in
// raw_local_dispatch_key_set.included(),
Expand Down
10 changes: 5 additions & 5 deletions c10/cuda/CUDAStream.h
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ class C10_CUDA_API CUDAStream {
* isHighPriority to true, or a stream for a specific device by setting device
* (defaulting to the current CUDA stream.)
*/
TORCH_API CUDAStream
C10_API CUDAStream
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);

/**
Expand All @@ -213,7 +213,7 @@ getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
* want to operate on a non-torch allocated stream for data exchange or similar
* purposes
*/
TORCH_API CUDAStream
C10_API CUDAStream
getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);

/**
Expand All @@ -222,7 +222,7 @@ getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
* where most computation occurs when you aren't explicitly using
* streams.
*/
TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);

/**
* Get the current CUDA stream, for the passed CUDA device, or for the
Expand All @@ -231,7 +231,7 @@ TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
* or 'CUDAStreamGuard'.
*/
TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);

/**
* Set the current stream on the device of the passed in stream to be
Expand All @@ -243,7 +243,7 @@ TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
* (which will switch both your current device and current stream in the way you
* expect, and reset it back to its original state afterwards).
*/
TORCH_API void setCurrentCUDAStream(CUDAStream stream);
C10_API void setCurrentCUDAStream(CUDAStream stream);

C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);

Expand Down
2 changes: 1 addition & 1 deletion c10/util/UniqueVoidPtr.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ using DeleterFnPtr = void (*)(void*);
namespace detail {

// Does not delete anything
TORCH_API void deleteNothing(void*);
C10_API void deleteNothing(void*);

// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
// with three major differences:
Expand Down
8 changes: 4 additions & 4 deletions c10/util/complex_math.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,10 @@ C10_HOST_DEVICE inline c10::complex<T> log2(const c10::complex<T>& x) {
#if defined(_LIBCPP_VERSION) || \
(defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX))
namespace _detail {
TORCH_API c10::complex<float> sqrt(const c10::complex<float>& in);
TORCH_API c10::complex<double> sqrt(const c10::complex<double>& in);
TORCH_API c10::complex<float> acos(const c10::complex<float>& in);
TORCH_API c10::complex<double> acos(const c10::complex<double>& in);
C10_API c10::complex<float> sqrt(const c10::complex<float>& in);
C10_API c10::complex<double> sqrt(const c10::complex<double>& in);
C10_API c10::complex<float> acos(const c10::complex<float>& in);
C10_API c10::complex<double> acos(const c10::complex<double>& in);
}; // namespace _detail
#endif

Expand Down
8 changes: 4 additions & 4 deletions c10/util/signal_handler.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@

namespace c10 {

class TORCH_API SignalHandler {
class C10_API SignalHandler {
public:
enum class Action { NONE, STOP };

Expand All @@ -40,13 +40,13 @@ class TORCH_API SignalHandler {
};

#if defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
class TORCH_API FatalSignalHandler {
class C10_API FatalSignalHandler {
// This works by setting up certain fatal signal handlers. Previous fatal
// signal handlers will still be called when the signal is raised. Defaults
// to being off.
public:
TORCH_API void setPrintStackTracesOnFatalSignal(bool print);
TORCH_API bool printStackTracesOnFatalSignal();
C10_API void setPrintStackTracesOnFatalSignal(bool print);
C10_API bool printStackTracesOnFatalSignal();
static FatalSignalHandler& getInstance();
virtual ~FatalSignalHandler();

Expand Down

0 comments on commit 3c6bc58

Please sign in to comment.