@@ -7,6 +7,7 @@
 */

 #include <executorch/backends/xnnpack/runtime/XNNCompiler.h>
+#include <executorch/backends/xnnpack/runtime/XNNPACKBackend.h>
 #include <executorch/backends/xnnpack/runtime/XNNWeightsCache.h>
 #include <executorch/runtime/backend/interface.h>
 #include <executorch/runtime/core/error.h>
@@ -51,21 +52,7 @@ class XnnpackBackend final
     }

 #ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    // Create a workspace for the XNNExecutor to use. This workspace will be
-    // shared across all delegate instances.
-    ET_LOG(Debug, "Creating XNN workspace");
-    xnn_workspace_t workspace = nullptr;
-    status = xnn_create_workspace(&workspace);
-    if (status != xnn_status_success) {
-      ET_LOG(
-          Error,
-          "Failed to create XNN workspace, XNNPACK status: 0x%x",
-          (unsigned int)status);
-      workspace = nullptr;
-      return;
-    }
-    workspace_.reset(workspace);
-    ET_LOG(Debug, "Created XNN workspace: %p", workspace_.get());
+    enable_shared_workspace_ = true;
 #endif // ENABLE_XNNPACK_SHARED_WORKSPACE
   }

@@ -86,9 +73,28 @@ class XnnpackBackend final
     const NamedDataMap* named_data_map = context.get_named_data_map();
     // thread safe. This can heppen when multiple threads call init() on
     // the same backend instance.
-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+      if (!workspace_) {
+        // Create a workspace for the XNNExecutor to use. This workspace will
+        // be shared across all delegate instances.
+        ET_LOG(Debug, "Creating XNN workspace");
+        xnn_workspace_t workspace = nullptr;
+        auto status = xnn_create_workspace(&workspace);
+        if (status != xnn_status_success) {
+          ET_LOG(
+              Error,
+              "Failed to create XNN workspace, XNNPACK status: 0x%x",
+              (unsigned int)status);
+          workspace = nullptr;
+          return Error::Internal;
+        }
+        workspace_.reset(workspace);
+        ET_LOG(Debug, "Created XNN workspace: %p", workspace_.get());
+      }
+    }

 #ifdef ENABLE_XNNPACK_WEIGHTS_CACHE
     const std::lock_guard<std::mutex> lock_weight_cache(weights_cache_mutex_);
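
The hunk above swaps the compile-time #ifdef guard for a runtime flag: the lock is built with std::defer_lock and only acquired when enable_shared_workspace_ is set, and the workspace itself is now created lazily on the first init(). A minimal standalone sketch of that conditional-locking idiom, using only the C++ standard library (the function and flag names below are illustrative, not part of the patch):

#include <mutex>

// Take the mutex only when a runtime flag asks for it, mirroring the
// std::unique_lock + std::defer_lock pattern in init()/execute()/destroy().
// The destructor releases the mutex only if it was actually locked.
void run_guarded(std::mutex& m, bool shared_workspace_enabled) {
  std::unique_lock<std::mutex> lock(m, std::defer_lock); // not locked yet
  if (shared_workspace_enabled) {
    lock.lock(); // serialize only when the shared workspace is in use
  }
  // ... work on the (possibly shared) resource here ...
}
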
@@ -129,9 +135,10 @@ class XnnpackBackend final
       EValue** args) const override {
     auto executor = static_cast<xnnpack::delegate::XNNExecutor*>(handle);

-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+    }

 #ifdef ENABLE_XNNPACK_WEIGHTS_CACHE
     const std::lock_guard<std::mutex> lock_weights_cache(weights_cache_mutex_);
@@ -160,9 +167,10 @@ class XnnpackBackend final
     // This is needed to serialize access to xnn_delete_runtime which is not
     // thread safe. This can heppen when multiple threads call destroy() on
     // the same backend instance.
-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+    }

     auto executor = static_cast<xnnpack::delegate::XNNExecutor*>(handle);

@@ -181,12 +189,16 @@ class XnnpackBackend final
     }
   }

+  void set_workspace_sharing_enabled(bool enable) {
+    this->enable_shared_workspace_ = enable;
+  }
+
  private:
+  bool enable_shared_workspace_;
   // This is a global workspace for all delegate instances.
   mutable std::mutex workspace_mutex_;
-  std::unique_ptr<xnn_workspace, decltype(&xnn_release_workspace)> workspace_{
-      nullptr,
-      &xnn_release_workspace};
+  mutable std::unique_ptr<xnn_workspace, decltype(&xnn_release_workspace)>
+      workspace_{nullptr, &xnn_release_workspace};

   // Weights cache is global to all delegate instances.
   mutable std::mutex weights_cache_mutex_;
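
For context, workspace_ keeps the C handle xnn_workspace_t inside a std::unique_ptr whose deleter is xnn_release_workspace; the hunk above only adds mutable so that the now-lazy creation can run from the const init() path. A generic sketch of that owning-handle pattern, with a hypothetical make_handle/release_handle C API standing in for the XNNPACK calls:

#include <memory>

// Hypothetical stand-ins for xnn_create_workspace / xnn_release_workspace.
struct handle {};
handle* make_handle() { return new handle(); }
void release_handle(handle* h) { delete h; }

int main() {
  // Own the raw handle through a function-pointer deleter, as workspace_ does;
  // it starts empty, is reset() on first use, and is freed automatically.
  std::unique_ptr<handle, decltype(&release_handle)> owned{
      nullptr, &release_handle};
  owned.reset(make_handle());
  return 0;
}
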
@@ -199,10 +211,16 @@ class XnnpackBackend final
 };

 namespace {
-auto cls = XnnpackBackend();
-Backend backend{"XnnpackBackend", &cls};
+auto backend_instance = XnnpackBackend();
+Backend backend{"XnnpackBackend", &backend_instance};
 static auto success_with_compiler = register_backend(backend);
 } // namespace

+namespace executorch::backend::xnnpack {
+ET_EXPERIMENTAL void set_workspace_sharing_enabled(bool enable) {
+  backend_instance.set_workspace_sharing_enabled(enable);
+}
+} // namespace executorch::backend::xnnpack
+
 } // namespace backends
 } // namespace executorch
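
Assuming the newly included XNNPACKBackend.h header declares the set_workspace_sharing_enabled() function added above, a client could opt out of the shared workspace at runtime roughly as follows (a sketch, not part of the patch; the call site and timing are assumptions):

#include <executorch/backends/xnnpack/runtime/XNNPACKBackend.h>

int main() {
  // Disable the shared XNN workspace before any XNNPACK-delegated program is
  // initialized, so init() sees the flag and skips workspace creation/locking.
  executorch::backend::xnnpack::set_workspace_sharing_enabled(false);

  // ... load and execute the ExecuTorch program as usual ...
  return 0;
}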