@@ -51,21 +51,7 @@ class XnnpackBackend final
    }

#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    // Create a workspace for the XNNExecutor to use. This workspace will be
-    // shared across all delegate instances.
-    ET_LOG(Debug, "Creating XNN workspace");
-    xnn_workspace_t workspace = nullptr;
-    status = xnn_create_workspace(&workspace);
-    if (status != xnn_status_success) {
-      ET_LOG(
-          Error,
-          "Failed to create XNN workspace, XNNPACK status: 0x%x",
-          (unsigned int)status);
-      workspace = nullptr;
-      return;
-    }
-    workspace_.reset(workspace);
-    ET_LOG(Debug, "Created XNN workspace: %p", workspace_.get());
+    enable_shared_workspace_ = true;
#endif // ENABLE_XNNPACK_SHARED_WORKSPACE
  }
@@ -86,9 +72,28 @@ class XnnpackBackend final
    const NamedDataMap* named_data_map = context.get_named_data_map();
    // thread safe. This can heppen when multiple threads call init() on
    // the same backend instance.
-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+      if (!workspace_) {
+        // Create a workspace for the XNNExecutor to use. This workspace will be
+        // shared across all delegate instances.
+        ET_LOG(Debug, "Creating XNN workspace");
+        xnn_workspace_t workspace = nullptr;
+        auto status = xnn_create_workspace(&workspace);
+        if (status != xnn_status_success) {
+          ET_LOG(
+              Error,
+              "Failed to create XNN workspace, XNNPACK status: 0x%x",
+              (unsigned int)status);
+          workspace = nullptr;
+          return Error::Internal;
+        }
+        workspace_.reset(workspace);
+        ET_LOG(Debug, "Created XNN workspace: %p", workspace_.get());
+      }
+    }

#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE
    const std::lock_guard<std::mutex> lock_weight_cache(weights_cache_mutex_);
@@ -129,9 +134,10 @@ class XnnpackBackend final
      EValue** args) const override {
    auto executor = static_cast<xnnpack::delegate::XNNExecutor*>(handle);

-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+    }

#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE
    const std::lock_guard<std::mutex> lock_weights_cache(weights_cache_mutex_);
@@ -160,9 +166,10 @@ class XnnpackBackend final
    // This is needed to serialize access to xnn_delete_runtime which is not
    // thread safe. This can heppen when multiple threads call destroy() on
    // the same backend instance.
-#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE
-    const std::lock_guard<std::mutex> lock(workspace_mutex_);
-#endif
+    std::unique_lock<std::mutex> lock(workspace_mutex_, std::defer_lock);
+    if (enable_shared_workspace_) {
+      lock.lock();
+    }

    auto executor = static_cast<xnnpack::delegate::XNNExecutor*>(handle);

@@ -181,10 +188,15 @@ class XnnpackBackend final
    }
  }

+  void set_workspace_sharing_enabled(bool enable) {
+    this->enable_shared_workspace_ = enable;
+  }
+
 private:
+  bool enable_shared_workspace_ = false;
  // This is a global workspace for all delegate instances.
  mutable std::mutex workspace_mutex_;
-  std::unique_ptr<xnn_workspace, decltype(&xnn_release_workspace)> workspace_{
+  mutable std::unique_ptr<xnn_workspace, decltype(&xnn_release_workspace)> workspace_{
      nullptr,
      &xnn_release_workspace};

@@ -204,5 +216,11 @@ Backend backend{"XnnpackBackend", &cls};
static auto success_with_compiler = register_backend(backend);
} // namespace

+namespace xnnpack {
+ET_EXPERIMENTAL void set_workspace_sharing_enabled(bool enable) {
+  cls.set_workspace_sharing_enabled(enable);
+}
+} // namespace xnnpack
+
} // namespace backends
} // namespace executorch
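
For reference, a minimal usage sketch of the new toggle. This assumes the free function is placed in executorch::backends::xnnpack (as in the cleaned-up hunk above) and is declared in a public header; the include path below is hypothetical. The call should happen before any XNNPACK-delegated program is loaded, since init() now creates the shared workspace lazily, and only when sharing is enabled.

// Hypothetical include path; the actual header exposing the toggle may differ.
#include <executorch/backends/xnnpack/runtime/XNNPACKBackend.h>

int main() {
  // Opt out of the shared workspace: each delegate instance then manages its
  // own XNNPACK workspace memory, and the workspace_mutex_ lock in
  // init()/execute()/destroy() is skipped.
  executorch::backends::xnnpack::set_workspace_sharing_enabled(false);

  // ... load the ExecuTorch program and run inference as usual ...
  return 0;
}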