 static void tee_shm_release(struct tee_shm *shm)
 {
 	struct tee_device *teedev = shm->teedev;
-	struct tee_shm_pool_mgr *poolm;
 
 	mutex_lock(&teedev->mutex);
 	idr_remove(&teedev->idr, shm->id);
 	if (shm->ctx)
 		list_del(&shm->link);
 	mutex_unlock(&teedev->mutex);
 
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		poolm = teedev->pool->dma_buf_mgr;
-	else
-		poolm = teedev->pool->private_mgr;
+	if (shm->flags & TEE_SHM_POOL) {
+		struct tee_shm_pool_mgr *poolm;
+
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			poolm = teedev->pool->dma_buf_mgr;
+		else
+			poolm = teedev->pool->private_mgr;
+
+		poolm->ops->free(poolm, shm);
+	} else if (shm->flags & TEE_SHM_REGISTER) {
+		size_t n;
+		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+		if (rc)
+			dev_err(teedev->dev.parent,
+				"unregister shm %p failed: %d", shm, rc);
+
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+
+		kfree(shm->pages);
+	}
 
-	poolm->ops->free(poolm, shm);
 	kfree(shm);
 
 	tee_device_put(teedev);
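The reworked release path above still frees pool-backed buffers through the pool manager, but for TEE_SHM_REGISTER memory it calls back into the driver and then unpins the pages. A minimal sketch of the two driver hooks this relies on — the signatures follow the calls in this patch, the bodies are hypothetical placeholders:

/* Hypothetical driver hooks; only the signatures are implied by this patch. */
static int example_shm_register(struct tee_context *ctx, struct tee_shm *shm,
				struct page **pages, size_t num_pages)
{
	/* Hand the pinned page list to the TEE (driver-specific protocol). */
	return 0;
}

static int example_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	/* Ask the TEE to drop its references before the pages are unpinned. */
	return 0;
}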
@@ -76,6 +92,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	struct tee_shm *shm = dmabuf->priv;
 	size_t size = vma->vm_end - vma->vm_start;
 
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_REGISTER)
+		return -EINVAL;
+
 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
 			       size, vma->vm_page_prot);
 }
@@ -89,26 +109,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.mmap = tee_shm_op_mmap,
 };
 
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+				struct tee_device *teedev,
+				size_t size, u32 flags)
 {
-	struct tee_device *teedev = ctx->teedev;
 	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
+	if (ctx && ctx->teedev != teedev) {
+		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!(flags & TEE_SHM_MAPPED)) {
 		dev_err(teedev->dev.parent,
 			"only mapped allocations supported\n");
@@ -135,7 +149,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_dev_put;
 	}
 
-	shm->flags = flags;
+	shm->flags = flags | TEE_SHM_POOL;
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
@@ -171,9 +185,12 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 			goto err_rem;
 		}
 	}
-	mutex_lock(&teedev->mutex);
-	list_add_tail(&shm->link, &ctx->list_shm);
-	mutex_unlock(&teedev->mutex);
+
+	if (ctx) {
+		mutex_lock(&teedev->mutex);
+		list_add_tail(&shm->link, &ctx->list_shm);
+		mutex_unlock(&teedev->mutex);
+	}
 
 	return shm;
 err_rem:
@@ -188,19 +205,150 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 	tee_device_put(teedev);
 	return ret;
 }
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
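The relocated kernel-doc above still states the contract: TEE_SHM_MAPPED is mandatory, and TEE_SHM_DMA_BUF selects dma-buf backed global memory. A hedged usage sketch from a hypothetical client, assuming an already-open tee_context:

/* Hypothetical caller; tee_shm_free() is the existing release helper. */
struct tee_shm *shm;
void *va;

shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
if (IS_ERR(shm))
	return PTR_ERR(shm);

va = tee_shm_get_va(shm, 0);	/* kernel mapping, valid for pool memory */
if (IS_ERR(va)) {
	tee_shm_free(shm);
	return PTR_ERR(va);
}
/* ... fill the buffer and pass shm to the TEE ... */
tee_shm_free(shm);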
 
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags)
+{
+	struct tee_device *teedev = ctx->teedev;
+	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+	int num_pages;
+	unsigned long start;
+
+	if (flags != req_flags)
+		return ERR_PTR(-ENOTSUPP);
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->desc->ops->shm_register ||
+	    !teedev->desc->ops->shm_unregister) {
+		tee_device_put(teedev);
+		return ERR_PTR(-ENOTSUPP);
+	}
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	shm->id = -1;
+	start = rounddown(addr, PAGE_SIZE);
+	shm->offset = addr - start;
+	shm->size = length;
+	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+	if (!shm->pages) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	if (rc > 0)
+		shm->num_pages = rc;
+	if (rc != num_pages) {
+		if (rc > 0)
+			rc = -ENOMEM;
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err;
+	}
+
+	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+					     shm->num_pages);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			teedev->desc->ops->shm_unregister(ctx, shm);
+			goto err;
+		}
+	}
+
+	mutex_lock(&teedev->mutex);
+	list_add_tail(&shm->link, &ctx->list_shm);
+	mutex_unlock(&teedev->mutex);
+
+	return shm;
+err:
+	if (shm) {
+		size_t n;
+
+		if (shm->id >= 0) {
+			mutex_lock(&teedev->mutex);
+			idr_remove(&teedev->idr, shm->id);
+			mutex_unlock(&teedev->mutex);
+		}
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+		kfree(shm->pages);
+	}
+	kfree(shm);
+	tee_device_put(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
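The buffer handed to tee_shm_register() need not be page aligned, so the rounding above pins every page the range touches. A worked example of that arithmetic, with hypothetical values and assuming PAGE_SIZE == 0x1000:

/* A 32-byte buffer straddling a page boundary pins two pages. */
unsigned long addr = 0x10000ff0, length = 0x20;
unsigned long start = rounddown(addr, PAGE_SIZE);	/* 0x10000000 */
unsigned long offset = addr - start;			/* 0xff0 */
int num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
					/* (0x10002000 - 0x10000000) / 0x1000 == 2 */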
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
  * @returns user space file descriptor to shared memory
  */
 int tee_shm_get_fd(struct tee_shm *shm)
 {
-	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
 	int fd;
 
-	if ((shm->flags & req_flags) != req_flags)
+	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
@@ -238,6 +386,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
  */
 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if ((char *)va < (char *)shm->kaddr)
 		return -EINVAL;
@@ -258,6 +408,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
  */
 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if (pa < shm->paddr)
 		return -EINVAL;
@@ -284,6 +436,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
  */
 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return ERR_PTR(-EINVAL);
 	if (offs >= shm->size)
 		return ERR_PTR(-EINVAL);
 	return (char *)shm->kaddr + offs;
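All three accessors now check TEE_SHM_MAPPED first, since registered user memory has no kernel mapping (shm->kaddr) to hand out. A hedged caller-side sketch:

/* Hypothetical caller: a registered-only shm now fails cleanly. */
void *va = tee_shm_get_va(shm, 0);

if (IS_ERR(va))
	return PTR_ERR(va);	/* -EINVAL for unmapped (registered) memory */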