
Commit 91e8bcd

Sebastian Andrzej Siewior authored and herbertx committed
crypto: cryptd - Protect per-CPU resource by disabling BH.
crypto: cryptd - Protect per-CPU resource by disabling BH.

The access to cryptd_queue::cpu_queue is synchronized by disabling preemption in cryptd_enqueue_request() and disabling BH in cryptd_queue_worker(). This implies that access is allowed from BH.

If cryptd_enqueue_request() is invoked from preemptible context _and_ soft interrupt then this can lead to list corruption since cryptd_enqueue_request() is not protected against access from soft interrupt.

Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable() to ensure BH is always disabled.
Remove preempt_disable() from cryptd_queue_worker() since it is not needed because local_bh_disable() ensures synchronisation.

Fixes: 254eff7 ("crypto: cryptd - Per-CPU thread implementation...")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 42a01af · commit 91e8bcd

File tree

1 file changed: +11 -12 lines changed


crypto/cryptd.c

Lines changed: 11 additions & 12 deletions
@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
 	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
 	if (!refcount_read(refcnt))
-		goto out_put_cpu;
+		goto out;
 
 	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)
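
For background, here is a minimal, illustrative sketch of the locking pattern the patch moves to. The names my_cpu_queue, my_queue and my_enqueue are hypothetical and not part of cryptd; the point is that local_bh_disable() keeps the task on the current CPU, as get_cpu() did, and additionally keeps softirq handlers off that CPU while the per-CPU data is being modified, so process-context and softirq-context callers can no longer interleave on the same list.

/*
 * Illustrative sketch only, not the cryptd code: a hypothetical per-CPU
 * list touched from both process context and softirq context on the same
 * CPU. The per-CPU lists are assumed to be initialized elsewhere with
 * INIT_LIST_HEAD().
 */
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/bottom_half.h>

struct my_cpu_queue {			/* hypothetical type */
	struct list_head items;
};

static DEFINE_PER_CPU(struct my_cpu_queue, my_queue);

static void my_enqueue(struct list_head *entry)
{
	struct my_cpu_queue *q;

	/*
	 * Disabling BH pins us to this CPU (as get_cpu() would) and also
	 * keeps softirq handlers off the list while we modify it, so a
	 * softirq-context caller cannot corrupt it.
	 */
	local_bh_disable();
	q = this_cpu_ptr(&my_queue);
	list_add_tail(entry, &q->items);
	local_bh_enable();
}

In the actual patch the same idea is applied to cryptd_enqueue_request(), and queue_work_on(smp_processor_id(), ...) remains correct there because smp_processor_id() is evaluated while BH is still disabled, i.e. while the task cannot migrate to another CPU.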
