Skip to content

Commit 5de8acb

Browse files
author
Miklos Szeredi
committed
fuse: cleanup request queuing towards virtiofs
Virtiofs has its own queuing mechanism, but still requests are first queued on fiq->pending to be immediately dequeued and queued onto the virtio queue. The queuing on fiq->pending is unnecessary and might even have some performance impact due to being a contention point. Forget requests are handled similarly. Move the queuing of requests and forgets into the fiq->ops->*. fuse_iqueue_ops are renamed to reflect the new semantics.

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Fixed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Tested-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Reviewed-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
1 parent 3ab394b commit 5de8acb

File tree

3 files changed

+106
-113
lines changed

3 files changed

+106
-113
lines changed

fs/fuse/dev.c

Lines changed: 87 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -194,11 +194,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
194194
}
195195
EXPORT_SYMBOL_GPL(fuse_len_args);
196196

197-
u64 fuse_get_unique(struct fuse_iqueue *fiq)
197+
static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
198198
{
199199
fiq->reqctr += FUSE_REQ_ID_STEP;
200200
return fiq->reqctr;
201201
}
202+
203+
u64 fuse_get_unique(struct fuse_iqueue *fiq)
204+
{
205+
u64 ret;
206+
207+
spin_lock(&fiq->lock);
208+
ret = fuse_get_unique_locked(fiq);
209+
spin_unlock(&fiq->lock);
210+
211+
return ret;
212+
}
202213
EXPORT_SYMBOL_GPL(fuse_get_unique);
203214

204215
static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +228,68 @@ __releases(fiq->lock)
217228
spin_unlock(&fiq->lock);
218229
}
219230

231+
static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
232+
{
233+
spin_lock(&fiq->lock);
234+
if (fiq->connected) {
235+
fiq->forget_list_tail->next = forget;
236+
fiq->forget_list_tail = forget;
237+
fuse_dev_wake_and_unlock(fiq);
238+
} else {
239+
kfree(forget);
240+
spin_unlock(&fiq->lock);
241+
}
242+
}
243+
244+
static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
245+
{
246+
spin_lock(&fiq->lock);
247+
if (list_empty(&req->intr_entry)) {
248+
list_add_tail(&req->intr_entry, &fiq->interrupts);
249+
/*
250+
* Pairs with smp_mb() implied by test_and_set_bit()
251+
* from fuse_request_end().
252+
*/
253+
smp_mb();
254+
if (test_bit(FR_FINISHED, &req->flags)) {
255+
list_del_init(&req->intr_entry);
256+
spin_unlock(&fiq->lock);
257+
} else {
258+
fuse_dev_wake_and_unlock(fiq);
259+
}
260+
} else {
261+
spin_unlock(&fiq->lock);
262+
}
263+
}
264+
265+
static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
266+
{
267+
spin_lock(&fiq->lock);
268+
if (fiq->connected) {
269+
if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
270+
req->in.h.unique = fuse_get_unique_locked(fiq);
271+
list_add_tail(&req->list, &fiq->pending);
272+
fuse_dev_wake_and_unlock(fiq);
273+
} else {
274+
spin_unlock(&fiq->lock);
275+
req->out.h.error = -ENOTCONN;
276+
fuse_request_end(req);
277+
}
278+
}
279+
220280
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
221-
.wake_forget_and_unlock = fuse_dev_wake_and_unlock,
222-
.wake_interrupt_and_unlock = fuse_dev_wake_and_unlock,
223-
.wake_pending_and_unlock = fuse_dev_wake_and_unlock,
281+
.send_forget = fuse_dev_queue_forget,
282+
.send_interrupt = fuse_dev_queue_interrupt,
283+
.send_req = fuse_dev_queue_req,
224284
};
225285
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
226286

227-
static void queue_request_and_unlock(struct fuse_iqueue *fiq,
228-
struct fuse_req *req)
229-
__releases(fiq->lock)
287+
static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
230288
{
231289
req->in.h.len = sizeof(struct fuse_in_header) +
232290
fuse_len_args(req->args->in_numargs,
233291
(struct fuse_arg *) req->args->in_args);
234-
list_add_tail(&req->list, &fiq->pending);
235-
fiq->ops->wake_pending_and_unlock(fiq);
292+
fiq->ops->send_req(fiq, req);
236293
}
237294

238295
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +300,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
243300
forget->forget_one.nodeid = nodeid;
244301
forget->forget_one.nlookup = nlookup;
245302

246-
spin_lock(&fiq->lock);
247-
if (fiq->connected) {
248-
fiq->forget_list_tail->next = forget;
249-
fiq->forget_list_tail = forget;
250-
fiq->ops->wake_forget_and_unlock(fiq);
251-
} else {
252-
kfree(forget);
253-
spin_unlock(&fiq->lock);
254-
}
303+
fiq->ops->send_forget(fiq, forget);
255304
}
256305

257306
static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +314,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
265314
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
266315
list_del(&req->list);
267316
fc->active_background++;
268-
spin_lock(&fiq->lock);
269-
req->in.h.unique = fuse_get_unique(fiq);
270-
queue_request_and_unlock(fiq, req);
317+
fuse_send_one(fiq, req);
271318
}
272319
}
273320

@@ -337,29 +384,12 @@ static int queue_interrupt(struct fuse_req *req)
337384
{
338385
struct fuse_iqueue *fiq = &req->fm->fc->iq;
339386

340-
spin_lock(&fiq->lock);
341387
/* Check for we've sent request to interrupt this req */
342-
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
343-
spin_unlock(&fiq->lock);
388+
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
344389
return -EINVAL;
345-
}
346390

347-
if (list_empty(&req->intr_entry)) {
348-
list_add_tail(&req->intr_entry, &fiq->interrupts);
349-
/*
350-
* Pairs with smp_mb() implied by test_and_set_bit()
351-
* from fuse_request_end().
352-
*/
353-
smp_mb();
354-
if (test_bit(FR_FINISHED, &req->flags)) {
355-
list_del_init(&req->intr_entry);
356-
spin_unlock(&fiq->lock);
357-
return 0;
358-
}
359-
fiq->ops->wake_interrupt_and_unlock(fiq);
360-
} else {
361-
spin_unlock(&fiq->lock);
362-
}
391+
fiq->ops->send_interrupt(fiq, req);
392+
363393
return 0;
364394
}
365395

@@ -414,21 +444,15 @@ static void __fuse_request_send(struct fuse_req *req)
414444
struct fuse_iqueue *fiq = &req->fm->fc->iq;
415445

416446
BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
417-
spin_lock(&fiq->lock);
418-
if (!fiq->connected) {
419-
spin_unlock(&fiq->lock);
420-
req->out.h.error = -ENOTCONN;
421-
} else {
422-
req->in.h.unique = fuse_get_unique(fiq);
423-
/* acquire extra reference, since request is still needed
424-
after fuse_request_end() */
425-
__fuse_get_request(req);
426-
queue_request_and_unlock(fiq, req);
427447

428-
request_wait_answer(req);
429-
/* Pairs with smp_wmb() in fuse_request_end() */
430-
smp_rmb();
431-
}
448+
/* acquire extra reference, since request is still needed after
449+
fuse_request_end() */
450+
__fuse_get_request(req);
451+
fuse_send_one(fiq, req);
452+
453+
request_wait_answer(req);
454+
/* Pairs with smp_wmb() in fuse_request_end() */
455+
smp_rmb();
432456
}
433457

434458
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -583,7 +607,6 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
583607
{
584608
struct fuse_req *req;
585609
struct fuse_iqueue *fiq = &fm->fc->iq;
586-
int err = 0;
587610

588611
req = fuse_get_req(fm, false);
589612
if (IS_ERR(req))
@@ -594,16 +617,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
594617

595618
fuse_args_to_req(req, args);
596619

597-
spin_lock(&fiq->lock);
598-
if (fiq->connected) {
599-
queue_request_and_unlock(fiq, req);
600-
} else {
601-
err = -ENODEV;
602-
spin_unlock(&fiq->lock);
603-
fuse_put_request(req);
604-
}
620+
fuse_send_one(fiq, req);
605621

606-
return err;
622+
return 0;
607623
}
608624

609625
/*
@@ -1075,9 +1091,9 @@ __releases(fiq->lock)
10751091
return err ? err : reqsize;
10761092
}
10771093

1078-
struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1079-
unsigned int max,
1080-
unsigned int *countp)
1094+
static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1095+
unsigned int max,
1096+
unsigned int *countp)
10811097
{
10821098
struct fuse_forget_link *head = fiq->forget_list_head.next;
10831099
struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1112,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
10961112

10971113
return head;
10981114
}
1099-
EXPORT_SYMBOL(fuse_dequeue_forget);
11001115

11011116
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
11021117
struct fuse_copy_state *cs,
@@ -1111,7 +1126,7 @@ __releases(fiq->lock)
11111126
struct fuse_in_header ih = {
11121127
.opcode = FUSE_FORGET,
11131128
.nodeid = forget->forget_one.nodeid,
1114-
.unique = fuse_get_unique(fiq),
1129+
.unique = fuse_get_unique_locked(fiq),
11151130
.len = sizeof(ih) + sizeof(arg),
11161131
};
11171132

@@ -1142,7 +1157,7 @@ __releases(fiq->lock)
11421157
struct fuse_batch_forget_in arg = { .count = 0 };
11431158
struct fuse_in_header ih = {
11441159
.opcode = FUSE_BATCH_FORGET,
1145-
.unique = fuse_get_unique(fiq),
1160+
.unique = fuse_get_unique_locked(fiq),
11461161
.len = sizeof(ih) + sizeof(arg),
11471162
};
11481163

@@ -1828,7 +1843,7 @@ static void fuse_resend(struct fuse_conn *fc)
18281843
}
18291844
/* iq and pq requests are both oldest to newest */
18301845
list_splice(&to_queue, &fiq->pending);
1831-
fiq->ops->wake_pending_and_unlock(fiq);
1846+
fuse_dev_wake_and_unlock(fiq);
18321847
}
18331848

18341849
static int fuse_notify_resend(struct fuse_conn *fc)

fs/fuse/fuse_i.h

Lines changed: 6 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -449,22 +449,19 @@ struct fuse_iqueue;
449449
*/
450450
struct fuse_iqueue_ops {
451451
/**
452-
* Signal that a forget has been queued
452+
* Send one forget
453453
*/
454-
void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
455-
__releases(fiq->lock);
454+
void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
456455

457456
/**
458-
* Signal that an INTERRUPT request has been queued
457+
* Send interrupt for request
459458
*/
460-
void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
461-
__releases(fiq->lock);
459+
void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
462460

463461
/**
464-
* Signal that a request has been queued
462+
* Send one request
465463
*/
466-
void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
467-
__releases(fiq->lock);
464+
void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
468465

469466
/**
470467
* Clean up when fuse_iqueue is destroyed
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
10531050

10541051
struct fuse_forget_link *fuse_alloc_forget(void);
10551052

1056-
struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
1057-
unsigned int max,
1058-
unsigned int *countp);
1059-
10601053
/*
10611054
* Initialize READ or READDIR request
10621055
*/

fs/fuse/virtio_fs.c

Lines changed: 13 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1091,22 +1091,13 @@ static struct virtio_driver virtio_fs_driver = {
10911091
#endif
10921092
};
10931093

1094-
static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
1095-
__releases(fiq->lock)
1094+
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
10961095
{
1097-
struct fuse_forget_link *link;
10981096
struct virtio_fs_forget *forget;
10991097
struct virtio_fs_forget_req *req;
1100-
struct virtio_fs *fs;
1101-
struct virtio_fs_vq *fsvq;
1102-
u64 unique;
1103-
1104-
link = fuse_dequeue_forget(fiq, 1, NULL);
1105-
unique = fuse_get_unique(fiq);
1106-
1107-
fs = fiq->priv;
1108-
fsvq = &fs->vqs[VQ_HIPRIO];
1109-
spin_unlock(&fiq->lock);
1098+
struct virtio_fs *fs = fiq->priv;
1099+
struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
1100+
u64 unique = fuse_get_unique(fiq);
11101101

11111102
/* Allocate a buffer for the request */
11121103
forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1117,7 @@ __releases(fiq->lock)
11261117
kfree(link);
11271118
}
11281119

1129-
static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
1130-
__releases(fiq->lock)
1120+
static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
11311121
{
11321122
/*
11331123
* TODO interrupts.
@@ -1136,7 +1126,6 @@ __releases(fiq->lock)
11361126
* Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
11371127
* with shared lock between host and guest.
11381128
*/
1139-
spin_unlock(&fiq->lock);
11401129
}
11411130

11421131
/* Count number of scatter-gather elements required */
@@ -1341,21 +1330,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
13411330
return ret;
13421331
}
13431332

1344-
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
1345-
__releases(fiq->lock)
1333+
static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
13461334
{
13471335
unsigned int queue_id;
13481336
struct virtio_fs *fs;
1349-
struct fuse_req *req;
13501337
struct virtio_fs_vq *fsvq;
13511338
int ret;
13521339

1353-
WARN_ON(list_empty(&fiq->pending));
1354-
req = list_last_entry(&fiq->pending, struct fuse_req, list);
1340+
if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
1341+
req->in.h.unique = fuse_get_unique(fiq);
1342+
13551343
clear_bit(FR_PENDING, &req->flags);
1356-
list_del_init(&req->list);
1357-
WARN_ON(!list_empty(&fiq->pending));
1358-
spin_unlock(&fiq->lock);
13591344

13601345
fs = fiq->priv;
13611346
queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1378,10 @@ __releases(fiq->lock)
13931378
}
13941379

13951380
static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
1396-
.wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock,
1397-
.wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock,
1398-
.wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock,
1399-
.release = virtio_fs_fiq_release,
1381+
.send_forget = virtio_fs_send_forget,
1382+
.send_interrupt = virtio_fs_send_interrupt,
1383+
.send_req = virtio_fs_send_req,
1384+
.release = virtio_fs_fiq_release,
14001385
};
14011386

14021387
static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)

0 commit comments

Comments
 (0)