Lines matching refs: fc
133 void fuse_set_initialized(struct fuse_conn *fc) in fuse_set_initialized() argument
137 fc->initialized = 1; in fuse_set_initialized()
140 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) in fuse_block_alloc() argument
142 return !fc->initialized || (for_background && fc->blocked); in fuse_block_alloc()
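
Taken together, the two matches above form the allocation gate: nothing may allocate a request before the INIT reply runs fuse_set_initialized(), and background requests are additionally throttled while fc->blocked is set. A minimal user-space model of that gate; the struct and everything outside the two matched lines are assumptions:

	#include <stdbool.h>

	/* Model only; field names mirror struct fuse_conn. */
	struct conn {
		bool initialized;	/* set once the INIT reply has arrived */
		bool blocked;		/* set while too many background reqs run */
	};

	static bool block_alloc(const struct conn *c, bool for_background)
	{
		/* Everyone waits for INIT; only background callers honor blocked. */
		return !c->initialized || (for_background && c->blocked);
	}
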
145 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, in __fuse_get_req() argument
150 atomic_inc(&fc->num_waiting); in __fuse_get_req()
152 if (fuse_block_alloc(fc, for_background)) { in __fuse_get_req()
157 intr = wait_event_interruptible_exclusive(fc->blocked_waitq, in __fuse_get_req()
158 !fuse_block_alloc(fc, for_background)); in __fuse_get_req()
168 if (!fc->connected) in __fuse_get_req()
175 wake_up(&fc->blocked_waitq); in __fuse_get_req()
185 atomic_dec(&fc->num_waiting); in __fuse_get_req()
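
Reconstructing __fuse_get_req() from these matches: the caller is counted into fc->num_waiting before it sleeps, the wait is exclusive so one wake-up releases exactly one allocator, and an abort that happens during the sleep is caught afterwards via fc->connected. A sketch; signal blocking, memory barriers, the allocation itself and the ERR_PTR() plumbing of the real function are elided or assumed:

	static struct fuse_req *get_req_sketch(struct fuse_conn *fc,
					       unsigned npages, bool for_background)
	{
		struct fuse_req *req = NULL;

		atomic_inc(&fc->num_waiting);		/* count ourselves early */
		if (fuse_block_alloc(fc, for_background)) {
			/* exclusive wait: one wake-up releases one allocator */
			if (wait_event_interruptible_exclusive(fc->blocked_waitq,
					!fuse_block_alloc(fc, for_background)))
				goto out_dec;		/* interrupted by a signal */
		}
		if (!fc->connected)
			goto out_dec;			/* aborted while we slept */

		/* ... allocate req for npages pages; on allocation failure a
		 * background caller re-wakes fc->blocked_waitq (line 175) ... */
		return req;

	 out_dec:
		atomic_dec(&fc->num_waiting);
		return NULL;				/* real code returns ERR_PTR() */
	}
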
189 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages) in fuse_get_req() argument
191 return __fuse_get_req(fc, npages, false); in fuse_get_req()
195 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc, in fuse_get_req_for_background() argument
198 return __fuse_get_req(fc, npages, true); in fuse_get_req_for_background()
207 static struct fuse_req *get_reserved_req(struct fuse_conn *fc, in get_reserved_req() argument
214 wait_event(fc->reserved_req_waitq, ff->reserved_req); in get_reserved_req()
215 spin_lock(&fc->lock); in get_reserved_req()
221 spin_unlock(&fc->lock); in get_reserved_req()
230 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) in put_reserved_req() argument
235 spin_lock(&fc->lock); in put_reserved_req()
239 wake_up_all(&fc->reserved_req_waitq); in put_reserved_req()
240 spin_unlock(&fc->lock); in put_reserved_req()
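
get_reserved_req()/put_reserved_req() implement the never-fail path: each open file carries one pre-allocated request, handed out under fc->lock and refilled on put, where the wake_up_all() on fc->reserved_req_waitq releases any waiter. A sketch of the take side; ff->reserved_req, the private_data cast and the get_file() bookkeeping are assumptions:

	/* Loop: another waiter may win the race between wakeup and lock. */
	static struct fuse_req *get_reserved_sketch(struct fuse_conn *fc,
						    struct file *file)
	{
		struct fuse_file *ff = file->private_data;	/* assumed */
		struct fuse_req *req = NULL;

		do {
			wait_event(fc->reserved_req_waitq, ff->reserved_req);
			spin_lock(&fc->lock);
			if (ff->reserved_req) {		/* re-check under the lock */
				req = ff->reserved_req;
				ff->reserved_req = NULL;
				req->stolen_file = get_file(file);	/* assumed field */
			}
			spin_unlock(&fc->lock);
		} while (!req);

		return req;
	}
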
257 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, in fuse_get_req_nofail_nopages() argument
262 atomic_inc(&fc->num_waiting); in fuse_get_req_nofail_nopages()
263 wait_event(fc->blocked_waitq, fc->initialized); in fuse_get_req_nofail_nopages()
268 req = get_reserved_req(fc, file); in fuse_get_req_nofail_nopages()
276 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) in fuse_put_request() argument
284 spin_lock(&fc->lock); in fuse_put_request()
285 if (!fc->blocked) in fuse_put_request()
286 wake_up(&fc->blocked_waitq); in fuse_put_request()
287 spin_unlock(&fc->lock); in fuse_put_request()
291 atomic_dec(&fc->num_waiting); in fuse_put_request()
294 put_reserved_req(fc, req); in fuse_put_request()
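
The put side, reconstructed from the matched fragments: a background request that dies before ever being sent still wakes one blocked allocator, a counted waiter gives back its num_waiting slot, and a request stolen from a file's reserve is recycled rather than freed. The flag names below are assumptions:

	/* Sketch of the final-reference path of fuse_put_request(). */
	if (req->background) {			/* assumed flag */
		spin_lock(&fc->lock);
		if (!fc->blocked)
			wake_up(&fc->blocked_waitq);
		spin_unlock(&fc->lock);
	}
	if (req->waiting)			/* assumed flag */
		atomic_dec(&fc->num_waiting);

	if (req->stolen_file)			/* assumed flag */
		put_reserved_req(fc, req);	/* recycle the per-file request */
	else
		fuse_request_free(req);		/* assumed free helper */
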
312 static u64 fuse_get_unique(struct fuse_conn *fc) in fuse_get_unique() argument
314 fc->reqctr++; in fuse_get_unique()
316 if (fc->reqctr == 0) in fuse_get_unique()
317 fc->reqctr = 1; in fuse_get_unique()
319 return fc->reqctr; in fuse_get_unique()
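
fuse_get_unique() is a plain counter; every caller in this file holds fc->lock, so no atomics are needed. It skips zero on wrap-around because a unique id of 0 is reserved (a message with unique 0 carries a notification rather than a reply). A runnable user-space model:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t reqctr;

	/* Model of the id generator: 0 is never handed out. */
	static uint64_t get_unique(void)
	{
		reqctr++;
		if (reqctr == 0)	/* wrapped: skip the reserved value */
			reqctr = 1;
		return reqctr;
	}

	int main(void)
	{
		reqctr = UINT64_MAX;	/* force the wrap on the next call */
		printf("%llu\n", (unsigned long long)get_unique());	/* prints 1 */
		return 0;
	}
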
322 static void queue_request(struct fuse_conn *fc, struct fuse_req *req) in queue_request() argument
326 list_add_tail(&req->list, &fc->pending); in queue_request()
330 atomic_inc(&fc->num_waiting); in queue_request()
332 wake_up(&fc->waitq); in queue_request()
333 kill_fasync(&fc->fasync, SIGIO, POLL_IN); in queue_request()
336 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, in fuse_queue_forget() argument
342 spin_lock(&fc->lock); in fuse_queue_forget()
343 if (fc->connected) { in fuse_queue_forget()
344 fc->forget_list_tail->next = forget; in fuse_queue_forget()
345 fc->forget_list_tail = forget; in fuse_queue_forget()
346 wake_up(&fc->waitq); in fuse_queue_forget()
347 kill_fasync(&fc->fasync, SIGIO, POLL_IN); in fuse_queue_forget()
351 spin_unlock(&fc->lock); in fuse_queue_forget()
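
Forgets bypass the regular request machinery: fuse_queue_forget() appends to a hand-rolled singly linked list with a dummy head node and a tail pointer, giving O(1) append without a struct list_head per entry and letting whole batches be detached in one splice (see dequeue_forget() below). A user-space model of the enqueue; the struct layout is assumed:

	#include <stdint.h>

	struct forget_link {
		uint64_t nodeid;
		uint64_t nlookup;
		struct forget_link *next;
	};

	/* Names mirror fc->forget_list_head / fc->forget_list_tail. */
	struct forget_queue {
		struct forget_link head;	/* dummy; head.next == NULL when empty */
		struct forget_link *tail;	/* == &head when empty */
	};

	static void queue_forget(struct forget_queue *q, struct forget_link *f)
	{
		f->next = NULL;
		q->tail->next = f;	/* mirrors fc->forget_list_tail->next = forget */
		q->tail = f;		/* mirrors fc->forget_list_tail = forget */
	}
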
354 static void flush_bg_queue(struct fuse_conn *fc) in flush_bg_queue() argument
356 while (fc->active_background < fc->max_background && in flush_bg_queue()
357 !list_empty(&fc->bg_queue)) { in flush_bg_queue()
360 req = list_entry(fc->bg_queue.next, struct fuse_req, list); in flush_bg_queue()
362 fc->active_background++; in flush_bg_queue()
363 req->in.h.unique = fuse_get_unique(fc); in flush_bg_queue()
364 queue_request(fc, req); in flush_bg_queue()
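
flush_bg_queue() is the throttle's drain: backgrounded requests park on fc->bg_queue and at most fc->max_background of them may be active at once; note that the unique id is assigned only at the moment a request actually reaches the pending list. Nearly the whole loop is visible in the matches; the only assumed step in this sketch is the list_del():

	static void flush_bg_queue_sketch(struct fuse_conn *fc)
	{
		while (fc->active_background < fc->max_background &&
		       !list_empty(&fc->bg_queue)) {
			struct fuse_req *req =
				list_entry(fc->bg_queue.next, struct fuse_req, list);

			list_del(&req->list);			/* assumed */
			fc->active_background++;
			req->in.h.unique = fuse_get_unique(fc);	/* id assigned late */
			queue_request(fc, req);
		}
	}
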
378 static void request_end(struct fuse_conn *fc, struct fuse_req *req) in request_end() argument
379 __releases(fc->lock) in request_end()
389 if (fc->num_background == fc->max_background) in request_end()
390 fc->blocked = 0; in request_end()
393 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) in request_end()
394 wake_up(&fc->blocked_waitq); in request_end()
396 if (fc->num_background == fc->congestion_threshold && in request_end()
397 fc->connected && fc->bdi_initialized) { in request_end()
398 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC); in request_end()
399 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC); in request_end()
401 fc->num_background--; in request_end()
402 fc->active_background--; in request_end()
403 flush_bg_queue(fc); in request_end()
405 spin_unlock(&fc->lock); in request_end()
408 end(fc, req); in request_end()
409 fuse_put_request(fc, req); in request_end()
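
request_end() undoes the background accounting that fuse_request_send_nowait_locked() sets up (lines 593-599 below): crossing max_background downward clears fc->blocked and wakes one allocator, crossing congestion_threshold downward lifts bdi congestion, and flush_bg_queue() refills the slot just freed. A sketch of that tail, assuming an is-background guard around it:

	/* Runs under fc->lock, which the function releases at the end. */
	if (req->background) {				/* assumed guard */
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;		/* dropping below the limit */

		/* wake one allocator parked in __fuse_get_req() */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);			/* hand the slot onward */
	}
	spin_unlock(&fc->lock);
	/* ... then run req->end() and drop the reference, unlocked ... */
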
412 static void wait_answer_interruptible(struct fuse_conn *fc, in wait_answer_interruptible() argument
414 __releases(fc->lock) in wait_answer_interruptible()
415 __acquires(fc->lock) in wait_answer_interruptible()
420 spin_unlock(&fc->lock); in wait_answer_interruptible()
422 spin_lock(&fc->lock); in wait_answer_interruptible()
425 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) in queue_interrupt() argument
427 list_add_tail(&req->intr_entry, &fc->interrupts); in queue_interrupt()
428 wake_up(&fc->waitq); in queue_interrupt()
429 kill_fasync(&fc->fasync, SIGIO, POLL_IN); in queue_interrupt()
432 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) in request_wait_answer() argument
433 __releases(fc->lock) in request_wait_answer()
434 __acquires(fc->lock) in request_wait_answer()
436 if (!fc->no_interrupt) { in request_wait_answer()
438 wait_answer_interruptible(fc, req); in request_wait_answer()
447 queue_interrupt(fc, req); in request_wait_answer()
455 wait_answer_interruptible(fc, req); in request_wait_answer()
476 spin_unlock(&fc->lock); in request_wait_answer()
478 spin_lock(&fc->lock); in request_wait_answer()
491 spin_unlock(&fc->lock); in request_wait_answer()
493 spin_lock(&fc->lock); in request_wait_answer()
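
This cluster traces the answer wait: an interruptible wait first (the helper drops and retakes fc->lock around the sleep, per its __releases/__acquires annotations), a FUSE_INTERRUPT queued if a signal arrives after the daemon has already read the request, and two more unlock/lock pairs around later sleeps. A rough sketch; 'sent' and 'finished' stand in for the real request-state tracking, which these matches do not show:

	if (!fc->no_interrupt) {
		wait_answer_interruptible(fc, req);	/* unlocks fc->lock to sleep */
		if (req->finished)			/* invented flag */
			return;
		if (req->sent)				/* invented flag */
			queue_interrupt(fc, req);	/* ask the daemon to abandon it */
	}
	/* ... if still unanswered, wait again without interruption ... */
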
497 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) in __fuse_request_send() argument
500 spin_lock(&fc->lock); in __fuse_request_send()
501 if (!fc->connected) in __fuse_request_send()
503 else if (fc->conn_error) in __fuse_request_send()
506 req->in.h.unique = fuse_get_unique(fc); in __fuse_request_send()
507 queue_request(fc, req); in __fuse_request_send()
512 request_wait_answer(fc, req); in __fuse_request_send()
514 spin_unlock(&fc->lock); in __fuse_request_send()
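
__fuse_request_send() is the synchronous path: everything from the connectivity check to the sleep in request_wait_answer() happens under fc->lock, with the waiters dropping and retaking it only around the actual sleep. A sketch; the specific error values are assumptions based on common kernel conventions:

	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;		/* assumed plumbing */
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;	/* assumed */
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		request_wait_answer(fc, req);		/* sleeps; drops fc->lock */
	}
	spin_unlock(&fc->lock);
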
517 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) in fuse_request_send() argument
520 __fuse_request_send(fc, req); in fuse_request_send()
524 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) in fuse_adjust_compat() argument
526 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) in fuse_adjust_compat()
529 if (fc->minor < 9) { in fuse_adjust_compat()
545 if (fc->minor < 12) { in fuse_adjust_compat()
557 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) in fuse_simple_request() argument
562 req = fuse_get_req(fc, 0); in fuse_simple_request()
567 fuse_adjust_compat(fc, args); in fuse_simple_request()
578 fuse_request_send(fc, req); in fuse_simple_request()
584 fuse_put_request(fc, req); in fuse_simple_request()
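
fuse_simple_request() wraps the allocate/adjust/send/put sequence above for callers that describe an operation declaratively in struct fuse_args. A hypothetical caller to show the calling convention; FUSE_SOMEOP and both arg structs are invented for illustration:

	struct someop_in { u32 flags; };	/* invented */
	struct someop_out { u32 result; };	/* invented */

	static int fuse_do_someop(struct fuse_conn *fc, u64 nodeid,
				  const struct someop_in *inarg,
				  struct someop_out *outarg)
	{
		struct fuse_args args = {};

		args.in.h.opcode = FUSE_SOMEOP;		/* invented opcode */
		args.in.h.nodeid = nodeid;
		args.in.numargs = 1;
		args.in.args[0].size = sizeof(*inarg);
		args.in.args[0].value = inarg;
		args.out.numargs = 1;
		args.out.args[0].size = sizeof(*outarg);
		args.out.args[0].value = outarg;

		return fuse_simple_request(fc, &args);	/* ssize_t: <0 is -errno */
	}
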
589 static void fuse_request_send_nowait_locked(struct fuse_conn *fc, in fuse_request_send_nowait_locked() argument
593 fc->num_background++; in fuse_request_send_nowait_locked()
594 if (fc->num_background == fc->max_background) in fuse_request_send_nowait_locked()
595 fc->blocked = 1; in fuse_request_send_nowait_locked()
596 if (fc->num_background == fc->congestion_threshold && in fuse_request_send_nowait_locked()
597 fc->bdi_initialized) { in fuse_request_send_nowait_locked()
598 set_bdi_congested(&fc->bdi, BLK_RW_SYNC); in fuse_request_send_nowait_locked()
599 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC); in fuse_request_send_nowait_locked()
601 list_add_tail(&req->list, &fc->bg_queue); in fuse_request_send_nowait_locked()
602 flush_bg_queue(fc); in fuse_request_send_nowait_locked()
605 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) in fuse_request_send_nowait() argument
607 spin_lock(&fc->lock); in fuse_request_send_nowait()
608 if (fc->connected) { in fuse_request_send_nowait()
609 fuse_request_send_nowait_locked(fc, req); in fuse_request_send_nowait()
610 spin_unlock(&fc->lock); in fuse_request_send_nowait()
613 request_end(fc, req); in fuse_request_send_nowait()
617 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) in fuse_request_send_background() argument
620 fuse_request_send_nowait(fc, req); in fuse_request_send_background()
624 static int fuse_request_send_notify_reply(struct fuse_conn *fc, in fuse_request_send_notify_reply() argument
631 spin_lock(&fc->lock); in fuse_request_send_notify_reply()
632 if (fc->connected) { in fuse_request_send_notify_reply()
633 queue_request(fc, req); in fuse_request_send_notify_reply()
636 spin_unlock(&fc->lock); in fuse_request_send_notify_reply()
646 void fuse_request_send_background_locked(struct fuse_conn *fc, in fuse_request_send_background_locked() argument
650 fuse_request_send_nowait_locked(fc, req); in fuse_request_send_background_locked()
656 struct fuse_conn *fc = get_fuse_conn(inode); in fuse_force_forget() local
662 req = fuse_get_req_nofail_nopages(fc, file); in fuse_force_forget()
669 __fuse_request_send(fc, req); in fuse_force_forget()
671 fuse_put_request(fc, req); in fuse_force_forget()
679 static int lock_request(struct fuse_conn *fc, struct fuse_req *req) in lock_request() argument
683 spin_lock(&fc->lock); in lock_request()
688 spin_unlock(&fc->lock); in lock_request()
698 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) in unlock_request() argument
701 spin_lock(&fc->lock); in unlock_request()
705 spin_unlock(&fc->lock); in unlock_request()
710 struct fuse_conn *fc; member
725 struct fuse_conn *fc, in fuse_copy_init() argument
730 cs->fc = fc; in fuse_copy_init()
763 unlock_request(cs->fc, cs->req); in fuse_copy_fill()
812 return lock_request(cs->fc, cs->req); in fuse_copy_fill()
863 unlock_request(cs->fc, cs->req); in fuse_try_move_page()
917 spin_lock(&cs->fc->lock); in fuse_try_move_page()
922 spin_unlock(&cs->fc->lock); in fuse_try_move_page()
942 err = lock_request(cs->fc, cs->req); in fuse_try_move_page()
957 unlock_request(cs->fc, cs->req); in fuse_ref_page()
1068 static int forget_pending(struct fuse_conn *fc) in forget_pending() argument
1070 return fc->forget_list_head.next != NULL; in forget_pending()
1073 static int request_pending(struct fuse_conn *fc) in request_pending() argument
1075 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) || in request_pending()
1076 forget_pending(fc); in request_pending()
1080 static void request_wait(struct fuse_conn *fc) in request_wait() argument
1081 __releases(fc->lock) in request_wait()
1082 __acquires(fc->lock) in request_wait()
1086 add_wait_queue_exclusive(&fc->waitq, &wait); in request_wait()
1087 while (fc->connected && !request_pending(fc)) { in request_wait()
1092 spin_unlock(&fc->lock); in request_wait()
1094 spin_lock(&fc->lock); in request_wait()
1097 remove_wait_queue(&fc->waitq, &wait); in request_wait()
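
request_wait() is the classic open-coded exclusive wait: register on fc->waitq, re-check the condition, and drop fc->lock only for the schedule() itself. A sketch of the loop the matches outline; the task-state and signal calls are assumed:

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);	/* assumed */
		if (signal_pending(current))		/* assumed */
			break;

		spin_unlock(&fc->lock);	/* never sleep holding the lock */
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
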
1108 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, in fuse_read_interrupt() argument
1110 __releases(fc->lock) in fuse_read_interrupt()
1118 req->intr_unique = fuse_get_unique(fc); in fuse_read_interrupt()
1126 spin_unlock(&fc->lock); in fuse_read_interrupt()
1138 static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc, in dequeue_forget() argument
1142 struct fuse_forget_link *head = fc->forget_list_head.next; in dequeue_forget()
1149 fc->forget_list_head.next = *newhead; in dequeue_forget()
1151 if (fc->forget_list_head.next == NULL) in dequeue_forget()
1152 fc->forget_list_tail = &fc->forget_list_head; in dequeue_forget()
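
dequeue_forget() is the splice counterpart of the enqueue model shown earlier: walk at most max links, detach that prefix as a ready-made chain, and reset the tail to the dummy head if the queue drained. Continuing the user-space model:

	static struct forget_link *dequeue_forget(struct forget_queue *q,
						  unsigned int max,
						  unsigned int *countp)
	{
		struct forget_link *head = q->head.next;
		struct forget_link **newhead = &head;
		unsigned int count;

		for (count = 0; *newhead != NULL && count < max; count++)
			newhead = &(*newhead)->next;

		q->head.next = *newhead;	/* remainder is the new front */
		*newhead = NULL;		/* terminate the detached chain */
		if (q->head.next == NULL)
			q->tail = &q->head;	/* drained: tail back to dummy */

		if (countp != NULL)
			*countp = count;
		return head;
	}
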
1160 static int fuse_read_single_forget(struct fuse_conn *fc, in fuse_read_single_forget() argument
1163 __releases(fc->lock) in fuse_read_single_forget()
1166 struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL); in fuse_read_single_forget()
1173 .unique = fuse_get_unique(fc), in fuse_read_single_forget()
1177 spin_unlock(&fc->lock); in fuse_read_single_forget()
1193 static int fuse_read_batch_forget(struct fuse_conn *fc, in fuse_read_batch_forget() argument
1195 __releases(fc->lock) in fuse_read_batch_forget()
1204 .unique = fuse_get_unique(fc), in fuse_read_batch_forget()
1209 spin_unlock(&fc->lock); in fuse_read_batch_forget()
1214 head = dequeue_forget(fc, max_forgets, &count); in fuse_read_batch_forget()
1215 spin_unlock(&fc->lock); in fuse_read_batch_forget()
1242 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, in fuse_read_forget() argument
1244 __releases(fc->lock) in fuse_read_forget()
1246 if (fc->minor < 16 || fc->forget_list_head.next->next == NULL) in fuse_read_forget()
1247 return fuse_read_single_forget(fc, cs, nbytes); in fuse_read_forget()
1249 return fuse_read_batch_forget(fc, cs, nbytes); in fuse_read_forget()
1261 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, in fuse_dev_do_read() argument
1270 spin_lock(&fc->lock); in fuse_dev_do_read()
1272 if ((file->f_flags & O_NONBLOCK) && fc->connected && in fuse_dev_do_read()
1273 !request_pending(fc)) in fuse_dev_do_read()
1276 request_wait(fc); in fuse_dev_do_read()
1278 if (!fc->connected) in fuse_dev_do_read()
1281 if (!request_pending(fc)) in fuse_dev_do_read()
1284 if (!list_empty(&fc->interrupts)) { in fuse_dev_do_read()
1285 req = list_entry(fc->interrupts.next, struct fuse_req, in fuse_dev_do_read()
1287 return fuse_read_interrupt(fc, cs, nbytes, req); in fuse_dev_do_read()
1290 if (forget_pending(fc)) { in fuse_dev_do_read()
1291 if (list_empty(&fc->pending) || fc->forget_batch-- > 0) in fuse_dev_do_read()
1292 return fuse_read_forget(fc, cs, nbytes); in fuse_dev_do_read()
1294 if (fc->forget_batch <= -8) in fuse_dev_do_read()
1295 fc->forget_batch = 16; in fuse_dev_do_read()
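
The forget_batch counter implements cheap fairness between the forget queue and regular requests: prefer forgets while the counter is positive, then prefer requests until it sinks to -8, then rearm at 16 — roughly 16 forgets for every 8 requests under sustained load. A user-space model of the decision; the initial value is an assumption:

	#include <stdbool.h>

	static int forget_batch = 16;	/* assumed initial value */

	/* Returns true when the reader should emit a forget next. */
	static bool pick_forget(bool forgets_pending, bool requests_pending)
	{
		if (!forgets_pending)
			return false;
		if (!requests_pending || forget_batch-- > 0)
			return true;	/* forgets preferred, or nothing else queued */
		if (forget_batch <= -8)
			forget_batch = 16;	/* rearm after 8 request picks */
		return false;
	}
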
1298 req = list_entry(fc->pending.next, struct fuse_req, list); in fuse_dev_do_read()
1300 list_move(&req->list, &fc->io); in fuse_dev_do_read()
1310 request_end(fc, req); in fuse_dev_do_read()
1313 spin_unlock(&fc->lock); in fuse_dev_do_read()
1320 spin_lock(&fc->lock); in fuse_dev_do_read()
1323 request_end(fc, req); in fuse_dev_do_read()
1328 request_end(fc, req); in fuse_dev_do_read()
1332 request_end(fc, req); in fuse_dev_do_read()
1335 list_move_tail(&req->list, &fc->processing); in fuse_dev_do_read()
1337 queue_interrupt(fc, req); in fuse_dev_do_read()
1338 spin_unlock(&fc->lock); in fuse_dev_do_read()
1343 spin_unlock(&fc->lock); in fuse_dev_do_read()
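
Putting fuse_dev_do_read() together from the matches, the dispatch order is: fail fast for O_NONBLOCK readers, sleep in request_wait(), then serve queued interrupts first, forgets second (rate-limited as above), and finally the oldest pending request, which moves to fc->io while its arguments are copied out and on to fc->processing once the daemon has it. A condensed sketch; the unlock-on-return plumbing and the dead-connection errno are assumptions:

	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);			/* sleeps; drops/retakes fc->lock */
	err = -ENODEV;				/* assumed errno */
	if (!fc->connected)
		goto err_unlock;
	err = -EAGAIN;
	if (!request_pending(fc))		/* woken by a signal */
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {	/* 1: interrupts preempt all */
		req = list_entry(fc->interrupts.next, struct fuse_req, intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}
	if (forget_pending(fc)) {		/* 2: forgets, rate-limited */
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);
		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}
	req = list_entry(fc->pending.next, struct fuse_req, list);	/* 3 */
	list_move(&req->list, &fc->io);		/* pinned while copying out */
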
1362 struct fuse_conn *fc = fuse_get_conn(file); in fuse_dev_read() local
1363 if (!fc) in fuse_dev_read()
1369 fuse_copy_init(&cs, fc, 1, to); in fuse_dev_read()
1371 return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to)); in fuse_dev_read()
1383 struct fuse_conn *fc = fuse_get_conn(in); in fuse_dev_splice_read() local
1384 if (!fc) in fuse_dev_splice_read()
1391 fuse_copy_init(&cs, fc, 1, NULL); in fuse_dev_splice_read()
1394 ret = fuse_dev_do_read(fc, in, &cs, len); in fuse_dev_splice_read()
1452 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, in fuse_notify_poll() argument
1466 return fuse_notify_poll_wakeup(fc, &outarg); in fuse_notify_poll()
1473 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_inode() argument
1487 down_read(&fc->killsb); in fuse_notify_inval_inode()
1489 if (fc->sb) { in fuse_notify_inval_inode()
1490 err = fuse_reverse_inval_inode(fc->sb, outarg.ino, in fuse_notify_inval_inode()
1493 up_read(&fc->killsb); in fuse_notify_inval_inode()
1501 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_entry() argument
1538 down_read(&fc->killsb); in fuse_notify_inval_entry()
1540 if (fc->sb) in fuse_notify_inval_entry()
1541 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name); in fuse_notify_inval_entry()
1542 up_read(&fc->killsb); in fuse_notify_inval_entry()
1552 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, in fuse_notify_delete() argument
1589 down_read(&fc->killsb); in fuse_notify_delete()
1591 if (fc->sb) in fuse_notify_delete()
1592 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, in fuse_notify_delete()
1594 up_read(&fc->killsb); in fuse_notify_delete()
1604 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, in fuse_notify_store() argument
1632 down_read(&fc->killsb); in fuse_notify_store()
1635 if (!fc->sb) in fuse_notify_store()
1638 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); in fuse_notify_store()
1684 up_read(&fc->killsb); in fuse_notify_store()
1690 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) in fuse_retrieve_end() argument
1695 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, in fuse_retrieve() argument
1720 req = fuse_get_req(fc, num_pages); in fuse_retrieve()
1757 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); in fuse_retrieve()
1759 fuse_retrieve_end(fc, req); in fuse_retrieve()
1764 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, in fuse_notify_retrieve() argument
1781 down_read(&fc->killsb); in fuse_notify_retrieve()
1783 if (fc->sb) { in fuse_notify_retrieve()
1786 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); in fuse_notify_retrieve()
1788 err = fuse_retrieve(fc, inode, &outarg); in fuse_notify_retrieve()
1792 up_read(&fc->killsb); in fuse_notify_retrieve()
1801 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, in fuse_notify() argument
1809 return fuse_notify_poll(fc, size, cs); in fuse_notify()
1812 return fuse_notify_inval_inode(fc, size, cs); in fuse_notify()
1815 return fuse_notify_inval_entry(fc, size, cs); in fuse_notify()
1818 return fuse_notify_store(fc, size, cs); in fuse_notify()
1821 return fuse_notify_retrieve(fc, size, cs); in fuse_notify()
1824 return fuse_notify_delete(fc, size, cs); in fuse_notify()
1833 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) in request_find() argument
1837 list_for_each_entry(req, &fc->processing, list) { in request_find()
1874 static ssize_t fuse_dev_do_write(struct fuse_conn *fc, in fuse_dev_do_write() argument
1897 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); in fuse_dev_do_write()
1905 spin_lock(&fc->lock); in fuse_dev_do_write()
1907 if (!fc->connected) in fuse_dev_do_write()
1910 req = request_find(fc, oh.unique); in fuse_dev_do_write()
1915 spin_unlock(&fc->lock); in fuse_dev_do_write()
1917 spin_lock(&fc->lock); in fuse_dev_do_write()
1918 request_end(fc, req); in fuse_dev_do_write()
1928 fc->no_interrupt = 1; in fuse_dev_do_write()
1930 queue_interrupt(fc, req); in fuse_dev_do_write()
1932 spin_unlock(&fc->lock); in fuse_dev_do_write()
1938 list_move(&req->list, &fc->io); in fuse_dev_do_write()
1944 spin_unlock(&fc->lock); in fuse_dev_do_write()
1949 spin_lock(&fc->lock); in fuse_dev_do_write()
1956 request_end(fc, req); in fuse_dev_do_write()
1961 spin_unlock(&fc->lock); in fuse_dev_do_write()
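
The write side inverts it: fuse_dev_do_write() reads a fuse_out_header first, treats unique == 0 as a notification (dispatched through fuse_notify(), with oh.error reused as the notification code), matches everything else against fc->processing, and special-cases replies to FUSE_INTERRUPT, where -ENOSYS permanently disables interrupts and -EAGAIN requeues one. A sketch; the unique == 0 convention and the errno checks come from the FUSE protocol, not from the matches themselves:

	if (!oh.unique)		/* notification from the daemon */
		return fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);

	spin_lock(&fc->lock);
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);	/* walks fc->processing */
	if (!req)
		goto err_unlock;

	if (req->intr_unique == oh.unique) {	/* reply to an interrupt */
		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;	/* daemon will never support it */
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);	/* try again later */
		spin_unlock(&fc->lock);
		return nbytes;
	}

	list_move(&req->list, &fc->io);	/* copy the reply body unlocked */
	/* ... fuse_copy_* the payload, then request_end(fc, req) ... */
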
1970 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); in fuse_dev_write() local
1971 if (!fc) in fuse_dev_write()
1977 fuse_copy_init(&cs, fc, 0, from); in fuse_dev_write()
1979 return fuse_dev_do_write(fc, &cs, iov_iter_count(from)); in fuse_dev_write()
1990 struct fuse_conn *fc; in fuse_dev_splice_write() local
1994 fc = fuse_get_conn(out); in fuse_dev_splice_write()
1995 if (!fc) in fuse_dev_splice_write()
2042 fuse_copy_init(&cs, fc, 0, NULL); in fuse_dev_splice_write()
2050 ret = fuse_dev_do_write(fc, &cs, len); in fuse_dev_splice_write()
2064 struct fuse_conn *fc = fuse_get_conn(file); in fuse_dev_poll() local
2065 if (!fc) in fuse_dev_poll()
2068 poll_wait(file, &fc->waitq, wait); in fuse_dev_poll()
2070 spin_lock(&fc->lock); in fuse_dev_poll()
2071 if (!fc->connected) in fuse_dev_poll()
2073 else if (request_pending(fc)) in fuse_dev_poll()
2075 spin_unlock(&fc->lock); in fuse_dev_poll()
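
fuse_dev_poll() is simple by comparison: the device is always writable, readable whenever request_pending() holds, and a dead connection reports an error. A sketch with the mask values assumed:

	unsigned mask = POLLOUT | POLLWRNORM;	/* writes always possible */

	poll_wait(file, &fc->waitq, wait);
	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;			/* assumed */
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);
	return mask;
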
2085 static void end_requests(struct fuse_conn *fc, struct list_head *head) in end_requests() argument
2086 __releases(fc->lock) in end_requests()
2087 __acquires(fc->lock) in end_requests()
2093 request_end(fc, req); in end_requests()
2094 spin_lock(&fc->lock); in end_requests()
2109 static void end_io_requests(struct fuse_conn *fc) in end_io_requests() argument
2110 __releases(fc->lock) in end_io_requests()
2111 __acquires(fc->lock) in end_io_requests()
2113 while (!list_empty(&fc->io)) { in end_io_requests()
2115 list_entry(fc->io.next, struct fuse_req, list); in end_io_requests()
2126 spin_unlock(&fc->lock); in end_io_requests()
2128 end(fc, req); in end_io_requests()
2129 fuse_put_request(fc, req); in end_io_requests()
2130 spin_lock(&fc->lock); in end_io_requests()
2135 static void end_queued_requests(struct fuse_conn *fc) in end_queued_requests() argument
2136 __releases(fc->lock) in end_queued_requests()
2137 __acquires(fc->lock) in end_queued_requests()
2139 fc->max_background = UINT_MAX; in end_queued_requests()
2140 flush_bg_queue(fc); in end_queued_requests()
2141 end_requests(fc, &fc->pending); in end_queued_requests()
2142 end_requests(fc, &fc->processing); in end_queued_requests()
2143 while (forget_pending(fc)) in end_queued_requests()
2144 kfree(dequeue_forget(fc, 1, NULL)); in end_queued_requests()
2147 static void end_polls(struct fuse_conn *fc) in end_polls() argument
2151 p = rb_first(&fc->polled_files); in end_polls()
2181 void fuse_abort_conn(struct fuse_conn *fc) in fuse_abort_conn() argument
2183 spin_lock(&fc->lock); in fuse_abort_conn()
2184 if (fc->connected) { in fuse_abort_conn()
2185 fc->connected = 0; in fuse_abort_conn()
2186 fc->blocked = 0; in fuse_abort_conn()
2187 fuse_set_initialized(fc); in fuse_abort_conn()
2188 end_io_requests(fc); in fuse_abort_conn()
2189 end_queued_requests(fc); in fuse_abort_conn()
2190 end_polls(fc); in fuse_abort_conn()
2191 wake_up_all(&fc->waitq); in fuse_abort_conn()
2192 wake_up_all(&fc->blocked_waitq); in fuse_abort_conn()
2193 kill_fasync(&fc->fasync, SIGIO, POLL_IN); in fuse_abort_conn()
2195 spin_unlock(&fc->lock); in fuse_abort_conn()
2201 struct fuse_conn *fc = fuse_get_conn(file); in fuse_dev_release() local
2202 if (fc) { in fuse_dev_release()
2203 spin_lock(&fc->lock); in fuse_dev_release()
2204 fc->connected = 0; in fuse_dev_release()
2205 fc->blocked = 0; in fuse_dev_release()
2206 fuse_set_initialized(fc); in fuse_dev_release()
2207 end_queued_requests(fc); in fuse_dev_release()
2208 end_polls(fc); in fuse_dev_release()
2209 wake_up_all(&fc->blocked_waitq); in fuse_dev_release()
2210 spin_unlock(&fc->lock); in fuse_dev_release()
2211 fuse_conn_put(fc); in fuse_dev_release()
2220 struct fuse_conn *fc = fuse_get_conn(file); in fuse_dev_fasync() local
2221 if (!fc) in fuse_dev_fasync()
2225 return fasync_helper(fd, file, on, &fc->fasync); in fuse_dev_fasync()