Lines matching references to m (the struct p9_conn pointer) in net/9p/trans_fd.c, grouped by function. Elided source lines are marked with "...".

In p9_mux_poll_stop():

static void p9_mux_poll_stop(struct p9_conn *m)
	...
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];
	...
	list_del_init(&m->poll_pending_link);
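
p9_mux_poll_stop() releases each occupied slot of the fixed poll_wait[] table and then unlinks the connection from the global pending list with list_del_init() rather than plain list_del(). Re-initializing the link matters because p9_pollwake() (further down) uses list_empty() on that same link as an "already queued?" test. A minimal userspace sketch of that idiom, with hand-rolled stand-ins for the kernel's <linux/list.h> helpers:

#include <stdio.h>

/* Stand-in for the kernel's circular doubly linked list. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev; e->next = h;
	h->prev->next = e; h->prev = e;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

/* list_del_init() re-points the entry at itself, so a later
 * list_empty(&entry) reports "not on any list": the membership test
 * p9_pollwake() relies on before re-queueing a connection. */
static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

int main(void)
{
	struct list_head pending, link;

	INIT_LIST_HEAD(&pending);
	INIT_LIST_HEAD(&link);

	if (list_empty(&link))			/* not queued yet: queue it */
		list_add_tail(&link, &pending);
	printf("queued: %d\n", !list_empty(&link));	/* queued: 1 */

	list_del_init(&link);			/* unlink, as in p9_mux_poll_stop() */
	printf("queued: %d\n", !list_empty(&link));	/* queued: 0 */
	return 0;
}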

In p9_conn_cancel():

static void p9_conn_cancel(struct p9_conn *m, int err)
	...
	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	...
	spin_lock_irqsave(&m->client->lock, flags);
	...
	if (m->err) {
		spin_unlock_irqrestore(&m->client->lock, flags);
	...
	m->err = err;
	...
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
	...
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
	...
	spin_unlock_irqrestore(&m->client->lock, flags);
	...
	p9_client_cb(m->client, req, REQ_STATUS_ERROR);
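
p9_conn_cancel() latches the first error into m->err under client->lock, then fails every request on both the in-flight req_list and the unsent_req_list. The traversals use list_for_each_entry_safe() because entries are unlinked while the list is being walked; the _safe variant keeps a second cursor one node ahead, so removing the current node cannot break the iteration. A self-contained userspace sketch of that macro (re-declared locally for illustration; struct fake_req is a hypothetical stand-in for struct p9_req_t, and typeof is the GNU C extension the kernel itself uses):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Safe-against-removal traversal: 'n' caches the next element, so 'pos'
 * may be unlinked (or freed) inside the loop body. */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev; e->next = h;
	h->prev->next = e; h->prev = e;
}
static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct fake_req {			/* illustrative stand-in for struct p9_req_t */
	int tag;
	struct list_head req_list;
};

int main(void)
{
	struct list_head req_list;
	struct fake_req reqs[3], *req, *rtmp;
	int i;

	INIT_LIST_HEAD(&req_list);
	for (i = 0; i < 3; i++) {
		reqs[i].tag = i;
		list_add_tail(&reqs[i].req_list, &req_list);
	}

	/* Mirror the cancel path: unlink each request, then "complete" it. */
	list_for_each_entry_safe(req, rtmp, &req_list, req_list) {
		list_del(&req->req_list);
		printf("failing tag %d\n", req->tag);
	}
	return 0;
}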

In p9_read_work():

	struct p9_conn *m;
	...
	m = container_of(work, struct p9_conn, rq);
	...
	if (m->err < 0)
	...
	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
	...
	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	...
	clear_bit(Rpending, &m->wsched);
	...
		 m, m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
			 m->rsize - m->rpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	...
	m->rpos += err;
	...
	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
	...
		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
	...
		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
	...
			 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
	...
		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
	...
		if (m->req->rc == NULL) {
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
					     m->client->msize, GFP_NOFS);
			if (!m->req->rc) {
				m->req = NULL;
	...
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	...
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
	...
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
	...
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req, status);
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	...
	clear_bit(Rworksched, &m->wsched);
	...
	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
	...
			n = p9_fd_poll(m->client, NULL);
	...
		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
	...
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
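
The read state machine first pulls in the fixed 7-byte 9P header (size[4] little-endian, type[1], tag[2] little-endian) via tmp_buf, bounds-checks size against the negotiated msize, resolves the tag to a pending request with p9_tag_lookup(), and then widens rsize so the rest of the packet lands in the request's own buffer. A minimal sketch of just the header decode; the sample bytes and msize value are illustrative, while the offsets match the loads from m->rbuf and m->rbuf+5 above:

#include <stdint.h>
#include <stdio.h>

#define HDR_LEN 7			/* size[4] + type[1] + tag[2] */

/* Portable little-endian decodes (stand-ins for le32_to_cpu/le16_to_cpu). */
static uint32_t get_le32(const unsigned char *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t get_le16(const unsigned char *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

int main(void)
{
	/* Example header: an Rversion reply, size=19, type=101, tag=NOTAG. */
	const unsigned char hdr[HDR_LEN] = {
		0x13, 0x00, 0x00, 0x00,	/* size[4] = 19 */
		0x65,			/* type[1] = 101 (Rversion) */
		0xff, 0xff,		/* tag[2]  = 0xffff (NOTAG) */
	};
	const uint32_t msize = 8192;	/* illustrative negotiated msize */

	uint32_t size = get_le32(hdr);		/* like le32_to_cpu(*(__le32 *)m->rbuf) */
	uint16_t tag  = get_le16(hdr + 5);	/* like le16_to_cpu(*(__le16 *)(m->rbuf+5)) */

	if (size >= msize) {			/* same bound check as p9_read_work() */
		fprintf(stderr, "requested packet size too big: %u\n",
			(unsigned)size);
		return 1;
	}
	printf("pkt: size: %u bytes tag: %u, %u body bytes left to read\n",
	       (unsigned)size, (unsigned)tag, (unsigned)(size - HDR_LEN));
	return 0;
}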

In p9_write_work():

	struct p9_conn *m;
	...
	m = container_of(work, struct p9_conn, wq);
	...
	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
	...
	if (!m->wsize) {
		spin_lock(&m->client->lock);
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			spin_unlock(&m->client->lock);
	...
		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
	...
		list_move_tail(&req->req_list, &m->req_list);
	...
		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	...
		 m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	...
	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;
	...
	clear_bit(Wworksched, &m->wsched);
	...
	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
	...
			n = p9_fd_poll(m->client, NULL);
	...
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
	...
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
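
Rworksched and Wworksched act as at-most-once scheduling latches: whichever caller flips the bit with test_and_set_bit() is the one that calls schedule_work(), so at most one read work and one write work are ever queued, and the bit is cleared again before the function's final readiness re-check so a racing wakeup is not lost. A userspace approximation of the latch using a C11 atomic_flag in place of test_and_set_bit()/clear_bit(); the function names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

/* One flag plays the role of the Wworksched bit in m->wsched. */
static atomic_flag worksched = ATOMIC_FLAG_INIT;

static int scheduled;			/* counts schedule_work() calls */

/* Stand-in for schedule_work(&m->wq). */
static void schedule_work(void)
{
	scheduled++;
}

/* Kick the writer, as p9_fd_request()/p9_poll_mux() do: only the caller
 * that flips the flag from clear to set actually queues the work. */
static void kick_writer(void)
{
	if (!atomic_flag_test_and_set(&worksched))
		schedule_work();
}

/* Work completion clears the latch, as p9_write_work() does before its
 * final readiness re-check. */
static void work_done(void)
{
	atomic_flag_clear(&worksched);
}

int main(void)
{
	kick_writer();			/* queues the work */
	kick_writer();			/* no-op: already scheduled */
	work_done();
	kick_writer();			/* queues again */
	printf("schedule_work() ran %d times\n", scheduled);	/* 2 */
	return 0;
}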

In p9_pollwake():

	struct p9_conn *m = pwait->conn;
	...
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);

In p9_pollwait():

	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	...
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
	...
	pwait->conn = m;

In p9_conn_create():

	struct p9_conn *m = &ts->conn;
	...
	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;
	...
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);
	...
	n = p9_fd_poll(client, &m->pt);
	...
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	...
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
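
After wiring the lists, the work items, and the poll callback, p9_conn_create() makes one initial p9_fd_poll() pass and latches any readiness that already exists into Rpending/Wpending, so data that arrived before registration is not stranded. Roughly the same non-blocking probe in userspace with poll(2) on a pipe; the fd setup is illustrative:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	struct pollfd pol;

	if (pipe(pfd) < 0)
		return 1;
	if (write(pfd[1], "x", 1) != 1)	/* data queued before we poll */
		return 1;

	pol.fd = pfd[0];
	pol.events = POLLIN | POLLOUT;
	pol.revents = 0;

	/* Zero timeout: a non-blocking probe, like the initial
	 * p9_fd_poll(client, &m->pt). */
	if (poll(&pol, 1, 0) < 0)
		return 1;

	if (pol.revents & POLLIN)
		printf("can read\n");	/* would set_bit(Rpending, &m->wsched) */
	if (pol.revents & POLLOUT)
		printf("can write\n");	/* would set_bit(Wpending, &m->wsched) */

	close(pfd[0]);
	close(pfd[1]);
	return 0;
}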

In p9_poll_mux():

static void p9_poll_mux(struct p9_conn *m)
	...
	if (m->err < 0)
	...
	n = p9_fd_poll(m->client, NULL);
	...
		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
	...
		p9_conn_cancel(m, n);
	...
		set_bit(Rpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
	...
		set_bit(Wpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);

In p9_fd_request():

	struct p9_conn *m = &ts->conn;
	...
		 m, current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;
	...
	list_add_tail(&req->req_list, &m->unsent_req_list);
	...
	if (test_and_clear_bit(Wpending, &m->wsched))
	...
		n = p9_fd_poll(m->client, NULL);
	...
	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		schedule_work(&m->wq);
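
p9_fd_request() queues the request on unsent_req_list and then kicks the writer, but first consumes any cached readiness: when a poll wakeup has already latched Wpending, test_and_clear_bit() yields writability directly and the extra p9_fd_poll() call is skipped. A condensed sketch of that fast path, assuming illustrative names (wpending, do_poll, writable_events):

#include <poll.h>
#include <stdbool.h>
#include <stdio.h>

static bool wpending;			/* plays the Wpending bit */

/* Illustrative stand-in for p9_fd_poll(m->client, NULL). */
static short do_poll(void)
{
	printf("polled the fd\n");
	return POLLOUT;
}

static short writable_events(void)
{
	/* Like test_and_clear_bit(Wpending, ...): consume the cached
	 * wakeup if one is latched, and only poll otherwise. */
	if (wpending) {
		wpending = false;
		return POLLOUT;
	}
	return do_poll();
}

int main(void)
{
	wpending = true;		/* a poll wakeup already fired */
	if (writable_events() & POLLOUT)
		printf("kick writer (no poll needed)\n");

	if (writable_events() & POLLOUT)	/* cache empty: falls back to poll */
		printf("kick writer\n");
	return 0;
}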

In p9_conn_destroy():

static void p9_conn_destroy(struct p9_conn *m)
	...
		 m, m->mux_list.prev, m->mux_list.next);
	...
	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);
	...
	p9_conn_cancel(m, -ECONNRESET);
	...
	m->client = NULL;