Lines matching refs: h5 (Linux kernel Three-wire UART Bluetooth HCI driver, drivers/bluetooth/hci_h5.c). The number at the start of each match is the line number in that file; the trailing token names the enclosing function and how the identifier is used there (local, argument, struct).

67 struct h5 {  struct
99 static void h5_reset_rx(struct h5 *h5); argument
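
Every field of struct h5 that this listing touches appears in the matches below; piecing them together, the struct plausibly looks like the following (a reconstruction from the references here, not a verbatim quote; ordering and comments are guesses):

    struct h5 {
        struct sk_buff_head unack;        /* reliable packets sent, awaiting ack */
        struct sk_buff_head rel;          /* reliable packets waiting to be sent */
        struct sk_buff_head unrel;        /* unreliable packets waiting to be sent */

        unsigned long flags;              /* H5_TX_ACK_REQ, H5_RX_ESC bits */

        struct sk_buff *rx_skb;           /* packet currently being reassembled */
        size_t rx_pending;                /* bytes the current rx_func still expects */
        u8 rx_ack;                        /* last ack number received from the peer */

        int (*rx_func)(struct hci_uart *hu, u8 c);  /* current rx state handler */

        struct timer_list timer;          /* sync/retransmission timer */

        u8 tx_seq;                        /* seq number of the next reliable packet */
        u8 tx_ack;                        /* next seq number we expect to receive */
        u8 tx_win;                        /* negotiated sliding-window size */

        enum {
            H5_UNINITIALIZED,
            H5_INITIALIZED,
            H5_ACTIVE,
        } state;

        enum {
            H5_AWAKE,
            H5_SLEEPING,
            H5_WAKING_UP,
        } sleep;
    };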
103 struct h5 *h5 = hu->priv; in h5_link_control() local
114 skb_queue_tail(&h5->unrel, nskb); in h5_link_control()
117 static u8 h5_cfg_field(struct h5 *h5) in h5_cfg_field() argument
122 field |= (h5->tx_win & 7); in h5_cfg_field()
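
Line 122 is essentially the whole body of h5_cfg_field(): the sliding-window size is the only capability advertised in the CONFIG message, packed into the low three bits. A sketch of the full helper, assuming no other bits are set:

    static u8 h5_cfg_field(struct h5 *h5)
    {
        u8 field = 0;

        /* Sliding window size (first 3 bits) */
        field |= (h5->tx_win & 7);

        return field;
    }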
132 struct h5 *h5 = hu->priv; in h5_timed_event() local
138 if (h5->state == H5_UNINITIALIZED) in h5_timed_event()
141 if (h5->state == H5_INITIALIZED) { in h5_timed_event()
142 conf_req[2] = h5_cfg_field(h5); in h5_timed_event()
146 if (h5->state != H5_ACTIVE) { in h5_timed_event()
147 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); in h5_timed_event()
151 if (h5->sleep != H5_AWAKE) { in h5_timed_event()
152 h5->sleep = H5_SLEEPING; in h5_timed_event()
156 BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen); in h5_timed_event()
158 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); in h5_timed_event()
160 while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) { in h5_timed_event()
161 h5->tx_seq = (h5->tx_seq - 1) & 0x07; in h5_timed_event()
162 skb_queue_head(&h5->rel, skb); in h5_timed_event()
165 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_timed_event()
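
Retransmission works by pulling every un-acked packet off the tail of the unack queue and pushing it onto the head of the reliable queue, rewinding tx_seq one step (mod 8) per packet so the copies go out with their original sequence numbers. A worked trace, assuming three outstanding packets that wrapped the 3-bit counter:

    /* Before the timeout:  unack = [seq 5, seq 6, seq 7],  tx_seq = 0
     *
     *   __skb_dequeue_tail() pops seq 7  ->  tx_seq = (0 - 1) & 0x07 = 7
     *   __skb_dequeue_tail() pops seq 6  ->  tx_seq = (7 - 1) & 0x07 = 6
     *   __skb_dequeue_tail() pops seq 5  ->  tx_seq = (6 - 1) & 0x07 = 5
     *
     * skb_queue_head() inserts at the front each time, so afterwards
     * rel = [seq 5, seq 6, seq 7]: original order preserved, and the
     * next transmission reuses seq 5 exactly as before.
     */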
173 struct h5 *h5 = hu->priv; in h5_peer_reset() local
177 h5->state = H5_UNINITIALIZED; in h5_peer_reset()
179 del_timer(&h5->timer); in h5_peer_reset()
181 skb_queue_purge(&h5->rel); in h5_peer_reset()
182 skb_queue_purge(&h5->unrel); in h5_peer_reset()
183 skb_queue_purge(&h5->unack); in h5_peer_reset()
185 h5->tx_seq = 0; in h5_peer_reset()
186 h5->tx_ack = 0; in h5_peer_reset()
194 struct h5 *h5; in h5_open() local
199 h5 = kzalloc(sizeof(*h5), GFP_KERNEL); in h5_open()
200 if (!h5) in h5_open()
203 hu->priv = h5; in h5_open()
205 skb_queue_head_init(&h5->unack); in h5_open()
206 skb_queue_head_init(&h5->rel); in h5_open()
207 skb_queue_head_init(&h5->unrel); in h5_open()
209 h5_reset_rx(h5); in h5_open()
211 init_timer(&h5->timer); in h5_open()
212 h5->timer.function = h5_timed_event; in h5_open()
213 h5->timer.data = (unsigned long)hu; in h5_open()
215 h5->tx_win = H5_TX_WIN_MAX; in h5_open()
221 mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); in h5_open()
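
The constants used above are driver-wide defines. From memory of the source (treat the exact values as an assumption to verify against the tree):

    #define H5_TX_WIN_MAX    4                      /* default/maximum window */
    #define H5_ACK_TIMEOUT   msecs_to_jiffies(250)  /* retransmission timeout */
    #define H5_SYNC_TIMEOUT  msecs_to_jiffies(100)  /* link-establishment poll */

Note that lines 211-213 use the pre-4.15 timer API (init_timer() plus explicit .function/.data); a current tree would use timer_setup() with from_timer() in the callback.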
228 struct h5 *h5 = hu->priv; in h5_close() local
230 del_timer_sync(&h5->timer); in h5_close()
232 skb_queue_purge(&h5->unack); in h5_close()
233 skb_queue_purge(&h5->rel); in h5_close()
234 skb_queue_purge(&h5->unrel); in h5_close()
236 kfree(h5); in h5_close()
241 static void h5_pkt_cull(struct h5 *h5) in h5_pkt_cull() argument
248 spin_lock_irqsave(&h5->unack.lock, flags); in h5_pkt_cull()
250 to_remove = skb_queue_len(&h5->unack); in h5_pkt_cull()
254 seq = h5->tx_seq; in h5_pkt_cull()
257 if (h5->rx_ack == seq) in h5_pkt_cull()
264 if (seq != h5->rx_ack) in h5_pkt_cull()
268 skb_queue_walk_safe(&h5->unack, skb, tmp) { in h5_pkt_cull()
272 __skb_unlink(skb, &h5->unack); in h5_pkt_cull()
276 if (skb_queue_empty(&h5->unack)) in h5_pkt_cull()
277 del_timer(&h5->timer); in h5_pkt_cull()
280 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_pkt_cull()
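
The listing elides the loop between lines 254 and 264: it steps seq backwards from tx_seq (mod 8), decrementing to_remove, until seq meets rx_ack; whatever remains of to_remove is the number of newly acknowledged packets at the front of the queue. A hypothetical trace:

    /* unack = [seq 2, seq 3, seq 4],  tx_seq = 5,  peer acks with 4
     * (an ack of 4 means "I expect seq 4 next", so 2 and 3 are confirmed)
     *
     *   to_remove = 3, seq = 5:  rx_ack(4) != 5  ->  to_remove = 2, seq = 4
     *   to_remove = 2, seq = 4:  rx_ack(4) == 4  ->  stop
     *
     * skb_queue_walk_safe() then unlinks and frees the first two skbs
     * (seq 2 and seq 3); seq 4 stays queued until a later ack covers it.
     * If seq never meets rx_ack, the ack was bogus and nothing is culled
     * (the line-264 check).
     */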
285 struct h5 *h5 = hu->priv; in h5_handle_internal_rx() local
293 const unsigned char *hdr = h5->rx_skb->data; in h5_handle_internal_rx()
294 const unsigned char *data = &h5->rx_skb->data[4]; in h5_handle_internal_rx()
304 conf_req[2] = h5_cfg_field(h5); in h5_handle_internal_rx()
307 if (h5->state == H5_ACTIVE) in h5_handle_internal_rx()
311 if (h5->state == H5_ACTIVE) in h5_handle_internal_rx()
313 h5->state = H5_INITIALIZED; in h5_handle_internal_rx()
320 h5->tx_win = (data[2] & 7); in h5_handle_internal_rx()
321 BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win); in h5_handle_internal_rx()
322 h5->state = H5_ACTIVE; in h5_handle_internal_rx()
327 h5->sleep = H5_SLEEPING; in h5_handle_internal_rx()
331 h5->sleep = H5_AWAKE; in h5_handle_internal_rx()
335 h5->sleep = H5_AWAKE; in h5_handle_internal_rx()
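
The branches above compare the two-byte payload against fixed link-control messages defined locally in h5_handle_internal_rx(). Byte values reproduced from memory, so verify before relying on them:

    const unsigned char sync_req[]   = { 0x01, 0x7e };
    const unsigned char sync_rsp[]   = { 0x02, 0x7d };
    unsigned char conf_req[3]        = { 0x03, 0xfc };  /* [2] = h5_cfg_field(h5) */
    const unsigned char conf_rsp[]   = { 0x04, 0x7b };
    const unsigned char wakeup_req[] = { 0x05, 0xfa };
    const unsigned char woken_req[]  = { 0x06, 0xf9 };
    const unsigned char sleep_req[]  = { 0x07, 0x78 };

CONFIG and CONFIG_RSP carry one extra byte, which is where line 320 reads the peer's window size from data[2].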
346 struct h5 *h5 = hu->priv; in h5_complete_rx_pkt() local
347 const unsigned char *hdr = h5->rx_skb->data; in h5_complete_rx_pkt()
350 h5->tx_ack = (h5->tx_ack + 1) % 8; in h5_complete_rx_pkt()
351 set_bit(H5_TX_ACK_REQ, &h5->flags); in h5_complete_rx_pkt()
355 h5->rx_ack = H5_HDR_ACK(hdr); in h5_complete_rx_pkt()
357 h5_pkt_cull(h5); in h5_complete_rx_pkt()
363 bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr); in h5_complete_rx_pkt()
366 skb_pull(h5->rx_skb, 4); in h5_complete_rx_pkt()
368 hci_recv_frame(hu->hdev, h5->rx_skb); in h5_complete_rx_pkt()
369 h5->rx_skb = NULL; in h5_complete_rx_pkt()
378 h5_reset_rx(h5); in h5_complete_rx_pkt()
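
Lines 363-369 sit inside a switch on the packet type: HCI event/ACL/SCO payloads are stripped of the 4-byte header and handed to the core, everything else is treated as a link-control message. Reconstructed roughly:

    switch (H5_HDR_PKT_TYPE(hdr)) {
    case HCI_EVENT_PKT:
    case HCI_ACLDATA_PKT:
    case HCI_SCODATA_PKT:
        bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);

        /* Remove Three-wire header */
        skb_pull(h5->rx_skb, 4);

        hci_recv_frame(hu->hdev, h5->rx_skb);
        h5->rx_skb = NULL;            /* ownership passed to the HCI core */
        break;

    default:
        h5_handle_internal_rx(hu);
        break;
    }

    h5_reset_rx(h5);                  /* line 378: ready for the next frame */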
390 struct h5 *h5 = hu->priv; in h5_rx_payload() local
391 const unsigned char *hdr = h5->rx_skb->data; in h5_rx_payload()
394 h5->rx_func = h5_rx_crc; in h5_rx_payload()
395 h5->rx_pending = 2; in h5_rx_payload()
405 struct h5 *h5 = hu->priv; in h5_rx_3wire_hdr() local
406 const unsigned char *hdr = h5->rx_skb->data; in h5_rx_3wire_hdr()
415 h5_reset_rx(h5); in h5_rx_3wire_hdr()
419 if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) { in h5_rx_3wire_hdr()
421 H5_HDR_SEQ(hdr), h5->tx_ack); in h5_rx_3wire_hdr()
422 h5_reset_rx(h5); in h5_rx_3wire_hdr()
426 if (h5->state != H5_ACTIVE && in h5_rx_3wire_hdr()
429 h5_reset_rx(h5); in h5_rx_3wire_hdr()
433 h5->rx_func = h5_rx_payload; in h5_rx_3wire_hdr()
434 h5->rx_pending = H5_HDR_LEN(hdr); in h5_rx_3wire_hdr()
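
The H5_HDR_* accessors decode the 4-byte packet header. Its layout, with the macro definitions as I recall them from the source (verify before relying on them):

    /* hdr[0]: seq (bits 0-2), ack (3-5), CRC-present (6), reliable (7)
     * hdr[1]: packet type (bits 0-3), payload length low nibble (4-7)
     * hdr[2]: payload length high byte (max payload 0xfff)
     * hdr[3]: checksum, chosen so all four header bytes sum to 0xff
     */
    #define H5_HDR_SEQ(hdr)       ((hdr)[0] & 0x07)
    #define H5_HDR_ACK(hdr)       (((hdr)[0] >> 3) & 0x07)
    #define H5_HDR_CRC(hdr)       (((hdr)[0] >> 6) & 0x01)
    #define H5_HDR_RELIABLE(hdr)  (((hdr)[0] >> 7) & 0x01)
    #define H5_HDR_PKT_TYPE(hdr)  ((hdr)[1] & 0x0f)
    #define H5_HDR_LEN(hdr)       ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

The h5_reset_rx() at line 415 fires when the header fails the sum-to-0xff check; the one at 422 handles an out-of-order reliable sequence number, and 429 drops non-link-control traffic while the link is still being established.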
441 struct h5 *h5 = hu->priv; in h5_rx_pkt_start() local
446 h5->rx_func = h5_rx_3wire_hdr; in h5_rx_pkt_start()
447 h5->rx_pending = 4; in h5_rx_pkt_start()
449 h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC); in h5_rx_pkt_start()
450 if (!h5->rx_skb) { in h5_rx_pkt_start()
452 h5_reset_rx(h5); in h5_rx_pkt_start()
456 h5->rx_skb->dev = (void *)hu->hdev; in h5_rx_pkt_start()
463 struct h5 *h5 = hu->priv; in h5_rx_delimiter() local
466 h5->rx_func = h5_rx_pkt_start; in h5_rx_delimiter()
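
Taken together, the rx_func assignments in this listing form a small receive state machine, one handler per framing stage:

    /* h5_rx_delimiter  -- discard bytes until a SLIP delimiter arrives
     * h5_rx_pkt_start  -- skip further delimiters, alloc rx_skb, expect 4 bytes
     * h5_rx_3wire_hdr  -- validate header, expect H5_HDR_LEN(hdr) payload bytes
     * h5_rx_payload    -- if H5_HDR_CRC(hdr), expect 2 more bytes via h5_rx_crc
     * h5_rx_crc        -- frame complete: h5_complete_rx_pkt(), then reset
     *
     * h5_reset_rx() (line 502) returns the machine to h5_rx_delimiter.
     */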
471 static void h5_unslip_one_byte(struct h5 *h5, unsigned char c) in h5_unslip_one_byte() argument
476 if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) { in h5_unslip_one_byte()
477 set_bit(H5_RX_ESC, &h5->flags); in h5_unslip_one_byte()
481 if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) { in h5_unslip_one_byte()
491 h5_reset_rx(h5); in h5_unslip_one_byte()
496 memcpy(skb_put(h5->rx_skb, 1), byte, 1); in h5_unslip_one_byte()
497 h5->rx_pending--; in h5_unslip_one_byte()
499 BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); in h5_unslip_one_byte()
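
This is standard SLIP (RFC 1055) byte-unstuffing. The constants, and a sketch of the transmit-side counterpart h5_slip_one_byte() (the driver has such a helper; the body below is reconstructed, not quoted):

    #define SLIP_DELIMITER  0xc0   /* frames every packet on the wire */
    #define SLIP_ESC        0xdb
    #define SLIP_ESC_DELIM  0xdc   /* 0xc0 in payload -> 0xdb 0xdc */
    #define SLIP_ESC_ESC    0xdd   /* 0xdb in payload -> 0xdb 0xdd */

    static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
    {
        const u8 esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
        const u8 esc_esc[2]   = { SLIP_ESC, SLIP_ESC_ESC };

        switch (c) {
        case SLIP_DELIMITER:
            memcpy(skb_put(skb, 2), esc_delim, 2);
            break;
        case SLIP_ESC:
            memcpy(skb_put(skb, 2), esc_esc, 2);
            break;
        default:
            memcpy(skb_put(skb, 1), &c, 1);
        }
    }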
502 static void h5_reset_rx(struct h5 *h5) in h5_reset_rx() argument
504 if (h5->rx_skb) { in h5_reset_rx()
505 kfree_skb(h5->rx_skb); in h5_reset_rx()
506 h5->rx_skb = NULL; in h5_reset_rx()
509 h5->rx_func = h5_rx_delimiter; in h5_reset_rx()
510 h5->rx_pending = 0; in h5_reset_rx()
511 clear_bit(H5_RX_ESC, &h5->flags); in h5_reset_rx()
516 struct h5 *h5 = hu->priv; in h5_recv() local
519 BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending, in h5_recv()
525 if (h5->rx_pending > 0) { in h5_recv()
528 h5_reset_rx(h5); in h5_recv()
532 h5_unslip_one_byte(h5, *ptr); in h5_recv()
538 processed = h5->rx_func(hu, *ptr); in h5_recv()
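
For context, the loop around lines 525-538 (as it looked in trees of this vintage, reconstructed from memory): while bytes remain, pending payload bytes go through the SLIP decoder one at a time, and everything else is offered to the current rx_func, which reports how many bytes it consumed:

    while (count > 0) {
        int processed;

        if (h5->rx_pending > 0) {
            if (*ptr == SLIP_DELIMITER) {
                BT_ERR("Too short H5 packet");
                h5_reset_rx(h5);    /* rx_pending becomes 0, so the delimiter
                                     * is re-examined by rx_func next pass */
                continue;
            }

            h5_unslip_one_byte(h5, *ptr);

            ptr++; count--;
            continue;
        }

        processed = h5->rx_func(hu, *ptr);
        if (processed < 0)
            return processed;

        ptr += processed;
        count -= processed;
    }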
551 struct h5 *h5 = hu->priv; in h5_enqueue() local
559 if (h5->state != H5_ACTIVE) { in h5_enqueue()
568 skb_queue_tail(&h5->rel, skb); in h5_enqueue()
572 skb_queue_tail(&h5->unrel, skb); in h5_enqueue()
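
The two queue choices at lines 568 and 572 sit in a switch on the HCI packet type: ACL data and commands must survive loss, while SCO audio is time-sensitive and goes out unacknowledged. Roughly:

    switch (bt_cb(skb)->pkt_type) {
    case HCI_ACLDATA_PKT:
    case HCI_COMMAND_PKT:
        skb_queue_tail(&h5->rel, skb);     /* reliable: seq/ack + retransmit */
        break;

    case HCI_SCODATA_PKT:
        skb_queue_tail(&h5->unrel, skb);   /* unreliable: fire and forget */
        break;

    default:
        BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
        kfree_skb(skb);
        break;
    }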
625 struct h5 *h5 = hu->priv; in h5_prepare_pkt() local
649 hdr[0] = h5->tx_ack << 3; in h5_prepare_pkt()
650 clear_bit(H5_TX_ACK_REQ, &h5->flags); in h5_prepare_pkt()
655 hdr[0] |= h5->tx_seq; in h5_prepare_pkt()
656 h5->tx_seq = (h5->tx_seq + 1) % 8; in h5_prepare_pkt()
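
Lines 649-656 build hdr[0]; completing the 4-byte header from the layout implied by the receive side gives roughly the following (the reliable test is shorthand for the ACL/command check above):

    hdr[0] = h5->tx_ack << 3;               /* piggy-backed ack, bits 3-5 */
    clear_bit(H5_TX_ACK_REQ, &h5->flags);   /* ack is now on its way */

    if (reliable) {                         /* ACL data or command packet */
        hdr[0] |= 1 << 7;                   /* reliable bit */
        hdr[0] |= h5->tx_seq;               /* seq number, bits 0-2 */
        h5->tx_seq = (h5->tx_seq + 1) % 8;
    }

    hdr[1] = pkt_type | ((len & 0x0f) << 4);
    hdr[2] = len >> 4;
    hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);  /* all four sum to 0xff */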
681 struct h5 *h5 = hu->priv; in h5_dequeue() local
685 if (h5->sleep != H5_AWAKE) { in h5_dequeue()
688 if (h5->sleep == H5_WAKING_UP) in h5_dequeue()
691 h5->sleep = H5_WAKING_UP; in h5_dequeue()
694 mod_timer(&h5->timer, jiffies + HZ / 100); in h5_dequeue()
698 skb = skb_dequeue(&h5->unrel); in h5_dequeue()
707 skb_queue_head(&h5->unrel, skb); in h5_dequeue()
711 spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); in h5_dequeue()
713 if (h5->unack.qlen >= h5->tx_win) in h5_dequeue()
716 skb = skb_dequeue(&h5->rel); in h5_dequeue()
721 __skb_queue_tail(&h5->unack, skb); in h5_dequeue()
722 mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT); in h5_dequeue()
723 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_dequeue()
727 skb_queue_head(&h5->rel, skb); in h5_dequeue()
732 spin_unlock_irqrestore(&h5->unack.lock, flags); in h5_dequeue()
734 if (test_bit(H5_TX_ACK_REQ, &h5->flags)) in h5_dequeue()
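
The listing stops at the final fallback: with both queues drained (or the window full) the only thing left worth sending is a standalone ack packet. The function plausibly ends:

    if (test_bit(H5_TX_ACK_REQ, &h5->flags))
        return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

    return NULL;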