Searched refs:ch (Results 1 - 200 of 1296) sorted by relevance


/linux-4.1.27/arch/x86/boot/
ctype.h
4 static inline int isdigit(int ch) isdigit() argument
6 return (ch >= '0') && (ch <= '9'); isdigit()
9 static inline int isxdigit(int ch) isxdigit() argument
11 if (isdigit(ch)) isxdigit()
14 if ((ch >= 'a') && (ch <= 'f')) isxdigit()
17 return (ch >= 'A') && (ch <= 'F'); isxdigit()
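The ctype.h hits above cover both helpers end to end; stitched back together (search line numbers dropped, and assuming the lines elided between the hits are just early "return true;" statements), the boot-time classifiers read roughly:

	static inline int isdigit(int ch)
	{
		return (ch >= '0') && (ch <= '9');
	}

	static inline int isxdigit(int ch)
	{
		if (isdigit(ch))
			return true;	/* assumed: not among the hits above */
		if ((ch >= 'a') && (ch <= 'f'))
			return true;	/* assumed: not among the hits above */
		return (ch >= 'A') && (ch <= 'F');
	}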
tty.c
30 static void __attribute__((section(".inittext"))) serial_putchar(int ch) serial_putchar() argument
37 outb(ch, early_serial_base + TXR); serial_putchar()
40 static void __attribute__((section(".inittext"))) bios_putchar(int ch) bios_putchar() argument
48 ireg.al = ch; bios_putchar()
52 void __attribute__((section(".inittext"))) putchar(int ch) putchar() argument
54 if (ch == '\n') putchar()
57 bios_putchar(ch); putchar()
60 serial_putchar(ch); putchar()
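Taken together, the tty.c hits at lines 52-60 outline the boot-time putchar(): a newline is expanded to CR-LF, every character goes out via the BIOS, and it is mirrored to the early serial port. A minimal sketch of that control flow (section attribute dropped; the guard around the serial write is not among the hits and is assumed here):

	void putchar(int ch)
	{
		if (ch == '\n')
			putchar('\r');		/* \n -> \r\n */
		bios_putchar(ch);
		if (early_serial_base)		/* assumed guard */
			serial_putchar(ch);
	}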
video.c
33 if (oreg.ch & 0x20) store_cursor_position()
36 if ((oreg.ch & 0x1f) > (oreg.cl & 0x1f)) store_cursor_position()
141 char ch; display_menu() local
160 ch = '0'; display_menu()
178 ch, mode_id, mi->x, resbuf, card->card_name); display_menu()
185 if (ch == '9') display_menu()
186 ch = 'a'; display_menu()
187 else if (ch == 'z' || ch == ' ') display_menu()
188 ch = ' '; /* Out of keys... */ display_menu()
190 ch++; display_menu()
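The display_menu() fragments above show how the per-mode hotkey advances: ch starts at '0', wraps from '9' to 'a', and sticks at the blank key once 'z' is passed. The stepping logic, pieced together from lines 185-190:

	if (ch == '9')
		ch = 'a';
	else if (ch == 'z' || ch == ' ')
		ch = ' ';	/* Out of keys... */
	else
		ch++;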
/linux-4.1.27/drivers/isdn/hardware/mISDN/
mISDNisar.c
82 if (isar->ch[0].bch.debug & DEBUG_HW_BFIFO) { send_mbox()
111 if (isar->ch[0].bch.debug & DEBUG_HW_BFIFO) { rcv_mbox()
188 u32 saved_debug = isar->ch[0].bch.debug; load_firmware()
207 isar->ch[0].bch.debug &= ~DEBUG_HW_BFIFO; load_firmware()
294 isar->ch[0].bch.debug = saved_debug; load_firmware()
406 isar->ch[0].bch.debug = saved_debug; load_firmware()
415 deliver_status(struct isar_ch *ch, int status) deliver_status() argument
417 pr_debug("%s: HL->LL FAXIND %x\n", ch->is->name, status); deliver_status()
418 _queue_data(&ch->bch.ch, PH_CONTROL_IND, status, 0, NULL, GFP_ATOMIC); deliver_status()
422 isar_rcv_frame(struct isar_ch *ch) isar_rcv_frame() argument
427 if (!ch->is->clsb) { isar_rcv_frame()
428 pr_debug("%s; ISAR zero len frame\n", ch->is->name); isar_rcv_frame()
429 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
432 if (test_bit(FLG_RX_OFF, &ch->bch.Flags)) { isar_rcv_frame()
433 ch->bch.dropcnt += ch->is->clsb; isar_rcv_frame()
434 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
437 switch (ch->bch.state) { isar_rcv_frame()
440 ch->is->name, ch->is->iis, ch->is->cmsb, ch->is->clsb); isar_rcv_frame()
441 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
446 maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb); isar_rcv_frame()
449 ch->is->name, ch->bch.nr, ch->is->clsb); isar_rcv_frame()
450 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
453 rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb)); isar_rcv_frame()
454 recv_Bchannel(&ch->bch, 0, false); isar_rcv_frame()
457 maxlen = bchannel_get_rxbuf(&ch->bch, ch->is->clsb); isar_rcv_frame()
460 ch->is->name, ch->bch.nr, ch->is->clsb); isar_rcv_frame()
461 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
464 if (ch->is->cmsb & HDLC_ERROR) { isar_rcv_frame()
466 ch->is->name, ch->is->cmsb, ch->is->clsb); isar_rcv_frame()
468 if (ch->is->cmsb & HDLC_ERR_RER) isar_rcv_frame()
469 ch->bch.err_inv++; isar_rcv_frame()
470 if (ch->is->cmsb & HDLC_ERR_CER) isar_rcv_frame()
471 ch->bch.err_crc++; isar_rcv_frame()
473 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
474 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
477 if (ch->is->cmsb & HDLC_FSD) isar_rcv_frame()
478 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
479 ptr = skb_put(ch->bch.rx_skb, ch->is->clsb); isar_rcv_frame()
480 rcv_mbox(ch->is, ptr); isar_rcv_frame()
481 if (ch->is->cmsb & HDLC_FED) { isar_rcv_frame()
482 if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */ isar_rcv_frame()
484 ch->is->name, ch->bch.rx_skb->len); isar_rcv_frame()
485 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
488 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); isar_rcv_frame()
489 recv_Bchannel(&ch->bch, 0, false); isar_rcv_frame()
493 if (ch->state != STFAX_ACTIV) { isar_rcv_frame()
495 ch->is->name); isar_rcv_frame()
496 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
497 if (ch->bch.rx_skb) isar_rcv_frame()
498 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
501 if (!ch->bch.rx_skb) { isar_rcv_frame()
502 ch->bch.rx_skb = mI_alloc_skb(ch->bch.maxlen, isar_rcv_frame()
504 if (unlikely(!ch->bch.rx_skb)) { isar_rcv_frame()
507 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
511 if (ch->cmd == PCTRL_CMD_FRM) { isar_rcv_frame()
512 rcv_mbox(ch->is, skb_put(ch->bch.rx_skb, ch->is->clsb)); isar_rcv_frame()
514 ch->is->name, ch->bch.rx_skb->len); isar_rcv_frame()
515 if (ch->is->cmsb & SART_NMD) { /* ABORT */ isar_rcv_frame()
517 ch->is->name); isar_rcv_frame()
518 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
519 send_mbox(ch->is, SET_DPS(ch->dpath) | isar_rcv_frame()
522 ch->state = STFAX_ESCAPE; isar_rcv_frame()
525 recv_Bchannel(&ch->bch, 0, false); isar_rcv_frame()
526 if (ch->is->cmsb & SART_NMD) isar_rcv_frame()
527 deliver_status(ch, HW_MOD_NOCARR); isar_rcv_frame()
530 if (ch->cmd != PCTRL_CMD_FRH) { isar_rcv_frame()
532 ch->is->name, ch->cmd); isar_rcv_frame()
533 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
534 if (ch->bch.rx_skb) isar_rcv_frame()
535 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
539 if ((ch->bch.rx_skb->len + ch->is->clsb) > isar_rcv_frame()
540 (ch->bch.maxlen + 2)) { isar_rcv_frame()
542 ch->is->name, __func__); isar_rcv_frame()
543 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
544 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
546 } else if (ch->is->cmsb & HDLC_ERROR) { isar_rcv_frame()
548 ch->is->name, ch->is->cmsb, ch->is->clsb); isar_rcv_frame()
549 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
550 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
553 if (ch->is->cmsb & HDLC_FSD) isar_rcv_frame()
554 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
555 ptr = skb_put(ch->bch.rx_skb, ch->is->clsb); isar_rcv_frame()
556 rcv_mbox(ch->is, ptr); isar_rcv_frame()
557 if (ch->is->cmsb & HDLC_FED) { isar_rcv_frame()
558 if (ch->bch.rx_skb->len < 3) { /* last 2 are the FCS */ isar_rcv_frame()
560 ch->is->name, ch->bch.rx_skb->len); isar_rcv_frame()
561 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
564 skb_trim(ch->bch.rx_skb, ch->bch.rx_skb->len - 2); isar_rcv_frame()
565 recv_Bchannel(&ch->bch, 0, false); isar_rcv_frame()
567 if (ch->is->cmsb & SART_NMD) { /* ABORT */ isar_rcv_frame()
569 ch->is->name); isar_rcv_frame()
570 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
571 if (ch->bch.rx_skb) isar_rcv_frame()
572 skb_trim(ch->bch.rx_skb, 0); isar_rcv_frame()
573 send_mbox(ch->is, SET_DPS(ch->dpath) | isar_rcv_frame()
575 ch->state = STFAX_ESCAPE; isar_rcv_frame()
576 deliver_status(ch, HW_MOD_NOCARR); isar_rcv_frame()
580 pr_info("isar_rcv_frame protocol (%x)error\n", ch->bch.state); isar_rcv_frame()
581 ch->is->write_reg(ch->is->hw, ISAR_IIA, 0); isar_rcv_frame()
587 isar_fill_fifo(struct isar_ch *ch) isar_fill_fifo() argument
593 pr_debug("%s: ch%d tx_skb %d tx_idx %d\n", ch->is->name, ch->bch.nr, isar_fill_fifo()
594 ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, ch->bch.tx_idx); isar_fill_fifo()
595 if (!(ch->is->bstat & isar_fill_fifo()
596 (ch->dpath == 1 ? BSTAT_RDM1 : BSTAT_RDM2))) isar_fill_fifo()
598 if (!ch->bch.tx_skb) { isar_fill_fifo()
599 if (!test_bit(FLG_TX_EMPTY, &ch->bch.Flags) || isar_fill_fifo()
600 (ch->bch.state != ISDN_P_B_RAW)) isar_fill_fifo()
602 count = ch->mml; isar_fill_fifo()
604 memset(ch->is->buf, ch->bch.fill[0], count); isar_fill_fifo()
605 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, isar_fill_fifo()
606 0, count, ch->is->buf); isar_fill_fifo()
609 count = ch->bch.tx_skb->len - ch->bch.tx_idx; isar_fill_fifo()
612 if (count > ch->mml) { isar_fill_fifo()
614 count = ch->mml; isar_fill_fifo()
618 ptr = ch->bch.tx_skb->data + ch->bch.tx_idx; isar_fill_fifo()
619 if (!ch->bch.tx_idx) { isar_fill_fifo()
620 pr_debug("%s: frame start\n", ch->is->name); isar_fill_fifo()
621 if ((ch->bch.state == ISDN_P_B_T30_FAX) && isar_fill_fifo()
622 (ch->cmd == PCTRL_CMD_FTH)) { isar_fill_fifo()
627 &ch->bch.Flags); isar_fill_fifo()
629 ch->is->name); isar_fill_fifo()
632 &ch->bch.Flags); isar_fill_fifo()
638 ch->bch.tx_idx += count; isar_fill_fifo()
639 switch (ch->bch.state) { isar_fill_fifo()
646 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, isar_fill_fifo()
650 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, isar_fill_fifo()
654 if (ch->state != STFAX_ACTIV) isar_fill_fifo()
655 pr_debug("%s: not ACTIV\n", ch->is->name); isar_fill_fifo()
656 else if (ch->cmd == PCTRL_CMD_FTH) isar_fill_fifo()
657 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, isar_fill_fifo()
659 else if (ch->cmd == PCTRL_CMD_FTM) isar_fill_fifo()
660 send_mbox(ch->is, SET_DPS(ch->dpath) | ISAR_HIS_SDATA, isar_fill_fifo()
663 pr_debug("%s: not FTH/FTM\n", ch->is->name); isar_fill_fifo()
667 __func__, ch->bch.state); isar_fill_fifo()
675 struct isar_ch *base = &isar->ch[0]; sel_bch_isar()
688 send_next(struct isar_ch *ch) send_next() argument
690 pr_debug("%s: %s ch%d tx_skb %d tx_idx %d\n", ch->is->name, __func__, send_next()
691 ch->bch.nr, ch->bch.tx_skb ? ch->bch.tx_skb->len : -1, send_next()
692 ch->bch.tx_idx); send_next()
693 if (ch->bch.state == ISDN_P_B_T30_FAX) { send_next()
694 if (ch->cmd == PCTRL_CMD_FTH) { send_next()
695 if (test_bit(FLG_LASTDATA, &ch->bch.Flags)) { send_next()
697 test_and_set_bit(FLG_NMD_DATA, &ch->bch.Flags); send_next()
699 } else if (ch->cmd == PCTRL_CMD_FTM) { send_next()
700 if (test_bit(FLG_DLEETX, &ch->bch.Flags)) { send_next()
701 test_and_set_bit(FLG_LASTDATA, &ch->bch.Flags); send_next()
702 test_and_set_bit(FLG_NMD_DATA, &ch->bch.Flags); send_next()
706 if (ch->bch.tx_skb) send_next()
707 dev_kfree_skb(ch->bch.tx_skb); send_next()
708 if (get_next_bframe(&ch->bch)) { send_next()
709 isar_fill_fifo(ch); send_next()
710 test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags); send_next()
711 } else if (test_bit(FLG_TX_EMPTY, &ch->bch.Flags)) { send_next()
712 isar_fill_fifo(ch); send_next()
714 if (test_and_clear_bit(FLG_DLEETX, &ch->bch.Flags)) { send_next()
716 &ch->bch.Flags)) { send_next()
718 &ch->bch.Flags)) { send_next()
720 send_mbox(ch->is, SET_DPS(ch->dpath) | send_next()
723 test_and_set_bit(FLG_LL_OK, &ch->bch.Flags); send_next()
725 deliver_status(ch, HW_MOD_CONNECT); send_next()
727 } else if (test_bit(FLG_FILLEMPTY, &ch->bch.Flags)) { send_next()
728 test_and_set_bit(FLG_TX_EMPTY, &ch->bch.Flags); send_next()
736 struct isar_ch *ch; check_send() local
740 ch = sel_bch_isar(isar, 1); check_send()
741 if (ch && test_bit(FLG_ACTIVE, &ch->bch.Flags)) { check_send()
742 if (ch->bch.tx_skb && (ch->bch.tx_skb->len > check_send()
743 ch->bch.tx_idx)) check_send()
744 isar_fill_fifo(ch); check_send()
746 send_next(ch); check_send()
750 ch = sel_bch_isar(isar, 2); check_send()
751 if (ch && test_bit(FLG_ACTIVE, &ch->bch.Flags)) { check_send()
752 if (ch->bch.tx_skb && (ch->bch.tx_skb->len > check_send()
753 ch->bch.tx_idx)) check_send()
754 isar_fill_fifo(ch); check_send()
756 send_next(ch); check_send()
768 isar_pump_status_rsp(struct isar_ch *ch) { isar_pump_status_rsp() argument
769 u8 ril = ch->is->buf[0]; isar_pump_status_rsp()
772 if (!test_and_clear_bit(ISAR_RATE_REQ, &ch->is->Flags)) isar_pump_status_rsp()
775 pr_info("%s: wrong pstrsp ril=%d\n", ch->is->name, ril); isar_pump_status_rsp()
778 switch (ch->is->buf[1]) { isar_pump_status_rsp()
813 sprintf(ch->conmsg, "%s %s", dmril[ril], dmrim[rim]); isar_pump_status_rsp()
814 pr_debug("%s: pump strsp %s\n", ch->is->name, ch->conmsg); isar_pump_status_rsp()
818 isar_pump_statev_modem(struct isar_ch *ch, u8 devt) { isar_pump_statev_modem() argument
819 u8 dps = SET_DPS(ch->dpath); isar_pump_statev_modem()
823 pr_debug("%s: pump stev TIMER\n", ch->is->name); isar_pump_statev_modem()
826 pr_debug("%s: pump stev CONNECT\n", ch->is->name); isar_pump_statev_modem()
827 deliver_status(ch, HW_MOD_CONNECT); isar_pump_statev_modem()
830 pr_debug("%s: pump stev NO CONNECT\n", ch->is->name); isar_pump_statev_modem()
831 send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL); isar_pump_statev_modem()
832 deliver_status(ch, HW_MOD_NOCARR); isar_pump_statev_modem()
835 pr_debug("%s: pump stev V24 OFF\n", ch->is->name); isar_pump_statev_modem()
838 pr_debug("%s: pump stev CTS ON\n", ch->is->name); isar_pump_statev_modem()
841 pr_debug("%s pump stev CTS OFF\n", ch->is->name); isar_pump_statev_modem()
844 pr_debug("%s: pump stev CARRIER ON\n", ch->is->name); isar_pump_statev_modem()
845 test_and_set_bit(ISAR_RATE_REQ, &ch->is->Flags); isar_pump_statev_modem()
846 send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL); isar_pump_statev_modem()
849 pr_debug("%s: pump stev CARRIER OFF\n", ch->is->name); isar_pump_statev_modem()
852 pr_debug("%s: pump stev DSR ON\n", ch->is->name); isar_pump_statev_modem()
855 pr_debug("%s: pump stev DSR_OFF\n", ch->is->name); isar_pump_statev_modem()
858 pr_debug("%s: pump stev REMOTE RETRAIN\n", ch->is->name); isar_pump_statev_modem()
861 pr_debug("%s: pump stev REMOTE RENEGOTIATE\n", ch->is->name); isar_pump_statev_modem()
864 pr_debug("%s: pump stev GSTN CLEAR\n", ch->is->name); isar_pump_statev_modem()
867 pr_info("u%s: unknown pump stev %x\n", ch->is->name, devt); isar_pump_statev_modem()
873 isar_pump_statev_fax(struct isar_ch *ch, u8 devt) { isar_pump_statev_fax() argument
874 u8 dps = SET_DPS(ch->dpath); isar_pump_statev_fax()
879 pr_debug("%s: pump stev TIMER\n", ch->is->name); isar_pump_statev_fax()
882 pr_debug("%s: pump stev RSP_READY\n", ch->is->name); isar_pump_statev_fax()
883 ch->state = STFAX_READY; isar_pump_statev_fax()
884 deliver_status(ch, HW_MOD_READY); isar_pump_statev_fax()
886 if (test_bit(BC_FLG_ORIG, &ch->bch.Flags)) isar_pump_statev_fax()
893 if (ch->state == STFAX_LINE) { isar_pump_statev_fax()
894 pr_debug("%s: pump stev LINE_TX_H\n", ch->is->name); isar_pump_statev_fax()
895 ch->state = STFAX_CONT; isar_pump_statev_fax()
896 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
900 ch->is->name, ch->state); isar_pump_statev_fax()
904 if (ch->state == STFAX_LINE) { isar_pump_statev_fax()
905 pr_debug("%s: pump stev LINE_RX_H\n", ch->is->name); isar_pump_statev_fax()
906 ch->state = STFAX_CONT; isar_pump_statev_fax()
907 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
911 ch->is->name, ch->state); isar_pump_statev_fax()
915 if (ch->state == STFAX_LINE) { isar_pump_statev_fax()
916 pr_debug("%s: pump stev LINE_TX_B\n", ch->is->name); isar_pump_statev_fax()
917 ch->state = STFAX_CONT; isar_pump_statev_fax()
918 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
922 ch->is->name, ch->state); isar_pump_statev_fax()
926 if (ch->state == STFAX_LINE) { isar_pump_statev_fax()
927 pr_debug("%s: pump stev LINE_RX_B\n", ch->is->name); isar_pump_statev_fax()
928 ch->state = STFAX_CONT; isar_pump_statev_fax()
929 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
933 ch->is->name, ch->state); isar_pump_statev_fax()
937 if (ch->state == STFAX_CONT) { isar_pump_statev_fax()
938 pr_debug("%s: pump stev RSP_CONN\n", ch->is->name); isar_pump_statev_fax()
939 ch->state = STFAX_ACTIV; isar_pump_statev_fax()
940 test_and_set_bit(ISAR_RATE_REQ, &ch->is->Flags); isar_pump_statev_fax()
941 send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL); isar_pump_statev_fax()
942 if (ch->cmd == PCTRL_CMD_FTH) { isar_pump_statev_fax()
943 int delay = (ch->mod == 3) ? 1000 : 200; isar_pump_statev_fax()
946 &ch->bch.Flags)) isar_pump_statev_fax()
947 del_timer(&ch->ftimer); isar_pump_statev_fax()
948 ch->ftimer.expires = isar_pump_statev_fax()
951 &ch->bch.Flags); isar_pump_statev_fax()
952 add_timer(&ch->ftimer); isar_pump_statev_fax()
954 deliver_status(ch, HW_MOD_CONNECT); isar_pump_statev_fax()
958 ch->is->name, ch->state); isar_pump_statev_fax()
962 pr_debug("%s: pump stev FLAGS_DET\n", ch->is->name); isar_pump_statev_fax()
966 ch->is->name, ch->state); isar_pump_statev_fax()
967 if (ch->state == STFAX_ESCAPE) { isar_pump_statev_fax()
969 switch (ch->newcmd) { isar_pump_statev_fax()
971 ch->state = STFAX_READY; isar_pump_statev_fax()
976 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
978 ch->state = STFAX_SILDET; isar_pump_statev_fax()
982 ch->mod = ch->newmod; isar_pump_statev_fax()
983 p1 = ch->newmod; isar_pump_statev_fax()
984 ch->newmod = 0; isar_pump_statev_fax()
985 ch->cmd = ch->newcmd; isar_pump_statev_fax()
986 ch->newcmd = 0; isar_pump_statev_fax()
987 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
988 ch->cmd, 1, &p1); isar_pump_statev_fax()
989 ch->state = STFAX_LINE; isar_pump_statev_fax()
990 ch->try_mod = 3; isar_pump_statev_fax()
994 ch->is->name, ch->newcmd); isar_pump_statev_fax()
997 } else if (ch->state == STFAX_ACTIV) { isar_pump_statev_fax()
998 if (test_and_clear_bit(FLG_LL_OK, &ch->bch.Flags)) isar_pump_statev_fax()
999 deliver_status(ch, HW_MOD_OK); isar_pump_statev_fax()
1000 else if (ch->cmd == PCTRL_CMD_FRM) isar_pump_statev_fax()
1001 deliver_status(ch, HW_MOD_NOCARR); isar_pump_statev_fax()
1003 deliver_status(ch, HW_MOD_FCERROR); isar_pump_statev_fax()
1004 ch->state = STFAX_READY; isar_pump_statev_fax()
1005 } else if (ch->state != STFAX_SILDET) { isar_pump_statev_fax()
1007 ch->state = STFAX_READY; isar_pump_statev_fax()
1008 deliver_status(ch, HW_MOD_FCERROR); isar_pump_statev_fax()
1012 pr_debug("%s: pump stev RSP_SILDET\n", ch->is->name); isar_pump_statev_fax()
1013 if (ch->state == STFAX_SILDET) { isar_pump_statev_fax()
1014 ch->mod = ch->newmod; isar_pump_statev_fax()
1015 p1 = ch->newmod; isar_pump_statev_fax()
1016 ch->newmod = 0; isar_pump_statev_fax()
1017 ch->cmd = ch->newcmd; isar_pump_statev_fax()
1018 ch->newcmd = 0; isar_pump_statev_fax()
1019 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
1020 ch->cmd, 1, &p1); isar_pump_statev_fax()
1021 ch->state = STFAX_LINE; isar_pump_statev_fax()
1022 ch->try_mod = 3; isar_pump_statev_fax()
1026 pr_debug("%s: pump stev RSP_SILOFF\n", ch->is->name); isar_pump_statev_fax()
1029 if (ch->state == STFAX_LINE) { isar_pump_statev_fax()
1031 ch->is->name, ch->try_mod); isar_pump_statev_fax()
1032 if (ch->try_mod--) { isar_pump_statev_fax()
1033 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, isar_pump_statev_fax()
1034 ch->cmd, 1, &ch->mod); isar_pump_statev_fax()
1038 pr_debug("%s: pump stev RSP_FCERR\n", ch->is->name); isar_pump_statev_fax()
1039 ch->state = STFAX_ESCAPE; isar_pump_statev_fax()
1040 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, PCTRL_CMD_ESC, isar_pump_statev_fax()
1042 deliver_status(ch, HW_MOD_FCERROR); isar_pump_statev_fax()
1052 struct isar_ch *ch; mISDNisar_irq() local
1057 ch = sel_bch_isar(isar, isar->iis >> 6); mISDNisar_irq()
1058 if (ch) mISDNisar_irq()
1059 isar_rcv_frame(ch); mISDNisar_irq()
1074 ch = sel_bch_isar(isar, isar->iis >> 6); mISDNisar_irq()
1075 if (ch) { mISDNisar_irq()
1077 ch->bch.err_tx++; mISDNisar_irq()
1079 ch->bch.err_rdo++; mISDNisar_irq()
1087 ch = sel_bch_isar(isar, isar->iis >> 6); mISDNisar_irq()
1088 if (ch) { mISDNisar_irq()
1090 if (ch->bch.state == ISDN_P_B_MODEM_ASYNC) mISDNisar_irq()
1091 isar_pump_statev_modem(ch, isar->cmsb); mISDNisar_irq()
1092 else if (ch->bch.state == ISDN_P_B_T30_FAX) mISDNisar_irq()
1093 isar_pump_statev_fax(ch, isar->cmsb); mISDNisar_irq()
1094 else if (ch->bch.state == ISDN_P_B_RAW) { mISDNisar_irq()
1104 _queue_data(&ch->bch.ch, PH_CONTROL_IND, mISDNisar_irq()
1109 isar->name, ch->bch.state, mISDNisar_irq()
1119 ch = sel_bch_isar(isar, isar->iis >> 6); mISDNisar_irq()
1120 if (ch) { mISDNisar_irq()
1122 isar_pump_status_rsp(ch); mISDNisar_irq()
1151 struct isar_ch *ch = (struct isar_ch *)data; ftimer_handler() local
1153 pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags); ftimer_handler()
1154 test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags); ftimer_handler()
1155 if (test_and_clear_bit(FLG_LL_CONN, &ch->bch.Flags)) ftimer_handler()
1156 deliver_status(ch, HW_MOD_CONNECT); ftimer_handler()
1160 setup_pump(struct isar_ch *ch) { setup_pump() argument
1161 u8 dps = SET_DPS(ch->dpath); setup_pump()
1164 switch (ch->bch.state) { setup_pump()
1168 send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, PMOD_BYPASS, 0, NULL); setup_pump()
1171 if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) { setup_pump()
1173 send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, setup_pump()
1177 send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, setup_pump()
1182 if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { setup_pump()
1194 send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, ctrl, 6, param); setup_pump()
1198 if (test_bit(FLG_ORIGIN, &ch->bch.Flags)) { setup_pump()
1205 send_mbox(ch->is, dps | ISAR_HIS_PUMPCFG, ctrl, 2, param); setup_pump()
1206 ch->state = STFAX_NULL; setup_pump()
1207 ch->newcmd = 0; setup_pump()
1208 ch->newmod = 0; setup_pump()
1209 test_and_set_bit(FLG_FTI_RUN, &ch->bch.Flags); setup_pump()
1213 send_mbox(ch->is, dps | ISAR_HIS_PSTREQ, 0, 0, NULL); setup_pump()
1218 setup_sart(struct isar_ch *ch) { setup_sart() argument
1219 u8 dps = SET_DPS(ch->dpath); setup_sart()
1222 switch (ch->bch.state) { setup_sart()
1224 send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_DISABLE, setup_sart()
1229 send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_BINARY, setup_sart()
1234 send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, SMODE_HDLC, setup_sart()
1241 send_mbox(ch->is, dps | ISAR_HIS_SARTCFG, ctrl, 2, param); setup_sart()
1245 send_mbox(ch->is, dps | ISAR_HIS_BSTREQ, 0, 0, NULL); setup_sart()
1250 setup_iom2(struct isar_ch *ch) { setup_iom2() argument
1251 u8 dps = SET_DPS(ch->dpath); setup_iom2()
1254 if (ch->bch.nr == 2) { setup_iom2()
1258 switch (ch->bch.state) { setup_iom2()
1262 msg[1] = ch->dpath + 2; setup_iom2()
1263 msg[3] = ch->dpath + 2; setup_iom2()
1272 if (test_bit(FLG_DTMFSEND, &ch->bch.Flags)) setup_iom2()
1277 send_mbox(ch->is, dps | ISAR_HIS_IOM2CFG, cmsb, 5, msg); setup_iom2()
1279 send_mbox(ch->is, dps | ISAR_HIS_IOM2REQ, 0, 0, NULL); setup_iom2()
1284 modeisar(struct isar_ch *ch, u32 bprotocol) modeisar() argument
1287 if (ch->bch.state == ISDN_P_NONE) { /* New Setup */ modeisar()
1290 if (!ch->dpath) modeisar()
1293 test_and_clear_bit(FLG_HDLC, &ch->bch.Flags); modeisar()
1294 test_and_clear_bit(FLG_TRANSPARENT, &ch->bch.Flags); modeisar()
1299 if (!test_and_set_bit(ISAR_DP2_USE, &ch->is->Flags)) modeisar()
1300 ch->dpath = 2; modeisar()
1302 &ch->is->Flags)) modeisar()
1303 ch->dpath = 1; modeisar()
1309 test_and_set_bit(FLG_HDLC, &ch->bch.Flags); modeisar()
1312 &ch->bch.Flags); modeisar()
1318 if (!test_and_set_bit(ISAR_DP1_USE, &ch->is->Flags)) modeisar()
1319 ch->dpath = 1; modeisar()
1322 "only with DP1\n", ch->is->name); modeisar()
1327 pr_info("%s: protocol not known %x\n", ch->is->name, modeisar()
1332 pr_debug("%s: ISAR ch%d dp%d protocol %x->%x\n", ch->is->name, modeisar()
1333 ch->bch.nr, ch->dpath, ch->bch.state, bprotocol); modeisar()
1334 ch->bch.state = bprotocol; modeisar()
1335 setup_pump(ch); modeisar()
1336 setup_iom2(ch); modeisar()
1337 setup_sart(ch); modeisar()
1338 if (ch->bch.state == ISDN_P_NONE) { modeisar()
1340 if (ch->dpath == 1) modeisar()
1341 test_and_clear_bit(ISAR_DP1_USE, &ch->is->Flags); modeisar()
1342 else if (ch->dpath == 2) modeisar()
1343 test_and_clear_bit(ISAR_DP2_USE, &ch->is->Flags); modeisar()
1344 ch->dpath = 0; modeisar()
1345 ch->is->ctrl(ch->is->hw, HW_DEACT_IND, ch->bch.nr); modeisar()
1347 ch->is->ctrl(ch->is->hw, HW_ACTIVATE_IND, ch->bch.nr); modeisar()
1352 isar_pump_cmd(struct isar_ch *ch, u32 cmd, u8 para) isar_pump_cmd() argument
1354 u8 dps = SET_DPS(ch->dpath); isar_pump_cmd()
1358 ch->is->name, cmd, para, ch->bch.state); isar_pump_cmd()
1361 if (ch->state == STFAX_READY) { isar_pump_cmd()
1365 ch->state = STFAX_LINE; isar_pump_cmd()
1366 ch->cmd = ctrl; isar_pump_cmd()
1367 ch->mod = para; isar_pump_cmd()
1368 ch->newmod = 0; isar_pump_cmd()
1369 ch->newcmd = 0; isar_pump_cmd()
1370 ch->try_mod = 3; isar_pump_cmd()
1371 } else if ((ch->state == STFAX_ACTIV) && isar_pump_cmd()
1372 (ch->cmd == PCTRL_CMD_FTM) && (ch->mod == para)) isar_pump_cmd()
1373 deliver_status(ch, HW_MOD_CONNECT); isar_pump_cmd()
1375 ch->newmod = para; isar_pump_cmd()
1376 ch->newcmd = PCTRL_CMD_FTM; isar_pump_cmd()
1379 ch->state = STFAX_ESCAPE; isar_pump_cmd()
1383 if (ch->state == STFAX_READY) { isar_pump_cmd()
1387 ch->state = STFAX_LINE; isar_pump_cmd()
1388 ch->cmd = ctrl; isar_pump_cmd()
1389 ch->mod = para; isar_pump_cmd()
1390 ch->newmod = 0; isar_pump_cmd()
1391 ch->newcmd = 0; isar_pump_cmd()
1392 ch->try_mod = 3; isar_pump_cmd()
1393 } else if ((ch->state == STFAX_ACTIV) && isar_pump_cmd()
1394 (ch->cmd == PCTRL_CMD_FTH) && (ch->mod == para)) isar_pump_cmd()
1395 deliver_status(ch, HW_MOD_CONNECT); isar_pump_cmd()
1397 ch->newmod = para; isar_pump_cmd()
1398 ch->newcmd = PCTRL_CMD_FTH; isar_pump_cmd()
1401 ch->state = STFAX_ESCAPE; isar_pump_cmd()
1405 if (ch->state == STFAX_READY) { isar_pump_cmd()
1409 ch->state = STFAX_LINE; isar_pump_cmd()
1410 ch->cmd = ctrl; isar_pump_cmd()
1411 ch->mod = para; isar_pump_cmd()
1412 ch->newmod = 0; isar_pump_cmd()
1413 ch->newcmd = 0; isar_pump_cmd()
1414 ch->try_mod = 3; isar_pump_cmd()
1415 } else if ((ch->state == STFAX_ACTIV) && isar_pump_cmd()
1416 (ch->cmd == PCTRL_CMD_FRM) && (ch->mod == para)) isar_pump_cmd()
1417 deliver_status(ch, HW_MOD_CONNECT); isar_pump_cmd()
1419 ch->newmod = para; isar_pump_cmd()
1420 ch->newcmd = PCTRL_CMD_FRM; isar_pump_cmd()
1423 ch->state = STFAX_ESCAPE; isar_pump_cmd()
1427 if (ch->state == STFAX_READY) { isar_pump_cmd()
1431 ch->state = STFAX_LINE; isar_pump_cmd()
1432 ch->cmd = ctrl; isar_pump_cmd()
1433 ch->mod = para; isar_pump_cmd()
1434 ch->newmod = 0; isar_pump_cmd()
1435 ch->newcmd = 0; isar_pump_cmd()
1436 ch->try_mod = 3; isar_pump_cmd()
1437 } else if ((ch->state == STFAX_ACTIV) && isar_pump_cmd()
1438 (ch->cmd == PCTRL_CMD_FRH) && (ch->mod == para)) isar_pump_cmd()
1439 deliver_status(ch, HW_MOD_CONNECT); isar_pump_cmd()
1441 ch->newmod = para; isar_pump_cmd()
1442 ch->newcmd = PCTRL_CMD_FRH; isar_pump_cmd()
1445 ch->state = STFAX_ESCAPE; isar_pump_cmd()
1455 send_mbox(ch->is, dps | ISAR_HIS_PUMPCTRL, ctrl, nom, &p1); isar_pump_cmd()
1470 isar->ch[i].mml = msg; isar_setup()
1471 isar->ch[i].bch.state = 0; isar_setup()
1472 isar->ch[i].dpath = i + 1; isar_setup()
1473 modeisar(&isar->ch[i], ISDN_P_NONE); isar_setup()
1478 isar_l2l1(struct mISDNchannel *ch, struct sk_buff *skb) isar_l2l1() argument
1480 struct bchannel *bch = container_of(ch, struct bchannel, ch); isar_l2l1()
1500 ret = modeisar(ich, ch->protocol); isar_l2l1()
1505 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, isar_l2l1()
1513 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, isar_l2l1()
1580 isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) isar_bctrl() argument
1582 struct bchannel *bch = container_of(ch, struct bchannel, ch); isar_bctrl()
1596 ch->protocol = ISDN_P_NONE; isar_bctrl()
1597 ch->peer = NULL; isar_bctrl()
1614 modeisar(&isar->ch[0], ISDN_P_NONE); free_isar()
1615 modeisar(&isar->ch[1], ISDN_P_NONE); free_isar()
1616 del_timer(&isar->ch[0].ftimer); free_isar()
1617 del_timer(&isar->ch[1].ftimer); free_isar()
1618 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags); free_isar()
1619 test_and_clear_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags); free_isar()
1629 if (isar->ch[0].bch.debug & DEBUG_HW) init_isar()
1638 isar->ch[0].ftimer.function = &ftimer_handler; init_isar()
1639 isar->ch[0].ftimer.data = (long)&isar->ch[0]; init_isar()
1640 init_timer(&isar->ch[0].ftimer); init_isar()
1641 test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags); init_isar()
1642 isar->ch[1].ftimer.function = &ftimer_handler; init_isar()
1643 isar->ch[1].ftimer.data = (long)&isar->ch[1]; init_isar()
1644 init_timer(&isar->ch[1].ftimer); init_isar()
1645 test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags); init_isar()
1658 bch = &isar->ch[rq->adr.channel - 1].bch; isar_open()
1661 bch->ch.protocol = rq->protocol; isar_open()
1662 rq->ch = &bch->ch; isar_open()
1673 isar->ch[i].bch.nr = i + 1; mISDNisar_init()
1674 mISDN_initbchannel(&isar->ch[i].bch, MAX_DATA_MEM, 32); mISDNisar_init()
1675 isar->ch[i].bch.ch.nr = i + 1; mISDNisar_init()
1676 isar->ch[i].bch.ch.send = &isar_l2l1; mISDNisar_init()
1677 isar->ch[i].bch.ch.ctrl = isar_bctrl; mISDNisar_init()
1678 isar->ch[i].bch.hw = hw; mISDNisar_init()
1679 isar->ch[i].is = isar; mISDNisar_init()
hfcmulti.c
854 vpm_echocan_on(struct hfc_multi *hc, int ch, int taps) vpm_echocan_on() argument
858 struct bchannel *bch = hc->chan[ch].bch; vpm_echocan_on()
863 if (hc->chan[ch].protocol != ISDN_P_B_RAW) vpm_echocan_on()
876 timeslot = ((ch / 4) * 8) + ((ch % 4) * 4) + 1; vpm_echocan_on()
877 unit = ch % 4; vpm_echocan_on()
886 vpm_echocan_off(struct hfc_multi *hc, int ch) vpm_echocan_off() argument
890 struct bchannel *bch = hc->chan[ch].bch; vpm_echocan_off()
896 if (hc->chan[ch].protocol != ISDN_P_B_RAW) vpm_echocan_off()
909 timeslot = ((ch / 4) * 8) + ((ch % 4) * 4) + 1; vpm_echocan_off()
910 unit = ch % 4; vpm_echocan_off()
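Both VPM helpers above place the echo canceller the same way: timeslot = ((ch / 4) * 8) + ((ch % 4) * 4) + 1 and unit = ch % 4. For example, HFC channel 5 maps to timeslot (1 * 8) + (1 * 4) + 1 = 13 on unit 1, while channel 0 maps to timeslot 1 on unit 0.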
1843 int co, ch; hfcmulti_dtmf() local
1854 for (ch = 0; ch <= 31; ch++) { hfcmulti_dtmf()
1856 bch = hc->chan[ch].bch; hfcmulti_dtmf()
1859 if (!hc->created[hc->chan[ch].port]) hfcmulti_dtmf()
1865 __func__, ch); hfcmulti_dtmf()
1866 coeff = &(hc->chan[ch].coeff[hc->chan[ch].coeff_count * 16]); hfcmulti_dtmf()
1870 addr = hc->DTMFbase + ((co << 7) | (ch << 2)); hfcmulti_dtmf()
1917 hc->chan[ch].coeff_count++; hfcmulti_dtmf()
1918 if (hc->chan[ch].coeff_count == 8) { hfcmulti_dtmf()
1919 hc->chan[ch].coeff_count = 0; hfcmulti_dtmf()
1929 memcpy(skb_put(skb, 512), hc->chan[ch].coeff, 512); hfcmulti_dtmf()
1946 hfcmulti_tx(struct hfc_multi *hc, int ch) hfcmulti_tx() argument
1958 bch = hc->chan[ch].bch; hfcmulti_tx()
1959 dch = hc->chan[ch].dch; hfcmulti_tx()
1963 txpending = &hc->chan[ch].txpending; hfcmulti_tx()
1964 slot_tx = hc->chan[ch].slot_tx; hfcmulti_tx()
1983 (hc->chan[ch].protocol == ISDN_P_B_RAW) && hfcmulti_tx()
1984 (hc->chan[ch].slot_rx < 0) && hfcmulti_tx()
1985 (hc->chan[ch].slot_tx < 0)) hfcmulti_tx()
1986 HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1)); hfcmulti_tx()
1988 HFC_outb_nodebug(hc, R_FIFO, ch << 1); hfcmulti_tx()
2041 hc->chan[ch].Zfill = z1 - z2; hfcmulti_tx()
2042 if (hc->chan[ch].Zfill < 0) hfcmulti_tx()
2043 hc->chan[ch].Zfill += hc->Zlen; hfcmulti_tx()
2065 __func__, ch, slot_tx); hfcmulti_tx()
2074 HFC_outb_nodebug(hc, R_FIFO, ch << 1 | 1); hfcmulti_tx()
2083 HFC_outb_nodebug(hc, R_FIFO, ch << 1); hfcmulti_tx()
2108 __func__, ch, slot_tx); hfcmulti_tx()
2117 HFC_outb_nodebug(hc, R_FIFO, ch << 1 | 1); hfcmulti_tx()
2126 HFC_outb_nodebug(hc, R_FIFO, ch << 1); hfcmulti_tx()
2133 hc->activity_tx |= 1 << hc->chan[ch].port; hfcmulti_tx()
2148 __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i, hfcmulti_tx()
2153 hc->chan[ch].Zfill += ii - i; hfcmulti_tx()
2192 hfcmulti_rx(struct hfc_multi *hc, int ch) hfcmulti_rx() argument
2203 bch = hc->chan[ch].bch; hfcmulti_rx()
2207 } else if (hc->chan[ch].dch) { hfcmulti_rx()
2208 dch = hc->chan[ch].dch; hfcmulti_rx()
2218 (hc->chan[ch].protocol == ISDN_P_B_RAW) && hfcmulti_rx()
2219 (hc->chan[ch].slot_rx < 0) && hfcmulti_rx()
2220 (hc->chan[ch].slot_tx < 0)) hfcmulti_rx()
2221 HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1) | 1); hfcmulti_rx()
2223 HFC_outb_nodebug(hc, R_FIFO, (ch << 1) | 1); hfcmulti_rx()
2227 if (hc->chan[ch].rx_off) { hfcmulti_rx()
2285 hc->activity_rx |= 1 << hc->chan[ch].port; hfcmulti_rx()
2292 "got=%d (again %d)\n", __func__, hc->id + 1, ch, hfcmulti_rx()
2371 __func__, hc->id + 1, ch, Zsize, z1, z2); hfcmulti_rx()
2373 recv_Bchannel(bch, hc->chan[ch].Zfill, false); hfcmulti_rx()
2402 int ch, temp; handle_timer_irq() local
2438 for (ch = 0; ch <= 31; ch++) { handle_timer_irq()
2439 if (hc->created[hc->chan[ch].port]) { handle_timer_irq()
2440 hfcmulti_tx(hc, ch); handle_timer_irq()
2442 hfcmulti_rx(hc, ch); handle_timer_irq()
2443 if (hc->chan[ch].dch && handle_timer_irq()
2444 hc->chan[ch].nt_timer > -1) { handle_timer_irq()
2445 dch = hc->chan[ch].dch; handle_timer_irq()
2446 if (!(--hc->chan[ch].nt_timer)) { handle_timer_irq()
2580 int ch; ph_state_irq() local
2585 for (ch = 0; ch <= 31; ch++) { ph_state_irq()
2586 if (hc->chan[ch].dch) { ph_state_irq()
2587 dch = hc->chan[ch].dch; ph_state_irq()
2590 hc->chan[ch].port); ph_state_irq()
2610 (1 << hc->chan[ch].port); ph_state_irq()
2613 ~(1 << hc->chan[ch].port); ph_state_irq()
2622 (ch << 1) | 1); ph_state_irq()
2634 hc->chan[ch].port); ph_state_irq()
2646 int ch, j; fifo_irq() local
2654 ch = (block << 2) + (j >> 1); fifo_irq()
2655 dch = hc->chan[ch].dch; fifo_irq()
2656 bch = hc->chan[ch].bch; fifo_irq()
2657 if (((!dch) && (!bch)) || (!hc->created[hc->chan[ch].port])) { fifo_irq()
2663 hfcmulti_tx(hc, ch); fifo_irq()
2670 hfcmulti_tx(hc, ch); fifo_irq()
2678 hfcmulti_rx(hc, ch); fifo_irq()
2682 hfcmulti_rx(hc, ch); fifo_irq()
2868 * ch equals the HFC-channel (0-31)
2869 * ch is the number of channel (0-4,4-7,8-11,12-15,16-19,20-23,24-27,28-31
2874 mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, mode_hfcmulti() argument
2881 if (ch < 0 || ch > 31) mode_hfcmulti()
2883 oslot_tx = hc->chan[ch].slot_tx; mode_hfcmulti()
2884 oslot_rx = hc->chan[ch].slot_rx; mode_hfcmulti()
2885 conf = hc->chan[ch].conf; mode_hfcmulti()
2891 __func__, hc->id, ch, protocol, oslot_tx, slot_tx, mode_hfcmulti()
2899 if (hc->slot_owner[oslot_tx << 1] == ch) { mode_hfcmulti()
2920 if (hc->slot_owner[(oslot_rx << 1) | 1] == ch) { mode_hfcmulti()
2937 hc->chan[ch].slot_tx = -1; mode_hfcmulti()
2938 hc->chan[ch].bank_tx = 0; mode_hfcmulti()
2941 if (hc->chan[ch].txpending) mode_hfcmulti()
2952 __func__, ch, slot_tx, bank_tx, mode_hfcmulti()
2955 HFC_outb(hc, A_SL_CFG, (ch << 1) | routing); mode_hfcmulti()
2959 hc->slot_owner[slot_tx << 1] = ch; mode_hfcmulti()
2960 hc->chan[ch].slot_tx = slot_tx; mode_hfcmulti()
2961 hc->chan[ch].bank_tx = bank_tx; mode_hfcmulti()
2966 hc->chan[ch].slot_rx = -1; mode_hfcmulti()
2967 hc->chan[ch].bank_rx = 0; mode_hfcmulti()
2970 if (hc->chan[ch].txpending) mode_hfcmulti()
2981 __func__, ch, slot_rx, bank_rx, mode_hfcmulti()
2984 HFC_outb(hc, A_SL_CFG, (ch << 1) | V_CH_DIR | routing); mode_hfcmulti()
2985 hc->slot_owner[(slot_rx << 1) | 1] = ch; mode_hfcmulti()
2986 hc->chan[ch].slot_rx = slot_rx; mode_hfcmulti()
2987 hc->chan[ch].bank_rx = bank_rx; mode_hfcmulti()
2993 HFC_outb(hc, R_FIFO, ch << 1); mode_hfcmulti()
3001 HFC_outb(hc, R_FIFO, (ch << 1) | 1); mode_hfcmulti()
3008 if (hc->chan[ch].bch && hc->ctype != HFC_TYPE_E1) { mode_hfcmulti()
3009 hc->hw.a_st_ctrl0[hc->chan[ch].port] &= mode_hfcmulti()
3010 ((ch & 0x3) == 0) ? ~V_B1_EN : ~V_B2_EN; mode_hfcmulti()
3011 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port); mode_hfcmulti()
3015 hc->hw.a_st_ctrl0[hc->chan[ch].port]); mode_hfcmulti()
3017 if (hc->chan[ch].bch) { mode_hfcmulti()
3018 test_and_clear_bit(FLG_HDLC, &hc->chan[ch].bch->Flags); mode_hfcmulti()
3020 &hc->chan[ch].bch->Flags); mode_hfcmulti()
3026 (hc->chan[ch].slot_rx < 0) && mode_hfcmulti()
3027 (hc->chan[ch].slot_tx < 0)) { mode_hfcmulti()
3031 "state on PCM slot %d\n", ch, mode_hfcmulti()
3032 ((ch / 4) * 8) + ((ch % 4) * 4) + 1); mode_hfcmulti()
3035 vpm_out(hc, ch, ((ch / 4) * 8) + mode_hfcmulti()
3036 ((ch % 4) * 4) + 1, 0x01); mode_hfcmulti()
3039 HFC_outb(hc, R_FIFO, (ch << 1)); mode_hfcmulti()
3042 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + mode_hfcmulti()
3043 ((ch % 4) * 4) + 1) << 1); mode_hfcmulti()
3044 HFC_outb(hc, A_SL_CFG, 0x80 | (ch << 1)); mode_hfcmulti()
3047 HFC_outb(hc, R_FIFO, 0x20 | (ch << 1) | 1); mode_hfcmulti()
3052 if (hc->chan[ch].protocol != protocol) { mode_hfcmulti()
3056 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) + mode_hfcmulti()
3057 ((ch % 4) * 4) + 1) << 1) | 1); mode_hfcmulti()
3058 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1); mode_hfcmulti()
3062 HFC_outb(hc, R_FIFO, (ch << 1) | 1); mode_hfcmulti()
3065 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) + mode_hfcmulti()
3066 ((ch % 4) * 4)) << 1) | 1); mode_hfcmulti()
3067 HFC_outb(hc, A_SL_CFG, 0x80 | 0x40 | (ch << 1) | 1); mode_hfcmulti()
3070 HFC_outb(hc, R_FIFO, 0x20 | (ch << 1)); mode_hfcmulti()
3075 if (hc->chan[ch].protocol != protocol) { mode_hfcmulti()
3081 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + mode_hfcmulti()
3082 ((ch % 4) * 4)) << 1); mode_hfcmulti()
3083 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1)); mode_hfcmulti()
3086 HFC_outb(hc, R_FIFO, ch << 1); mode_hfcmulti()
3097 if (hc->chan[ch].protocol != protocol) { mode_hfcmulti()
3104 HFC_outb(hc, R_FIFO, (ch << 1) | 1); mode_hfcmulti()
3115 if (hc->chan[ch].protocol != protocol) { mode_hfcmulti()
3121 hc->hw.a_st_ctrl0[hc->chan[ch].port] |= mode_hfcmulti()
3122 ((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN; mode_hfcmulti()
3123 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port); mode_hfcmulti()
3127 hc->hw.a_st_ctrl0[hc->chan[ch].port]); mode_hfcmulti()
3129 if (hc->chan[ch].bch) mode_hfcmulti()
3131 &hc->chan[ch].bch->Flags); mode_hfcmulti()
3139 HFC_outb(hc, R_FIFO, ch << 1); mode_hfcmulti()
3141 if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch) { mode_hfcmulti()
3154 HFC_outb(hc, R_FIFO, (ch << 1) | 1); mode_hfcmulti()
3157 if (hc->ctype == HFC_TYPE_E1 || hc->chan[ch].bch) mode_hfcmulti()
3164 if (hc->chan[ch].bch) { mode_hfcmulti()
3165 test_and_set_bit(FLG_HDLC, &hc->chan[ch].bch->Flags); mode_hfcmulti()
3167 hc->hw.a_st_ctrl0[hc->chan[ch].port] |= mode_hfcmulti()
3168 ((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN; mode_hfcmulti()
3169 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port); mode_hfcmulti()
3173 hc->hw.a_st_ctrl0[hc->chan[ch].port]); mode_hfcmulti()
3180 hc->chan[ch].protocol = ISDN_P_NONE; mode_hfcmulti()
3183 hc->chan[ch].protocol = protocol; mode_hfcmulti()
3193 hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx, hfcmulti_pcm() argument
3198 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0); hfcmulti_pcm()
3203 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, slot_tx, bank_tx, hfcmulti_pcm()
3212 hfcmulti_conf(struct hfc_multi *hc, int ch, int num) hfcmulti_conf() argument
3215 hc->chan[ch].conf = num; hfcmulti_conf()
3217 hc->chan[ch].conf = -1; hfcmulti_conf()
3218 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, hc->chan[ch].slot_tx, hfcmulti_conf()
3219 hc->chan[ch].bank_tx, hc->chan[ch].slot_rx, hfcmulti_conf()
3220 hc->chan[ch].bank_rx); hfcmulti_conf()
3340 handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) handle_dmsg() argument
3342 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); handle_dmsg()
3364 queue_ch_frame(ch, PH_DATA_CNF, id, NULL); handle_dmsg()
3469 handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) handle_bmsg() argument
3471 struct bchannel *bch = container_of(ch, struct bchannel, ch); handle_bmsg()
3494 printk(KERN_DEBUG "%s: PH_ACTIVATE ch %d (0..32)\n", handle_bmsg()
3501 ch->protocol, handle_bmsg()
3507 if (ch->protocol == ISDN_P_B_RAW && !hc->dtmf handle_bmsg()
3523 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL, handle_bmsg()
3552 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL, handle_bmsg()
3696 hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) hfcm_bctrl() argument
3698 struct bchannel *bch = container_of(ch, struct bchannel, ch); hfcm_bctrl()
3710 ch->protocol = ISDN_P_NONE; hfcm_bctrl()
3711 ch->peer = NULL; hfcm_bctrl()
3736 int ch, i; ph_state_change() local
3743 ch = dch->slot; ph_state_change()
3813 if (hc->chan[ch].nt_timer == 0) { ph_state_change()
3814 hc->chan[ch].nt_timer = -1; ph_state_change()
3816 hc->chan[ch].port); ph_state_change()
3826 hc->chan[ch].nt_timer = ph_state_change()
3829 hc->chan[ch].port); ph_state_change()
3838 hc->chan[ch].nt_timer = -1; ph_state_change()
3844 hc->chan[ch].nt_timer = -1; ph_state_change()
3847 hc->chan[ch].nt_timer = -1; ph_state_change()
4094 rq->ch = &dch->dev.D; open_dchannel()
4105 int ch; open_bchannel() local
4112 ch = rq->adr.channel; open_bchannel()
4114 ch = (rq->adr.channel - 1) + (dch->slot - 2); open_bchannel()
4115 bch = hc->chan[ch].bch; open_bchannel()
4117 printk(KERN_ERR "%s:internal error ch %d has no bch\n", open_bchannel()
4118 __func__, ch); open_bchannel()
4123 bch->ch.protocol = rq->protocol; open_bchannel()
4124 hc->chan[ch].rx_off = 0; open_bchannel()
4125 rq->ch = &bch->ch; open_bchannel()
4186 hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) hfcm_dctrl() argument
4188 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); hfcm_dctrl()
4649 int ch; release_card() local
4677 for (ch = 0; ch <= 31; ch++) { release_card()
4678 if (hc->chan[ch].dch) release_card()
4679 release_port(hc, hc->chan[ch].dch); release_card()
4818 int ch, ret = 0; init_e1_port() local
4837 for (ch = 1; ch <= 31; ch++) { init_e1_port()
4838 if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */ init_e1_port()
4847 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL); init_e1_port()
4848 if (!hc->chan[ch].coeff) { init_e1_port()
4855 bch->nr = ch; init_e1_port()
4856 bch->slot = ch; init_e1_port()
4860 bch->ch.send = handle_bmsg; init_e1_port()
4861 bch->ch.ctrl = hfcm_bctrl; init_e1_port()
4862 bch->ch.nr = ch; init_e1_port()
4863 list_add(&bch->ch.list, &dch->dev.bchannels); init_e1_port()
4864 hc->chan[ch].bch = bch; init_e1_port()
4865 hc->chan[ch].port = pt; init_e1_port()
4892 int ch, i, ret = 0; init_multi_port() local
4912 for (ch = 0; ch < dch->dev.nrbchan; ch++) { init_multi_port()
4920 hc->chan[i + ch].coeff = kzalloc(512, GFP_KERNEL); init_multi_port()
4921 if (!hc->chan[i + ch].coeff) { init_multi_port()
4928 bch->nr = ch + 1; init_multi_port()
4929 bch->slot = i + ch; init_multi_port()
4933 bch->ch.send = handle_bmsg; init_multi_port()
4934 bch->ch.ctrl = hfcm_bctrl; init_multi_port()
4935 bch->ch.nr = ch + 1; init_multi_port()
4936 list_add(&bch->ch.list, &dch->dev.bchannels); init_multi_port()
4937 hc->chan[i + ch].bch = bch; init_multi_port()
4938 hc->chan[i + ch].port = pt; init_multi_port()
5013 int i, ch; hfcmulti_init() local
5052 for (ch = 0; ch <= 31; ch++) { hfcmulti_init()
5053 if (!((1 << ch) & dmask[E1_cnt])) hfcmulti_init()
5055 hc->dnum[pt] = ch; hfcmulti_init()
5068 E1_cnt + 1, ch, hc->bmask[pt]); hfcmulti_init()
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmutil/
d11.c
52 static void brcmu_d11n_encchspec(struct brcmu_chan *ch) brcmu_d11n_encchspec() argument
54 if (ch->bw == BRCMU_CHAN_BW_20) brcmu_d11n_encchspec()
55 ch->sb = BRCMU_CHAN_SB_NONE; brcmu_d11n_encchspec()
57 ch->chspec = 0; brcmu_d11n_encchspec()
58 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK, brcmu_d11n_encchspec()
59 BRCMU_CHSPEC_CH_SHIFT, ch->chnum); brcmu_d11n_encchspec()
60 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_SB_MASK, brcmu_d11n_encchspec()
61 0, d11n_sb(ch->sb)); brcmu_d11n_encchspec()
62 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11N_BW_MASK, brcmu_d11n_encchspec()
63 0, d11n_bw(ch->bw)); brcmu_d11n_encchspec()
65 if (ch->chnum <= CH_MAX_2G_CHANNEL) brcmu_d11n_encchspec()
66 ch->chspec |= BRCMU_CHSPEC_D11N_BND_2G; brcmu_d11n_encchspec()
68 ch->chspec |= BRCMU_CHSPEC_D11N_BND_5G; brcmu_d11n_encchspec()
86 static void brcmu_d11ac_encchspec(struct brcmu_chan *ch) brcmu_d11ac_encchspec() argument
88 if (ch->bw == BRCMU_CHAN_BW_20 || ch->sb == BRCMU_CHAN_SB_NONE) brcmu_d11ac_encchspec()
89 ch->sb = BRCMU_CHAN_SB_L; brcmu_d11ac_encchspec()
91 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_CH_MASK, brcmu_d11ac_encchspec()
92 BRCMU_CHSPEC_CH_SHIFT, ch->chnum); brcmu_d11ac_encchspec()
93 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, brcmu_d11ac_encchspec()
94 BRCMU_CHSPEC_D11AC_SB_SHIFT, ch->sb); brcmu_d11ac_encchspec()
95 brcmu_maskset16(&ch->chspec, BRCMU_CHSPEC_D11AC_BW_MASK, brcmu_d11ac_encchspec()
96 0, d11ac_bw(ch->bw)); brcmu_d11ac_encchspec()
98 ch->chspec &= ~BRCMU_CHSPEC_D11AC_BND_MASK; brcmu_d11ac_encchspec()
99 if (ch->chnum <= CH_MAX_2G_CHANNEL) brcmu_d11ac_encchspec()
100 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_2G; brcmu_d11ac_encchspec()
102 ch->chspec |= BRCMU_CHSPEC_D11AC_BND_5G; brcmu_d11ac_encchspec()
105 static void brcmu_d11n_decchspec(struct brcmu_chan *ch) brcmu_d11n_decchspec() argument
109 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); brcmu_d11n_decchspec()
111 switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { brcmu_d11n_decchspec()
113 ch->bw = BRCMU_CHAN_BW_20; brcmu_d11n_decchspec()
114 ch->sb = BRCMU_CHAN_SB_NONE; brcmu_d11n_decchspec()
117 ch->bw = BRCMU_CHAN_BW_40; brcmu_d11n_decchspec()
118 val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK; brcmu_d11n_decchspec()
120 ch->sb = BRCMU_CHAN_SB_L; brcmu_d11n_decchspec()
121 ch->chnum -= CH_10MHZ_APART; brcmu_d11n_decchspec()
123 ch->sb = BRCMU_CHAN_SB_U; brcmu_d11n_decchspec()
124 ch->chnum += CH_10MHZ_APART; brcmu_d11n_decchspec()
132 switch (ch->chspec & BRCMU_CHSPEC_D11N_BND_MASK) { brcmu_d11n_decchspec()
134 ch->band = BRCMU_CHAN_BAND_5G; brcmu_d11n_decchspec()
137 ch->band = BRCMU_CHAN_BAND_2G; brcmu_d11n_decchspec()
145 static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) brcmu_d11ac_decchspec() argument
149 ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); brcmu_d11ac_decchspec()
151 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { brcmu_d11ac_decchspec()
153 ch->bw = BRCMU_CHAN_BW_20; brcmu_d11ac_decchspec()
154 ch->sb = BRCMU_CHAN_SB_NONE; brcmu_d11ac_decchspec()
157 ch->bw = BRCMU_CHAN_BW_40; brcmu_d11ac_decchspec()
158 val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK; brcmu_d11ac_decchspec()
160 ch->sb = BRCMU_CHAN_SB_L; brcmu_d11ac_decchspec()
161 ch->chnum -= CH_10MHZ_APART; brcmu_d11ac_decchspec()
163 ch->sb = BRCMU_CHAN_SB_U; brcmu_d11ac_decchspec()
164 ch->chnum += CH_10MHZ_APART; brcmu_d11ac_decchspec()
170 ch->bw = BRCMU_CHAN_BW_80; brcmu_d11ac_decchspec()
171 ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, brcmu_d11ac_decchspec()
173 switch (ch->sb) { brcmu_d11ac_decchspec()
175 ch->chnum -= CH_30MHZ_APART; brcmu_d11ac_decchspec()
178 ch->chnum -= CH_10MHZ_APART; brcmu_d11ac_decchspec()
181 ch->chnum += CH_10MHZ_APART; brcmu_d11ac_decchspec()
184 ch->chnum += CH_30MHZ_APART; brcmu_d11ac_decchspec()
198 switch (ch->chspec & BRCMU_CHSPEC_D11AC_BND_MASK) { brcmu_d11ac_decchspec()
200 ch->band = BRCMU_CHAN_BAND_5G; brcmu_d11ac_decchspec()
203 ch->band = BRCMU_CHAN_BAND_2G; brcmu_d11ac_decchspec()
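In the 40 MHz decode paths above the control channel is recovered by stepping 10 MHz off the stored centre channel; assuming CH_10MHZ_APART is 2 (channel numbers are 5 MHz apart), a 40 MHz chanspec centred on channel 38 decodes to control channel 36 for the lower sideband and 40 for the upper. The 80 MHz D11AC case applies the analogous +/-2 and +/-6 (CH_30MHZ_APART) offsets around the centre.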
/linux-4.1.27/drivers/staging/dgnc/
dgnc_cls.c
30 static inline void cls_clear_break(struct channel_t *ch, int force);
31 static inline void cls_set_cts_flow_control(struct channel_t *ch);
32 static inline void cls_set_rts_flow_control(struct channel_t *ch);
33 static inline void cls_set_ixon_flow_control(struct channel_t *ch);
34 static inline void cls_set_ixoff_flow_control(struct channel_t *ch);
35 static inline void cls_set_no_output_flow_control(struct channel_t *ch);
36 static inline void cls_set_no_input_flow_control(struct channel_t *ch);
37 static void cls_parse_modem(struct channel_t *ch, unsigned char signals);
40 static void cls_uart_init(struct channel_t *ch);
41 static void cls_uart_off(struct channel_t *ch);
44 static void cls_assert_modem_signals(struct channel_t *ch);
45 static void cls_flush_uart_write(struct channel_t *ch);
46 static void cls_flush_uart_read(struct channel_t *ch);
47 static void cls_disable_receiver(struct channel_t *ch);
48 static void cls_enable_receiver(struct channel_t *ch);
49 static void cls_send_break(struct channel_t *ch, int msecs);
50 static void cls_send_start_character(struct channel_t *ch);
51 static void cls_send_stop_character(struct channel_t *ch);
52 static void cls_copy_data_from_uart_to_queue(struct channel_t *ch);
53 static void cls_copy_data_from_queue_to_uart(struct channel_t *ch);
54 static uint cls_get_uart_bytes_left(struct channel_t *ch);
55 static void cls_send_immediate_char(struct channel_t *ch, unsigned char);
79 static inline void cls_set_cts_flow_control(struct channel_t *ch) cls_set_cts_flow_control() argument
81 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
82 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_cts_flow_control()
89 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
91 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
97 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
100 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
108 writeb(ier, &ch->ch_cls_uart->ier); cls_set_cts_flow_control()
111 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
115 &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
117 ch->ch_t_tlevel = 16; cls_set_cts_flow_control()
121 static inline void cls_set_ixon_flow_control(struct channel_t *ch) cls_set_ixon_flow_control() argument
123 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
124 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_ixon_flow_control()
131 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
133 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
139 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
142 writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); cls_set_ixon_flow_control()
143 writeb(0, &ch->ch_cls_uart->lsr); cls_set_ixon_flow_control()
144 writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); cls_set_ixon_flow_control()
145 writeb(0, &ch->ch_cls_uart->spr); cls_set_ixon_flow_control()
148 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
156 writeb(ier, &ch->ch_cls_uart->ier); cls_set_ixon_flow_control()
159 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
163 &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
167 static inline void cls_set_no_output_flow_control(struct channel_t *ch) cls_set_no_output_flow_control() argument
169 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
170 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_no_output_flow_control()
177 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
179 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
185 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
188 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
196 writeb(ier, &ch->ch_cls_uart->ier); cls_set_no_output_flow_control()
199 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
203 &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
205 ch->ch_r_watermark = 0; cls_set_no_output_flow_control()
206 ch->ch_t_tlevel = 16; cls_set_no_output_flow_control()
207 ch->ch_r_tlevel = 16; cls_set_no_output_flow_control()
211 static inline void cls_set_rts_flow_control(struct channel_t *ch) cls_set_rts_flow_control() argument
213 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
214 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_rts_flow_control()
221 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
223 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
229 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
232 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
236 writeb(ier, &ch->ch_cls_uart->ier); cls_set_rts_flow_control()
239 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
243 &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
245 ch->ch_r_watermark = 4; cls_set_rts_flow_control()
246 ch->ch_r_tlevel = 8; cls_set_rts_flow_control()
250 static inline void cls_set_ixoff_flow_control(struct channel_t *ch) cls_set_ixoff_flow_control() argument
252 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
253 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_ixoff_flow_control()
260 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
262 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
268 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
271 writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); cls_set_ixoff_flow_control()
272 writeb(0, &ch->ch_cls_uart->lsr); cls_set_ixoff_flow_control()
273 writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); cls_set_ixoff_flow_control()
274 writeb(0, &ch->ch_cls_uart->spr); cls_set_ixoff_flow_control()
277 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
281 writeb(ier, &ch->ch_cls_uart->ier); cls_set_ixoff_flow_control()
284 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
288 &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
292 static inline void cls_set_no_input_flow_control(struct channel_t *ch) cls_set_no_input_flow_control() argument
294 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
295 unsigned char ier = readb(&ch->ch_cls_uart->ier); cls_set_no_input_flow_control()
302 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
304 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
310 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
313 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
317 writeb(ier, &ch->ch_cls_uart->ier); cls_set_no_input_flow_control()
320 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
324 &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
326 ch->ch_t_tlevel = 16; cls_set_no_input_flow_control()
327 ch->ch_r_tlevel = 16; cls_set_no_input_flow_control()
338 static inline void cls_clear_break(struct channel_t *ch, int force) cls_clear_break() argument
342 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_clear_break()
345 spin_lock_irqsave(&ch->ch_lock, flags); cls_clear_break()
348 if (!ch->ch_stop_sending_break) { cls_clear_break()
349 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_clear_break()
354 if (ch->ch_flags & CH_BREAK_SENDING) { cls_clear_break()
355 if (time_after(jiffies, ch->ch_stop_sending_break) || force) { cls_clear_break()
356 unsigned char temp = readb(&ch->ch_cls_uart->lcr); cls_clear_break()
358 writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); cls_clear_break()
359 ch->ch_flags &= ~(CH_BREAK_SENDING); cls_clear_break()
360 ch->ch_stop_sending_break = 0; cls_clear_break()
363 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_clear_break()
369 struct channel_t *ch; cls_parse_isr() local
381 ch = brd->channels[port]; cls_parse_isr()
382 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_parse_isr()
388 isr = readb(&ch->ch_cls_uart->isr_fcr); cls_parse_isr()
398 ch->ch_intr_rx++; cls_parse_isr()
399 cls_copy_data_from_uart_to_queue(ch); cls_parse_isr()
400 dgnc_check_queue_flow_control(ch); cls_parse_isr()
406 spin_lock_irqsave(&ch->ch_lock, flags); cls_parse_isr()
407 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_parse_isr()
409 ch->ch_intr_tx++; cls_parse_isr()
410 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_parse_isr()
411 cls_copy_data_from_queue_to_uart(ch); cls_parse_isr()
417 ch->ch_intr_modem++; cls_parse_isr()
425 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); cls_parse_isr()
442 struct channel_t *ch; cls_param() local
452 ch = un->un_ch; cls_param()
453 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_param()
456 bd = ch->ch_bd; cls_param()
463 if ((ch->ch_c_cflag & (CBAUD)) == 0) { cls_param()
464 ch->ch_r_head = 0; cls_param()
465 ch->ch_r_tail = 0; cls_param()
466 ch->ch_e_head = 0; cls_param()
467 ch->ch_e_tail = 0; cls_param()
468 ch->ch_w_head = 0; cls_param()
469 ch->ch_w_tail = 0; cls_param()
471 cls_flush_uart_write(ch); cls_param()
472 cls_flush_uart_read(ch); cls_param()
475 ch->ch_flags |= (CH_BAUD0); cls_param()
476 ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); cls_param()
477 cls_assert_modem_signals(ch); cls_param()
478 ch->ch_old_baud = 0; cls_param()
480 } else if (ch->ch_custom_speed) { cls_param()
482 baud = ch->ch_custom_speed; cls_param()
484 if (ch->ch_flags & CH_BAUD0) { cls_param()
485 ch->ch_flags &= ~(CH_BAUD0); cls_param()
491 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)) cls_param()
492 ch->ch_mostat |= (UART_MCR_RTS); cls_param()
493 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)) cls_param()
494 ch->ch_mostat |= (UART_MCR_DTR); cls_param()
528 if (!(ch->ch_tun.un_flags & UN_ISOPEN) && cls_param()
530 baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; cls_param()
532 baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; cls_param()
534 if (ch->ch_c_cflag & CBAUDEX) cls_param()
537 if (ch->ch_digi.digi_flags & DIGI_FAST) cls_param()
553 if (ch->ch_flags & CH_BAUD0) { cls_param()
554 ch->ch_flags &= ~(CH_BAUD0); cls_param()
560 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)) cls_param()
561 ch->ch_mostat |= (UART_MCR_RTS); cls_param()
562 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)) cls_param()
563 ch->ch_mostat |= (UART_MCR_DTR); cls_param()
567 if (ch->ch_c_cflag & PARENB) cls_param()
570 if (!(ch->ch_c_cflag & PARODD)) cls_param()
578 if (ch->ch_c_cflag & CMSPAR) cls_param()
582 if (ch->ch_c_cflag & CSTOPB) cls_param()
585 switch (ch->ch_c_cflag & CSIZE) { cls_param()
601 uart_ier = readb(&ch->ch_cls_uart->ier); cls_param()
603 uart_lcr = readb(&ch->ch_cls_uart->lcr); cls_param()
608 quot = ch->ch_bd->bd_dividend / baud; cls_param()
610 if (quot != 0 && ch->ch_old_baud != baud) { cls_param()
611 ch->ch_old_baud = baud; cls_param()
612 writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr); cls_param()
613 writeb((quot & 0xff), &ch->ch_cls_uart->txrx); cls_param()
614 writeb((quot >> 8), &ch->ch_cls_uart->ier); cls_param()
615 writeb(lcr, &ch->ch_cls_uart->lcr); cls_param()
619 writeb(lcr, &ch->ch_cls_uart->lcr); cls_param()
621 if (ch->ch_c_cflag & CREAD) cls_param()
630 if ((ch->ch_digi.digi_flags & CTSPACE) || cls_param()
631 (ch->ch_digi.digi_flags & RTSPACE) || cls_param()
632 (ch->ch_c_cflag & CRTSCTS) || cls_param()
633 !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) || cls_param()
634 !(ch->ch_c_cflag & CLOCAL)) cls_param()
642 writeb(ier, &ch->ch_cls_uart->ier); cls_param()
644 if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { cls_param()
645 cls_set_cts_flow_control(ch); cls_param()
646 } else if (ch->ch_c_iflag & IXON) { cls_param()
651 if ((ch->ch_startc == _POSIX_VDISABLE) || cls_param()
652 (ch->ch_stopc == _POSIX_VDISABLE)) cls_param()
653 cls_set_no_output_flow_control(ch); cls_param()
655 cls_set_ixon_flow_control(ch); cls_param()
657 cls_set_no_output_flow_control(ch); cls_param()
660 if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { cls_param()
661 cls_set_rts_flow_control(ch); cls_param()
662 } else if (ch->ch_c_iflag & IXOFF) { cls_param()
667 if ((ch->ch_startc == _POSIX_VDISABLE) || cls_param()
668 (ch->ch_stopc == _POSIX_VDISABLE)) cls_param()
669 cls_set_no_input_flow_control(ch); cls_param()
671 cls_set_ixoff_flow_control(ch); cls_param()
673 cls_set_no_input_flow_control(ch); cls_param()
676 cls_assert_modem_signals(ch); cls_param()
679 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); cls_param()
688 struct channel_t *ch; cls_tasklet() local
716 ch = bd->channels[i]; cls_tasklet()
717 if (!ch) cls_tasklet()
727 dgnc_input(ch); cls_tasklet()
733 cls_copy_data_from_queue_to_uart(ch); cls_tasklet()
734 dgnc_wakeup_writes(ch); cls_tasklet()
739 dgnc_carrier(ch); cls_tasklet()
745 if (ch->ch_stop_sending_break) cls_tasklet()
746 cls_clear_break(ch, 0); cls_tasklet()
803 static void cls_disable_receiver(struct channel_t *ch) cls_disable_receiver() argument
805 unsigned char tmp = readb(&ch->ch_cls_uart->ier); cls_disable_receiver()
808 writeb(tmp, &ch->ch_cls_uart->ier); cls_disable_receiver()
811 static void cls_enable_receiver(struct channel_t *ch) cls_enable_receiver() argument
813 unsigned char tmp = readb(&ch->ch_cls_uart->ier); cls_enable_receiver()
816 writeb(tmp, &ch->ch_cls_uart->ier); cls_enable_receiver()
819 static void cls_copy_data_from_uart_to_queue(struct channel_t *ch) cls_copy_data_from_uart_to_queue() argument
828 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_copy_data_from_uart_to_queue()
831 spin_lock_irqsave(&ch->ch_lock, flags); cls_copy_data_from_uart_to_queue()
834 head = ch->ch_r_head; cls_copy_data_from_uart_to_queue()
835 tail = ch->ch_r_tail; cls_copy_data_from_uart_to_queue()
846 if (ch->ch_c_iflag & IGNBRK) cls_copy_data_from_uart_to_queue()
850 linestatus = readb(&ch->ch_cls_uart->lsr); cls_copy_data_from_uart_to_queue()
862 discard = readb(&ch->ch_cls_uart->txrx); cls_copy_data_from_uart_to_queue()
876 ch->ch_r_tail = tail; cls_copy_data_from_uart_to_queue()
877 ch->ch_err_overrun++; cls_copy_data_from_uart_to_queue()
881 ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE cls_copy_data_from_uart_to_queue()
883 ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx); cls_copy_data_from_uart_to_queue()
887 if (ch->ch_equeue[head] & UART_LSR_PE) cls_copy_data_from_uart_to_queue()
888 ch->ch_err_parity++; cls_copy_data_from_uart_to_queue()
889 if (ch->ch_equeue[head] & UART_LSR_BI) cls_copy_data_from_uart_to_queue()
890 ch->ch_err_break++; cls_copy_data_from_uart_to_queue()
891 if (ch->ch_equeue[head] & UART_LSR_FE) cls_copy_data_from_uart_to_queue()
892 ch->ch_err_frame++; cls_copy_data_from_uart_to_queue()
896 ch->ch_rxcount++; cls_copy_data_from_uart_to_queue()
902 ch->ch_r_head = head & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
903 ch->ch_e_head = head & EQUEUEMASK; cls_copy_data_from_uart_to_queue()
905 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_copy_data_from_uart_to_queue()
915 struct channel_t *ch; cls_drain() local
925 ch = un->un_ch; cls_drain()
926 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_drain()
929 spin_lock_irqsave(&ch->ch_lock, flags); cls_drain()
931 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_drain()
944 static void cls_flush_uart_write(struct channel_t *ch) cls_flush_uart_write() argument
946 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_flush_uart_write()
950 &ch->ch_cls_uart->isr_fcr); cls_flush_uart_write()
953 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_flush_uart_write()
957 static void cls_flush_uart_read(struct channel_t *ch) cls_flush_uart_read() argument
959 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_flush_uart_read()
976 static void cls_copy_data_from_queue_to_uart(struct channel_t *ch) cls_copy_data_from_queue_to_uart() argument
985 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_copy_data_from_queue_to_uart()
988 spin_lock_irqsave(&ch->ch_lock, flags); cls_copy_data_from_queue_to_uart()
991 if (ch->ch_w_tail == ch->ch_w_head) cls_copy_data_from_queue_to_uart()
995 if ((ch->ch_flags & CH_FORCED_STOP) || cls_copy_data_from_queue_to_uart()
996 (ch->ch_flags & CH_BREAK_SENDING)) cls_copy_data_from_queue_to_uart()
999 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) cls_copy_data_from_queue_to_uart()
1005 head = ch->ch_w_head & WQUEUEMASK; cls_copy_data_from_queue_to_uart()
1006 tail = ch->ch_w_tail & WQUEUEMASK; cls_copy_data_from_queue_to_uart()
1019 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { cls_copy_data_from_queue_to_uart()
1020 if (!(ch->ch_mostat & UART_MCR_RTS)) { cls_copy_data_from_queue_to_uart()
1021 ch->ch_mostat |= (UART_MCR_RTS); cls_copy_data_from_queue_to_uart()
1022 cls_assert_modem_signals(ch); cls_copy_data_from_queue_to_uart()
1024 ch->ch_tun.un_flags |= (UN_EMPTY); cls_copy_data_from_queue_to_uart()
1032 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { cls_copy_data_from_queue_to_uart()
1033 if (!(ch->ch_mostat & UART_MCR_DTR)) { cls_copy_data_from_queue_to_uart()
1034 ch->ch_mostat |= (UART_MCR_DTR); cls_copy_data_from_queue_to_uart()
1035 cls_assert_modem_signals(ch); cls_copy_data_from_queue_to_uart()
1037 ch->ch_tun.un_flags |= (UN_EMPTY); cls_copy_data_from_queue_to_uart()
1039 writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx); cls_copy_data_from_queue_to_uart()
1040 ch->ch_w_tail++; cls_copy_data_from_queue_to_uart()
1041 ch->ch_w_tail &= WQUEUEMASK; cls_copy_data_from_queue_to_uart()
1042 ch->ch_txcount++; cls_copy_data_from_queue_to_uart()
1048 ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_copy_data_from_queue_to_uart()
1051 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_copy_data_from_queue_to_uart()
1054 static void cls_parse_modem(struct channel_t *ch, unsigned char signals) cls_parse_modem() argument
1059 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_parse_modem()
1066 spin_lock_irqsave(&ch->ch_lock, flags); cls_parse_modem()
1067 if (ch->ch_digi.digi_flags & DIGI_ALTPIN) { cls_parse_modem()
1087 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_parse_modem()
1095 spin_lock_irqsave(&ch->ch_lock, flags); cls_parse_modem()
1097 ch->ch_mistat |= UART_MSR_DCD; cls_parse_modem()
1099 ch->ch_mistat &= ~UART_MSR_DCD; cls_parse_modem()
1102 ch->ch_mistat |= UART_MSR_DSR; cls_parse_modem()
1104 ch->ch_mistat &= ~UART_MSR_DSR; cls_parse_modem()
1107 ch->ch_mistat |= UART_MSR_RI; cls_parse_modem()
1109 ch->ch_mistat &= ~UART_MSR_RI; cls_parse_modem()
1112 ch->ch_mistat |= UART_MSR_CTS; cls_parse_modem()
1114 ch->ch_mistat &= ~UART_MSR_CTS; cls_parse_modem()
1115 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_parse_modem()
1119 static void cls_assert_modem_signals(struct channel_t *ch) cls_assert_modem_signals() argument
1123 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_assert_modem_signals()
1126 out = ch->ch_mostat; cls_assert_modem_signals()
1128 if (ch->ch_flags & CH_LOOPBACK) cls_assert_modem_signals()
1131 writeb(out, &ch->ch_cls_uart->mcr); cls_assert_modem_signals()
1137 static void cls_send_start_character(struct channel_t *ch) cls_send_start_character() argument
1139 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_send_start_character()
1142 if (ch->ch_startc != _POSIX_VDISABLE) { cls_send_start_character()
1143 ch->ch_xon_sends++; cls_send_start_character()
1144 writeb(ch->ch_startc, &ch->ch_cls_uart->txrx); cls_send_start_character()
1148 static void cls_send_stop_character(struct channel_t *ch) cls_send_stop_character() argument
1150 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_send_stop_character()
1153 if (ch->ch_stopc != _POSIX_VDISABLE) { cls_send_stop_character()
1154 ch->ch_xoff_sends++; cls_send_stop_character()
1155 writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx); cls_send_stop_character()
1160 static void cls_uart_init(struct channel_t *ch) cls_uart_init() argument
1162 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_uart_init()
1165 writeb(0, &ch->ch_cls_uart->ier); cls_uart_init()
1171 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_uart_init()
1173 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_uart_init()
1178 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_uart_init()
1181 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_uart_init()
1184 readb(&ch->ch_cls_uart->txrx); cls_uart_init()
1187 &ch->ch_cls_uart->isr_fcr); cls_uart_init()
1190 ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_uart_init()
1192 readb(&ch->ch_cls_uart->lsr); cls_uart_init()
1193 readb(&ch->ch_cls_uart->msr); cls_uart_init()
1199 static void cls_uart_off(struct channel_t *ch) cls_uart_off() argument
1201 writeb(0, &ch->ch_cls_uart->ier); cls_uart_off()
1210 static uint cls_get_uart_bytes_left(struct channel_t *ch) cls_get_uart_bytes_left() argument
1215 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_get_uart_bytes_left()
1218 lsr = readb(&ch->ch_cls_uart->lsr); cls_get_uart_bytes_left()
1222 if (ch->ch_flags & CH_TX_FIFO_EMPTY) cls_get_uart_bytes_left()
1223 tasklet_schedule(&ch->ch_bd->helper_tasklet); cls_get_uart_bytes_left()
1226 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_get_uart_bytes_left()
1239 static void cls_send_break(struct channel_t *ch, int msecs) cls_send_break() argument
1241 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_send_break()
1249 if (ch->ch_flags & CH_BREAK_SENDING) { cls_send_break()
1250 unsigned char temp = readb(&ch->ch_cls_uart->lcr); cls_send_break()
1252 writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); cls_send_break()
1253 ch->ch_flags &= ~(CH_BREAK_SENDING); cls_send_break()
1254 ch->ch_stop_sending_break = 0; cls_send_break()
1264 ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs); cls_send_break()
1267 if (!(ch->ch_flags & CH_BREAK_SENDING)) { cls_send_break()
1268 unsigned char temp = readb(&ch->ch_cls_uart->lcr); cls_send_break()
1270 writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr); cls_send_break()
1271 ch->ch_flags |= (CH_BREAK_SENDING); cls_send_break()
1282 static void cls_send_immediate_char(struct channel_t *ch, unsigned char c) cls_send_immediate_char() argument
1284 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) cls_send_immediate_char()
1287 writeb(c, &ch->ch_cls_uart->txrx); cls_send_immediate_char()
H A Ddgnc_neo.c
31 static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
32 static inline void neo_clear_break(struct channel_t *ch, int force);
33 static inline void neo_set_cts_flow_control(struct channel_t *ch);
34 static inline void neo_set_rts_flow_control(struct channel_t *ch);
35 static inline void neo_set_ixon_flow_control(struct channel_t *ch);
36 static inline void neo_set_ixoff_flow_control(struct channel_t *ch);
37 static inline void neo_set_no_output_flow_control(struct channel_t *ch);
38 static inline void neo_set_no_input_flow_control(struct channel_t *ch);
39 static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
40 static void neo_parse_modem(struct channel_t *ch, unsigned char signals);
43 static void neo_uart_init(struct channel_t *ch);
44 static void neo_uart_off(struct channel_t *ch);
47 static void neo_assert_modem_signals(struct channel_t *ch);
48 static void neo_flush_uart_write(struct channel_t *ch);
49 static void neo_flush_uart_read(struct channel_t *ch);
50 static void neo_disable_receiver(struct channel_t *ch);
51 static void neo_enable_receiver(struct channel_t *ch);
52 static void neo_send_break(struct channel_t *ch, int msecs);
53 static void neo_send_start_character(struct channel_t *ch);
54 static void neo_send_stop_character(struct channel_t *ch);
55 static void neo_copy_data_from_queue_to_uart(struct channel_t *ch);
56 static uint neo_get_uart_bytes_left(struct channel_t *ch);
57 static void neo_send_immediate_char(struct channel_t *ch, unsigned char c);
98 static inline void neo_set_cts_flow_control(struct channel_t *ch) neo_set_cts_flow_control() argument
100 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_cts_flow_control()
101 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_cts_flow_control()
117 writeb(0, &ch->ch_neo_uart->efr); neo_set_cts_flow_control()
120 writeb(efr, &ch->ch_neo_uart->efr); neo_set_cts_flow_control()
123 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); neo_set_cts_flow_control()
126 writeb(8, &ch->ch_neo_uart->tfifo); neo_set_cts_flow_control()
127 ch->ch_t_tlevel = 8; neo_set_cts_flow_control()
129 writeb(ier, &ch->ch_neo_uart->ier); neo_set_cts_flow_control()
131 neo_pci_posting_flush(ch->ch_bd); neo_set_cts_flow_control()
135 static inline void neo_set_rts_flow_control(struct channel_t *ch) neo_set_rts_flow_control() argument
137 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_rts_flow_control()
138 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_rts_flow_control()
153 writeb(0, &ch->ch_neo_uart->efr); neo_set_rts_flow_control()
156 writeb(efr, &ch->ch_neo_uart->efr); neo_set_rts_flow_control()
158 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); neo_set_rts_flow_control()
159 ch->ch_r_watermark = 4; neo_set_rts_flow_control()
161 writeb(32, &ch->ch_neo_uart->rfifo); neo_set_rts_flow_control()
162 ch->ch_r_tlevel = 32; neo_set_rts_flow_control()
164 writeb(ier, &ch->ch_neo_uart->ier); neo_set_rts_flow_control()
172 ch->ch_mostat |= UART_MCR_RTS; neo_set_rts_flow_control()
174 neo_pci_posting_flush(ch->ch_bd); neo_set_rts_flow_control()
178 static inline void neo_set_ixon_flow_control(struct channel_t *ch) neo_set_ixon_flow_control() argument
180 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_ixon_flow_control()
181 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
191 writeb(0, &ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
194 writeb(efr, &ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
196 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_ixon_flow_control()
197 ch->ch_r_watermark = 4; neo_set_ixon_flow_control()
199 writeb(32, &ch->ch_neo_uart->rfifo); neo_set_ixon_flow_control()
200 ch->ch_r_tlevel = 32; neo_set_ixon_flow_control()
203 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_ixon_flow_control()
204 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_ixon_flow_control()
206 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_ixon_flow_control()
207 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_ixon_flow_control()
209 writeb(ier, &ch->ch_neo_uart->ier); neo_set_ixon_flow_control()
211 neo_pci_posting_flush(ch->ch_bd); neo_set_ixon_flow_control()
215 static inline void neo_set_ixoff_flow_control(struct channel_t *ch) neo_set_ixoff_flow_control() argument
217 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_ixoff_flow_control()
218 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
229 writeb(0, &ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
232 writeb(efr, &ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
235 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_ixoff_flow_control()
237 writeb(8, &ch->ch_neo_uart->tfifo); neo_set_ixoff_flow_control()
238 ch->ch_t_tlevel = 8; neo_set_ixoff_flow_control()
241 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_ixoff_flow_control()
242 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_ixoff_flow_control()
244 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_ixoff_flow_control()
245 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_ixoff_flow_control()
247 writeb(ier, &ch->ch_neo_uart->ier); neo_set_ixoff_flow_control()
249 neo_pci_posting_flush(ch->ch_bd); neo_set_ixoff_flow_control()
253 static inline void neo_set_no_input_flow_control(struct channel_t *ch) neo_set_no_input_flow_control() argument
255 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_no_input_flow_control()
256 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
264 if (ch->ch_c_iflag & IXON) neo_set_no_input_flow_control()
271 writeb(0, &ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
274 writeb(efr, &ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
277 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_no_input_flow_control()
279 ch->ch_r_watermark = 0; neo_set_no_input_flow_control()
281 writeb(16, &ch->ch_neo_uart->tfifo); neo_set_no_input_flow_control()
282 ch->ch_t_tlevel = 16; neo_set_no_input_flow_control()
284 writeb(16, &ch->ch_neo_uart->rfifo); neo_set_no_input_flow_control()
285 ch->ch_r_tlevel = 16; neo_set_no_input_flow_control()
287 writeb(ier, &ch->ch_neo_uart->ier); neo_set_no_input_flow_control()
289 neo_pci_posting_flush(ch->ch_bd); neo_set_no_input_flow_control()
293 static inline void neo_set_no_output_flow_control(struct channel_t *ch) neo_set_no_output_flow_control() argument
295 unsigned char ier = readb(&ch->ch_neo_uart->ier); neo_set_no_output_flow_control()
296 unsigned char efr = readb(&ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
303 if (ch->ch_c_iflag & IXOFF) neo_set_no_output_flow_control()
309 writeb(0, &ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
312 writeb(efr, &ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
315 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_no_output_flow_control()
317 ch->ch_r_watermark = 0; neo_set_no_output_flow_control()
319 writeb(16, &ch->ch_neo_uart->tfifo); neo_set_no_output_flow_control()
320 ch->ch_t_tlevel = 16; neo_set_no_output_flow_control()
322 writeb(16, &ch->ch_neo_uart->rfifo); neo_set_no_output_flow_control()
323 ch->ch_r_tlevel = 16; neo_set_no_output_flow_control()
325 writeb(ier, &ch->ch_neo_uart->ier); neo_set_no_output_flow_control()
327 neo_pci_posting_flush(ch->ch_bd); neo_set_no_output_flow_control()
332 static inline void neo_set_new_start_stop_chars(struct channel_t *ch) neo_set_new_start_stop_chars() argument
336 if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS) neo_set_new_start_stop_chars()
340 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_new_start_stop_chars()
341 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_new_start_stop_chars()
343 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_new_start_stop_chars()
344 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_new_start_stop_chars()
346 neo_pci_posting_flush(ch->ch_bd); neo_set_new_start_stop_chars()
353 static inline void neo_clear_break(struct channel_t *ch, int force) neo_clear_break() argument
357 spin_lock_irqsave(&ch->ch_lock, flags); neo_clear_break()
360 if (!ch->ch_stop_sending_break) { neo_clear_break()
361 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_clear_break()
366 if (ch->ch_flags & CH_BREAK_SENDING) { neo_clear_break()
367 if (time_after_eq(jiffies, ch->ch_stop_sending_break) neo_clear_break()
369 unsigned char temp = readb(&ch->ch_neo_uart->lcr); neo_clear_break()
371 writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); neo_clear_break()
372 neo_pci_posting_flush(ch->ch_bd); neo_clear_break()
373 ch->ch_flags &= ~(CH_BREAK_SENDING); neo_clear_break()
374 ch->ch_stop_sending_break = 0; neo_clear_break()
377 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_clear_break()
386 struct channel_t *ch; neo_parse_isr() local
397 ch = brd->channels[port]; neo_parse_isr()
398 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_parse_isr()
404 isr = readb(&ch->ch_neo_uart->isr_fcr); neo_parse_isr()
418 ch->ch_intr_rx++; neo_parse_isr()
419 neo_copy_data_from_uart_to_queue(ch); neo_parse_isr()
422 spin_lock_irqsave(&ch->ch_lock, flags); neo_parse_isr()
423 dgnc_check_queue_flow_control(ch); neo_parse_isr()
424 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_parse_isr()
429 ch->ch_intr_tx++; neo_parse_isr()
431 spin_lock_irqsave(&ch->ch_lock, flags); neo_parse_isr()
432 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_isr()
433 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_parse_isr()
434 neo_copy_data_from_queue_to_uart(ch); neo_parse_isr()
438 cause = readb(&ch->ch_neo_uart->xoffchar1); neo_parse_isr()
448 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
450 ch->ch_flags &= ~(CH_STOP); neo_parse_isr()
451 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
456 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
458 ch->ch_flags |= CH_STOP; neo_parse_isr()
459 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
471 ch->ch_intr_modem++; neo_parse_isr()
472 cause = readb(&ch->ch_neo_uart->mcr); neo_parse_isr()
476 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
478 ch->ch_mostat |= UART_MCR_RTS; neo_parse_isr()
479 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
482 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
484 ch->ch_mostat &= ~(UART_MCR_RTS); neo_parse_isr()
485 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
490 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
492 ch->ch_mostat |= UART_MCR_DTR; neo_parse_isr()
493 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
496 spin_lock_irqsave(&ch->ch_lock, neo_parse_isr()
498 ch->ch_mostat &= ~(UART_MCR_DTR); neo_parse_isr()
499 spin_unlock_irqrestore(&ch->ch_lock, neo_parse_isr()
506 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); neo_parse_isr()
513 struct channel_t *ch; neo_parse_lsr() local
527 ch = brd->channels[port]; neo_parse_lsr()
528 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_parse_lsr()
531 linestatus = readb(&ch->ch_neo_uart->lsr); neo_parse_lsr()
533 ch->ch_cached_lsr |= linestatus; neo_parse_lsr()
535 if (ch->ch_cached_lsr & UART_LSR_DR) { neo_parse_lsr()
537 ch->ch_intr_rx++; neo_parse_lsr()
539 neo_copy_data_from_uart_to_queue(ch); neo_parse_lsr()
540 spin_lock_irqsave(&ch->ch_lock, flags); neo_parse_lsr()
541 dgnc_check_queue_flow_control(ch); neo_parse_lsr()
542 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_parse_lsr()
551 ch->ch_err_parity++; neo_parse_lsr()
554 ch->ch_err_frame++; neo_parse_lsr()
557 ch->ch_err_break++; neo_parse_lsr()
566 ch->ch_err_overrun++; neo_parse_lsr()
571 ch->ch_intr_tx++; neo_parse_lsr()
572 spin_lock_irqsave(&ch->ch_lock, flags); neo_parse_lsr()
573 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_lsr()
574 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_parse_lsr()
577 neo_copy_data_from_queue_to_uart(ch); neo_parse_lsr()
580 ch->ch_intr_tx++; neo_parse_lsr()
581 spin_lock_irqsave(&ch->ch_lock, flags); neo_parse_lsr()
582 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_lsr()
583 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_parse_lsr()
586 neo_copy_data_from_queue_to_uart(ch); neo_parse_lsr()
604 struct channel_t *ch; neo_param() local
614 ch = un->un_ch; neo_param()
615 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_param()
618 bd = ch->ch_bd; neo_param()
625 if ((ch->ch_c_cflag & (CBAUD)) == 0) { neo_param()
626 ch->ch_r_head = 0; neo_param()
627 ch->ch_r_tail = 0; neo_param()
628 ch->ch_e_head = 0; neo_param()
629 ch->ch_e_tail = 0; neo_param()
630 ch->ch_w_head = 0; neo_param()
631 ch->ch_w_tail = 0; neo_param()
633 neo_flush_uart_write(ch); neo_param()
634 neo_flush_uart_read(ch); neo_param()
637 ch->ch_flags |= (CH_BAUD0); neo_param()
638 ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); neo_param()
639 neo_assert_modem_signals(ch); neo_param()
640 ch->ch_old_baud = 0; neo_param()
643 } else if (ch->ch_custom_speed) { neo_param()
645 baud = ch->ch_custom_speed; neo_param()
647 if (ch->ch_flags & CH_BAUD0) { neo_param()
648 ch->ch_flags &= ~(CH_BAUD0); neo_param()
654 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)) neo_param()
655 ch->ch_mostat |= (UART_MCR_RTS); neo_param()
656 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)) neo_param()
657 ch->ch_mostat |= (UART_MCR_DTR); neo_param()
687 if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT)) neo_param()
688 baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; neo_param()
690 baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; neo_param()
692 if (ch->ch_c_cflag & CBAUDEX) neo_param()
695 if (ch->ch_digi.digi_flags & DIGI_FAST) neo_param()
709 if (ch->ch_flags & CH_BAUD0) { neo_param()
710 ch->ch_flags &= ~(CH_BAUD0); neo_param()
716 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)) neo_param()
717 ch->ch_mostat |= (UART_MCR_RTS); neo_param()
718 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)) neo_param()
719 ch->ch_mostat |= (UART_MCR_DTR); neo_param()
723 if (ch->ch_c_cflag & PARENB) neo_param()
726 if (!(ch->ch_c_cflag & PARODD)) neo_param()
734 if (ch->ch_c_cflag & CMSPAR) neo_param()
738 if (ch->ch_c_cflag & CSTOPB) neo_param()
741 switch (ch->ch_c_cflag & CSIZE) { neo_param()
757 uart_ier = readb(&ch->ch_neo_uart->ier); neo_param()
760 uart_lcr = readb(&ch->ch_neo_uart->lcr); neo_param()
765 quot = ch->ch_bd->bd_dividend / baud; neo_param()
767 if (quot != 0 && ch->ch_old_baud != baud) { neo_param()
768 ch->ch_old_baud = baud; neo_param()
769 writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr); neo_param()
770 writeb((quot & 0xff), &ch->ch_neo_uart->txrx); neo_param()
771 writeb((quot >> 8), &ch->ch_neo_uart->ier); neo_param()
772 writeb(lcr, &ch->ch_neo_uart->lcr); neo_param()
776 writeb(lcr, &ch->ch_neo_uart->lcr); neo_param()
778 if (ch->ch_c_cflag & CREAD) neo_param()
787 if ((ch->ch_digi.digi_flags & CTSPACE) || neo_param()
788 (ch->ch_digi.digi_flags & RTSPACE) || neo_param()
789 (ch->ch_c_cflag & CRTSCTS) || neo_param()
790 !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) || neo_param()
791 !(ch->ch_c_cflag & CLOCAL)) neo_param()
799 writeb(ier, &ch->ch_neo_uart->ier); neo_param()
802 neo_set_new_start_stop_chars(ch); neo_param()
804 if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { neo_param()
805 neo_set_cts_flow_control(ch); neo_param()
806 } else if (ch->ch_c_iflag & IXON) { neo_param()
808 if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) neo_param()
809 neo_set_no_output_flow_control(ch); neo_param()
811 neo_set_ixon_flow_control(ch); neo_param()
813 neo_set_no_output_flow_control(ch); neo_param()
816 if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { neo_param()
817 neo_set_rts_flow_control(ch); neo_param()
818 } else if (ch->ch_c_iflag & IXOFF) { neo_param()
820 if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) neo_param()
821 neo_set_no_input_flow_control(ch); neo_param()
823 neo_set_ixoff_flow_control(ch); neo_param()
825 neo_set_no_input_flow_control(ch); neo_param()
834 writeb(1, &ch->ch_neo_uart->rfifo); neo_param()
835 ch->ch_r_tlevel = 1; neo_param()
838 neo_assert_modem_signals(ch); neo_param()
841 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); neo_param()
851 struct channel_t *ch; neo_tasklet() local
878 ch = bd->channels[i]; neo_tasklet()
881 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_tasklet()
893 dgnc_input(ch); neo_tasklet()
900 neo_copy_data_from_queue_to_uart(ch); neo_tasklet()
901 dgnc_wakeup_writes(ch); neo_tasklet()
907 dgnc_carrier(ch); neo_tasklet()
913 if (ch->ch_stop_sending_break) neo_tasklet()
914 neo_clear_break(ch, 0); neo_tasklet()
932 struct channel_t *ch; neo_intr() local
1009 ch = brd->channels[port]; neo_intr()
1010 neo_copy_data_from_uart_to_queue(ch); neo_intr()
1013 spin_lock_irqsave(&ch->ch_lock, flags2); neo_intr()
1014 dgnc_check_queue_flow_control(ch); neo_intr()
1015 spin_unlock_irqrestore(&ch->ch_lock, flags2); neo_intr()
1076 static void neo_disable_receiver(struct channel_t *ch) neo_disable_receiver() argument
1078 unsigned char tmp = readb(&ch->ch_neo_uart->ier); neo_disable_receiver()
1081 writeb(tmp, &ch->ch_neo_uart->ier); neo_disable_receiver()
1082 neo_pci_posting_flush(ch->ch_bd); neo_disable_receiver()
1091 static void neo_enable_receiver(struct channel_t *ch) neo_enable_receiver() argument
1093 unsigned char tmp = readb(&ch->ch_neo_uart->ier); neo_enable_receiver()
1096 writeb(tmp, &ch->ch_neo_uart->ier); neo_enable_receiver()
1097 neo_pci_posting_flush(ch->ch_bd); neo_enable_receiver()
1101 static void neo_copy_data_from_uart_to_queue(struct channel_t *ch) neo_copy_data_from_uart_to_queue() argument
1112 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_copy_data_from_uart_to_queue()
1115 spin_lock_irqsave(&ch->ch_lock, flags); neo_copy_data_from_uart_to_queue()
1118 head = ch->ch_r_head & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
1119 tail = ch->ch_r_tail & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
1122 linestatus = ch->ch_cached_lsr; neo_copy_data_from_uart_to_queue()
1123 ch->ch_cached_lsr = 0; neo_copy_data_from_uart_to_queue()
1137 if (!(ch->ch_flags & CH_FIFO_ENABLED)) neo_copy_data_from_uart_to_queue()
1140 total = readb(&ch->ch_neo_uart->rfifo); neo_copy_data_from_uart_to_queue()
1150 if ((ch->ch_bd->dvid & 0xf0) >= UART_XR17E158_DVID) neo_copy_data_from_uart_to_queue()
1171 linestatus = readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_uart_to_queue()
1198 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_uart_to_queue()
1203 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); neo_copy_data_from_uart_to_queue()
1210 memset(ch->ch_equeue + head, 0, n); neo_copy_data_from_uart_to_queue()
1216 ch->ch_rxcount += n; neo_copy_data_from_uart_to_queue()
1223 if (ch->ch_c_iflag & IGNBRK) neo_copy_data_from_uart_to_queue()
1236 linestatus |= readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_uart_to_queue()
1244 ch->ch_cached_lsr = linestatus; neo_copy_data_from_uart_to_queue()
1258 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_uart_to_queue()
1268 memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1); neo_copy_data_from_uart_to_queue()
1282 ch->ch_r_tail = tail; neo_copy_data_from_uart_to_queue()
1283 ch->ch_err_overrun++; neo_copy_data_from_uart_to_queue()
1287 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); neo_copy_data_from_uart_to_queue()
1288 ch->ch_equeue[head] = (unsigned char) linestatus; neo_copy_data_from_uart_to_queue()
1297 ch->ch_rxcount++; neo_copy_data_from_uart_to_queue()
1303 ch->ch_r_head = head & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
1304 ch->ch_e_head = head & EQUEUEMASK; neo_copy_data_from_uart_to_queue()
1306 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_copy_data_from_uart_to_queue()
1317 struct channel_t *ch; neo_drain() local
1328 ch = un->un_ch; neo_drain()
1329 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_drain()
1332 spin_lock_irqsave(&ch->ch_lock, flags); neo_drain()
1334 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_drain()
1354 static void neo_flush_uart_write(struct channel_t *ch) neo_flush_uart_write() argument
1359 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_flush_uart_write()
1362 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); neo_flush_uart_write()
1363 neo_pci_posting_flush(ch->ch_bd); neo_flush_uart_write()
1368 tmp = readb(&ch->ch_neo_uart->isr_fcr); neo_flush_uart_write()
1375 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_flush_uart_write()
1384 static void neo_flush_uart_read(struct channel_t *ch) neo_flush_uart_read() argument
1389 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_flush_uart_read()
1392 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); neo_flush_uart_read()
1393 neo_pci_posting_flush(ch->ch_bd); neo_flush_uart_read()
1398 tmp = readb(&ch->ch_neo_uart->isr_fcr); neo_flush_uart_read()
1407 static void neo_copy_data_from_queue_to_uart(struct channel_t *ch) neo_copy_data_from_queue_to_uart() argument
1417 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_copy_data_from_queue_to_uart()
1420 spin_lock_irqsave(&ch->ch_lock, flags); neo_copy_data_from_queue_to_uart()
1423 if (ch->ch_w_tail == ch->ch_w_head) neo_copy_data_from_queue_to_uart()
1427 if ((ch->ch_flags & CH_FORCED_STOP) || neo_copy_data_from_queue_to_uart()
1428 (ch->ch_flags & CH_BREAK_SENDING)) neo_copy_data_from_queue_to_uart()
1434 if (!(ch->ch_flags & CH_FIFO_ENABLED)) { neo_copy_data_from_queue_to_uart()
1435 unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_queue_to_uart()
1438 ch->ch_cached_lsr |= lsrbits; neo_copy_data_from_queue_to_uart()
1439 if (ch->ch_cached_lsr & UART_LSR_THRE) { neo_copy_data_from_queue_to_uart()
1440 ch->ch_cached_lsr &= ~(UART_LSR_THRE); neo_copy_data_from_queue_to_uart()
1446 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { neo_copy_data_from_queue_to_uart()
1447 if (!(ch->ch_mostat & UART_MCR_RTS)) { neo_copy_data_from_queue_to_uart()
1448 ch->ch_mostat |= (UART_MCR_RTS); neo_copy_data_from_queue_to_uart()
1449 neo_assert_modem_signals(ch); neo_copy_data_from_queue_to_uart()
1451 ch->ch_tun.un_flags |= (UN_EMPTY); neo_copy_data_from_queue_to_uart()
1457 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { neo_copy_data_from_queue_to_uart()
1458 if (!(ch->ch_mostat & UART_MCR_DTR)) { neo_copy_data_from_queue_to_uart()
1459 ch->ch_mostat |= (UART_MCR_DTR); neo_copy_data_from_queue_to_uart()
1460 neo_assert_modem_signals(ch); neo_copy_data_from_queue_to_uart()
1462 ch->ch_tun.un_flags |= (UN_EMPTY); neo_copy_data_from_queue_to_uart()
1465 writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx); neo_copy_data_from_queue_to_uart()
1466 ch->ch_w_tail++; neo_copy_data_from_queue_to_uart()
1467 ch->ch_w_tail &= WQUEUEMASK; neo_copy_data_from_queue_to_uart()
1468 ch->ch_txcount++; neo_copy_data_from_queue_to_uart()
1477 if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) { neo_copy_data_from_queue_to_uart()
1478 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) neo_copy_data_from_queue_to_uart()
1483 n = readb(&ch->ch_neo_uart->tfifo); neo_copy_data_from_queue_to_uart()
1485 if ((unsigned int) n > ch->ch_t_tlevel) neo_copy_data_from_queue_to_uart()
1488 n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; neo_copy_data_from_queue_to_uart()
1490 n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo); neo_copy_data_from_queue_to_uart()
1494 head = ch->ch_w_head & WQUEUEMASK; neo_copy_data_from_queue_to_uart()
1495 tail = ch->ch_w_tail & WQUEUEMASK; neo_copy_data_from_queue_to_uart()
1513 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { neo_copy_data_from_queue_to_uart()
1514 if (!(ch->ch_mostat & UART_MCR_RTS)) { neo_copy_data_from_queue_to_uart()
1515 ch->ch_mostat |= (UART_MCR_RTS); neo_copy_data_from_queue_to_uart()
1516 neo_assert_modem_signals(ch); neo_copy_data_from_queue_to_uart()
1518 ch->ch_tun.un_flags |= (UN_EMPTY); neo_copy_data_from_queue_to_uart()
1525 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { neo_copy_data_from_queue_to_uart()
1526 if (!(ch->ch_mostat & UART_MCR_DTR)) { neo_copy_data_from_queue_to_uart()
1527 ch->ch_mostat |= (UART_MCR_DTR); neo_copy_data_from_queue_to_uart()
1528 neo_assert_modem_signals(ch); neo_copy_data_from_queue_to_uart()
1530 ch->ch_tun.un_flags |= (UN_EMPTY); neo_copy_data_from_queue_to_uart()
1533 memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s); neo_copy_data_from_queue_to_uart()
1538 ch->ch_txcount += s; neo_copy_data_from_queue_to_uart()
1543 ch->ch_w_tail = tail & WQUEUEMASK; neo_copy_data_from_queue_to_uart()
1546 neo_pci_posting_flush(ch->ch_bd); neo_copy_data_from_queue_to_uart()
1547 ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_queue_to_uart()
1551 spin_unlock_irqrestore(&ch->ch_lock, flags); neo_copy_data_from_queue_to_uart()
1555 static void neo_parse_modem(struct channel_t *ch, unsigned char signals) neo_parse_modem() argument
1559 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_parse_modem()
1566 if (ch->ch_digi.digi_flags & DIGI_ALTPIN) { neo_parse_modem()
1591 ch->ch_mistat |= UART_MSR_DCD; neo_parse_modem()
1593 ch->ch_mistat &= ~UART_MSR_DCD; neo_parse_modem()
1596 ch->ch_mistat |= UART_MSR_DSR; neo_parse_modem()
1598 ch->ch_mistat &= ~UART_MSR_DSR; neo_parse_modem()
1601 ch->ch_mistat |= UART_MSR_RI; neo_parse_modem()
1603 ch->ch_mistat &= ~UART_MSR_RI; neo_parse_modem()
1606 ch->ch_mistat |= UART_MSR_CTS; neo_parse_modem()
1608 ch->ch_mistat &= ~UART_MSR_CTS; neo_parse_modem()
1613 static void neo_assert_modem_signals(struct channel_t *ch) neo_assert_modem_signals() argument
1617 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_assert_modem_signals()
1620 out = ch->ch_mostat; neo_assert_modem_signals()
1622 if (ch->ch_flags & CH_LOOPBACK) neo_assert_modem_signals()
1625 writeb(out, &ch->ch_neo_uart->mcr); neo_assert_modem_signals()
1626 neo_pci_posting_flush(ch->ch_bd); neo_assert_modem_signals()
1633 static void neo_send_start_character(struct channel_t *ch) neo_send_start_character() argument
1635 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_send_start_character()
1638 if (ch->ch_startc != _POSIX_VDISABLE) { neo_send_start_character()
1639 ch->ch_xon_sends++; neo_send_start_character()
1640 writeb(ch->ch_startc, &ch->ch_neo_uart->txrx); neo_send_start_character()
1641 neo_pci_posting_flush(ch->ch_bd); neo_send_start_character()
1647 static void neo_send_stop_character(struct channel_t *ch) neo_send_stop_character() argument
1649 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_send_stop_character()
1652 if (ch->ch_stopc != _POSIX_VDISABLE) { neo_send_stop_character()
1653 ch->ch_xoff_sends++; neo_send_stop_character()
1654 writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx); neo_send_stop_character()
1655 neo_pci_posting_flush(ch->ch_bd); neo_send_stop_character()
1664 static void neo_uart_init(struct channel_t *ch) neo_uart_init() argument
1667 writeb(0, &ch->ch_neo_uart->ier); neo_uart_init()
1668 writeb(0, &ch->ch_neo_uart->efr); neo_uart_init()
1669 writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr); neo_uart_init()
1673 readb(&ch->ch_neo_uart->txrx); neo_uart_init()
1674 writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); neo_uart_init()
1675 readb(&ch->ch_neo_uart->lsr); neo_uart_init()
1676 readb(&ch->ch_neo_uart->msr); neo_uart_init()
1678 ch->ch_flags |= CH_FIFO_ENABLED; neo_uart_init()
1681 writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); neo_uart_init()
1682 neo_pci_posting_flush(ch->ch_bd); neo_uart_init()
1689 static void neo_uart_off(struct channel_t *ch) neo_uart_off() argument
1692 writeb(0, &ch->ch_neo_uart->efr); neo_uart_off()
1695 writeb(0, &ch->ch_neo_uart->ier); neo_uart_off()
1696 neo_pci_posting_flush(ch->ch_bd); neo_uart_off()
1700 static uint neo_get_uart_bytes_left(struct channel_t *ch) neo_get_uart_bytes_left() argument
1703 unsigned char lsr = readb(&ch->ch_neo_uart->lsr); neo_get_uart_bytes_left()
1706 ch->ch_cached_lsr |= lsr; neo_get_uart_bytes_left()
1710 if (ch->ch_flags & CH_TX_FIFO_EMPTY) neo_get_uart_bytes_left()
1711 tasklet_schedule(&ch->ch_bd->helper_tasklet); neo_get_uart_bytes_left()
1714 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_get_uart_bytes_left()
1723 static void neo_send_break(struct channel_t *ch, int msecs) neo_send_break() argument
1729 if (ch->ch_flags & CH_BREAK_SENDING) { neo_send_break()
1730 unsigned char temp = readb(&ch->ch_neo_uart->lcr); neo_send_break()
1732 writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); neo_send_break()
1733 neo_pci_posting_flush(ch->ch_bd); neo_send_break()
1734 ch->ch_flags &= ~(CH_BREAK_SENDING); neo_send_break()
1735 ch->ch_stop_sending_break = 0; neo_send_break()
1745 ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs); neo_send_break()
1748 if (!(ch->ch_flags & CH_BREAK_SENDING)) { neo_send_break()
1749 unsigned char temp = readb(&ch->ch_neo_uart->lcr); neo_send_break()
1751 writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr); neo_send_break()
1752 neo_pci_posting_flush(ch->ch_bd); neo_send_break()
1753 ch->ch_flags |= (CH_BREAK_SENDING); neo_send_break()
1766 static void neo_send_immediate_char(struct channel_t *ch, unsigned char c) neo_send_immediate_char() argument
1768 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) neo_send_immediate_char()
1771 writeb(c, &ch->ch_neo_uart->txrx); neo_send_immediate_char()
1772 neo_pci_posting_flush(ch->ch_bd); neo_send_immediate_char()
H A Ddgnc_tty.c
92 static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
107 static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
114 static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
294 struct channel_t *ch; dgnc_tty_init() local
322 ch = brd->channels[0]; dgnc_tty_init()
326 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { dgnc_tty_init()
331 spin_lock_init(&ch->ch_lock); dgnc_tty_init()
334 ch->magic = DGNC_CHANNEL_MAGIC; dgnc_tty_init()
335 ch->ch_tun.magic = DGNC_UNIT_MAGIC; dgnc_tty_init()
336 ch->ch_tun.un_ch = ch; dgnc_tty_init()
337 ch->ch_tun.un_type = DGNC_SERIAL; dgnc_tty_init()
338 ch->ch_tun.un_dev = i; dgnc_tty_init()
340 ch->ch_pun.magic = DGNC_UNIT_MAGIC; dgnc_tty_init()
341 ch->ch_pun.un_ch = ch; dgnc_tty_init()
342 ch->ch_pun.un_type = DGNC_PRINT; dgnc_tty_init()
343 ch->ch_pun.un_dev = i + 128; dgnc_tty_init()
346 ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i); dgnc_tty_init()
348 ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i); dgnc_tty_init()
350 ch->ch_bd = brd; dgnc_tty_init()
351 ch->ch_portnum = i; dgnc_tty_init()
352 ch->ch_digi = dgnc_digi_init; dgnc_tty_init()
355 ch->ch_close_delay = 250; dgnc_tty_init()
357 init_waitqueue_head(&ch->ch_flags_wait); dgnc_tty_init()
358 init_waitqueue_head(&ch->ch_tun.un_flags_wait); dgnc_tty_init()
359 init_waitqueue_head(&ch->ch_pun.un_flags_wait); dgnc_tty_init()
365 &(ch->ch_bd->pdev->dev)); dgnc_tty_init()
366 ch->ch_tun.un_sysfs = classp; dgnc_tty_init()
367 dgnc_create_tty_sysfs(&ch->ch_tun, classp); dgnc_tty_init()
370 &(ch->ch_bd->pdev->dev)); dgnc_tty_init()
371 ch->ch_pun.un_sysfs = classp; dgnc_tty_init()
372 dgnc_create_tty_sysfs(&ch->ch_pun, classp); dgnc_tty_init()
438 * ch - Pointer to channel structure.
443 static void dgnc_wmove(struct channel_t *ch, char *buf, uint n) dgnc_wmove() argument
448 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_wmove()
451 head = ch->ch_w_head & WQUEUEMASK; dgnc_wmove()
462 memcpy(ch->ch_wqueue + head, buf, remain); dgnc_wmove()
472 memcpy(ch->ch_wqueue + head, buf, remain); dgnc_wmove()
477 ch->ch_w_head = head; dgnc_wmove()
487 * ch - Pointer to channel structure.
490 void dgnc_input(struct channel_t *ch) dgnc_input() argument
506 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_input()
509 tp = ch->ch_tun.un_tty; dgnc_input()
511 bd = ch->ch_bd; dgnc_input()
515 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_input()
522 head = ch->ch_r_head & rmask; dgnc_input()
523 tail = ch->ch_r_tail & rmask; dgnc_input()
533 if (!tp || (tp->magic != TTY_MAGIC) || !(ch->ch_tun.un_flags & UN_ISOPEN) || dgnc_input()
534 !(tp->termios.c_cflag & CREAD) || (ch->ch_tun.un_flags & UN_CLOSING)) { dgnc_input()
536 ch->ch_r_head = tail; dgnc_input()
539 dgnc_check_queue_flow_control(ch); dgnc_input()
547 if (ch->ch_flags & CH_FORCED_STOPI) dgnc_input()
581 ch->ch_r_head = ch->ch_r_tail; dgnc_input()
626 if (*(ch->ch_equeue + tail + i) & UART_LSR_BI) dgnc_input()
627 tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_BREAK); dgnc_input()
628 else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE) dgnc_input()
629 tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_PARITY); dgnc_input()
630 else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE) dgnc_input()
631 tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_FRAME); dgnc_input()
633 tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_NORMAL); dgnc_input()
636 tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s); dgnc_input()
645 ch->ch_r_tail = tail & rmask; dgnc_input()
646 ch->ch_e_tail = tail & rmask; dgnc_input()
647 dgnc_check_queue_flow_control(ch); dgnc_input()
648 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_input()
658 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_input()
668 void dgnc_carrier(struct channel_t *ch) dgnc_carrier() argument
675 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_carrier()
678 bd = ch->ch_bd; dgnc_carrier()
683 if (ch->ch_mistat & UART_MSR_DCD) dgnc_carrier()
686 if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) dgnc_carrier()
689 if (ch->ch_c_cflag & CLOCAL) dgnc_carrier()
695 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { dgnc_carrier()
702 if (waitqueue_active(&(ch->ch_flags_wait))) dgnc_carrier()
703 wake_up_interruptible(&ch->ch_flags_wait); dgnc_carrier()
709 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { dgnc_carrier()
716 if (waitqueue_active(&(ch->ch_flags_wait))) dgnc_carrier()
717 wake_up_interruptible(&ch->ch_flags_wait); dgnc_carrier()
729 if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) && dgnc_carrier()
744 if (waitqueue_active(&(ch->ch_flags_wait))) dgnc_carrier()
745 wake_up_interruptible(&ch->ch_flags_wait); dgnc_carrier()
747 if (ch->ch_tun.un_open_count > 0) dgnc_carrier()
748 tty_hangup(ch->ch_tun.un_tty); dgnc_carrier()
750 if (ch->ch_pun.un_open_count > 0) dgnc_carrier()
751 tty_hangup(ch->ch_pun.un_tty); dgnc_carrier()
758 ch->ch_flags |= CH_FCAR; dgnc_carrier()
760 ch->ch_flags &= ~CH_FCAR; dgnc_carrier()
763 ch->ch_flags |= CH_CD; dgnc_carrier()
765 ch->ch_flags &= ~CH_CD; dgnc_carrier()
771 static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate) dgnc_set_custom_speed() argument
780 ch->ch_custom_speed = 0; dgnc_set_custom_speed()
789 if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1)) dgnc_set_custom_speed()
790 newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1); dgnc_set_custom_speed()
792 if (newrate && newrate > ch->ch_bd->bd_dividend) dgnc_set_custom_speed()
793 newrate = ch->ch_bd->bd_dividend; dgnc_set_custom_speed()
796 testdiv = ch->ch_bd->bd_dividend / newrate; dgnc_set_custom_speed()
805 testrate_high = ch->ch_bd->bd_dividend / testdiv; dgnc_set_custom_speed()
806 testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1); dgnc_set_custom_speed()
827 ch->ch_custom_speed = newrate; dgnc_set_custom_speed()
831 void dgnc_check_queue_flow_control(struct channel_t *ch) dgnc_check_queue_flow_control() argument
836 qleft = ch->ch_r_tail - ch->ch_r_head - 1; dgnc_check_queue_flow_control()
857 if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) { dgnc_check_queue_flow_control()
858 if (!(ch->ch_flags & CH_RECEIVER_OFF)) { dgnc_check_queue_flow_control()
859 ch->ch_bd->bd_ops->disable_receiver(ch); dgnc_check_queue_flow_control()
860 ch->ch_flags |= (CH_RECEIVER_OFF); dgnc_check_queue_flow_control()
864 else if (ch->ch_c_iflag & IXOFF) { dgnc_check_queue_flow_control()
865 if (ch->ch_stops_sent <= MAX_STOPS_SENT) { dgnc_check_queue_flow_control()
866 ch->ch_bd->bd_ops->send_stop_character(ch); dgnc_check_queue_flow_control()
867 ch->ch_stops_sent++; dgnc_check_queue_flow_control()
889 if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) { dgnc_check_queue_flow_control()
890 if (ch->ch_flags & CH_RECEIVER_OFF) { dgnc_check_queue_flow_control()
891 ch->ch_bd->bd_ops->enable_receiver(ch); dgnc_check_queue_flow_control()
892 ch->ch_flags &= ~(CH_RECEIVER_OFF); dgnc_check_queue_flow_control()
896 else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) { dgnc_check_queue_flow_control()
897 ch->ch_stops_sent = 0; dgnc_check_queue_flow_control()
898 ch->ch_bd->bd_ops->send_start_character(ch); dgnc_check_queue_flow_control()
908 void dgnc_wakeup_writes(struct channel_t *ch) dgnc_wakeup_writes() argument
913 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_wakeup_writes()
916 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_wakeup_writes()
921 qlen = ch->ch_w_head - ch->ch_w_tail; dgnc_wakeup_writes()
926 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_wakeup_writes()
930 if (ch->ch_tun.un_flags & UN_ISOPEN) { dgnc_wakeup_writes()
931 if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && dgnc_wakeup_writes()
932 ch->ch_tun.un_tty->ldisc->ops->write_wakeup) { dgnc_wakeup_writes()
933 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_wakeup_writes()
934 (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty); dgnc_wakeup_writes()
935 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_wakeup_writes()
938 wake_up_interruptible(&ch->ch_tun.un_tty->write_wait); dgnc_wakeup_writes()
944 if (ch->ch_tun.un_flags & UN_EMPTY) { dgnc_wakeup_writes()
945 if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) { dgnc_wakeup_writes()
946 ch->ch_tun.un_flags &= ~(UN_EMPTY); dgnc_wakeup_writes()
952 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) { dgnc_wakeup_writes()
953 ch->ch_mostat &= ~(UART_MCR_RTS); dgnc_wakeup_writes()
954 ch->ch_bd->bd_ops->assert_modem_signals(ch); dgnc_wakeup_writes()
961 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) { dgnc_wakeup_writes()
962 ch->ch_mostat &= ~(UART_MCR_DTR); dgnc_wakeup_writes()
963 ch->ch_bd->bd_ops->assert_modem_signals(ch); dgnc_wakeup_writes()
968 wake_up_interruptible(&ch->ch_tun.un_flags_wait); dgnc_wakeup_writes()
971 if (ch->ch_pun.un_flags & UN_ISOPEN) { dgnc_wakeup_writes()
972 if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && dgnc_wakeup_writes()
973 ch->ch_pun.un_tty->ldisc->ops->write_wakeup) { dgnc_wakeup_writes()
974 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_wakeup_writes()
975 (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty); dgnc_wakeup_writes()
976 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_wakeup_writes()
979 wake_up_interruptible(&ch->ch_pun.un_tty->write_wait); dgnc_wakeup_writes()
985 if (ch->ch_pun.un_flags & UN_EMPTY) { dgnc_wakeup_writes()
986 if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) dgnc_wakeup_writes()
987 ch->ch_pun.un_flags &= ~(UN_EMPTY); dgnc_wakeup_writes()
990 wake_up_interruptible(&ch->ch_pun.un_flags_wait); dgnc_wakeup_writes()
993 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_wakeup_writes()
1011 struct channel_t *ch; dgnc_tty_open() local
1049 ch = brd->channels[PORT_NUM(minor)]; dgnc_tty_open()
1050 if (!ch) { dgnc_tty_open()
1059 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_open()
1069 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_open()
1078 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_open()
1080 rc = wait_event_interruptible(ch->ch_flags_wait, ((ch->ch_flags & CH_OPENING) == 0)); dgnc_tty_open()
1093 rc = wait_event_interruptible(ch->ch_flags_wait, dgnc_tty_open()
1094 (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING) == 0)); dgnc_tty_open()
1100 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_open()
1122 ch->ch_flags |= (CH_OPENING); dgnc_tty_open()
1125 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_open()
1127 if (!ch->ch_rqueue) dgnc_tty_open()
1128 ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL); dgnc_tty_open()
1129 if (!ch->ch_equeue) dgnc_tty_open()
1130 ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL); dgnc_tty_open()
1131 if (!ch->ch_wqueue) dgnc_tty_open()
1132 ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL); dgnc_tty_open()
1134 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_open()
1136 ch->ch_flags &= ~(CH_OPENING); dgnc_tty_open()
1137 wake_up_interruptible(&ch->ch_flags_wait); dgnc_tty_open()
1142 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) { dgnc_tty_open()
1147 ch->ch_r_head = 0; dgnc_tty_open()
1148 ch->ch_r_tail = 0; dgnc_tty_open()
1149 ch->ch_e_head = 0; dgnc_tty_open()
1150 ch->ch_e_tail = 0; dgnc_tty_open()
1151 ch->ch_w_head = 0; dgnc_tty_open()
1152 ch->ch_w_tail = 0; dgnc_tty_open()
1154 brd->bd_ops->flush_uart_write(ch); dgnc_tty_open()
1155 brd->bd_ops->flush_uart_read(ch); dgnc_tty_open()
1157 ch->ch_flags = 0; dgnc_tty_open()
1158 ch->ch_cached_lsr = 0; dgnc_tty_open()
1159 ch->ch_stop_sending_break = 0; dgnc_tty_open()
1160 ch->ch_stops_sent = 0; dgnc_tty_open()
1162 ch->ch_c_cflag = tty->termios.c_cflag; dgnc_tty_open()
1163 ch->ch_c_iflag = tty->termios.c_iflag; dgnc_tty_open()
1164 ch->ch_c_oflag = tty->termios.c_oflag; dgnc_tty_open()
1165 ch->ch_c_lflag = tty->termios.c_lflag; dgnc_tty_open()
1166 ch->ch_startc = tty->termios.c_cc[VSTART]; dgnc_tty_open()
1167 ch->ch_stopc = tty->termios.c_cc[VSTOP]; dgnc_tty_open()
1173 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)) dgnc_tty_open()
1174 ch->ch_mostat |= (UART_MCR_RTS); dgnc_tty_open()
1175 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)) dgnc_tty_open()
1176 ch->ch_mostat |= (UART_MCR_DTR); dgnc_tty_open()
1179 brd->bd_ops->uart_init(ch); dgnc_tty_open()
1187 dgnc_carrier(ch); dgnc_tty_open()
1193 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_open()
1195 rc = dgnc_block_til_ready(tty, file, ch); dgnc_tty_open()
1198 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_open()
1199 ch->ch_open_count++; dgnc_tty_open()
1202 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_open()
1213 static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch) dgnc_block_til_ready() argument
1221 if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_block_til_ready()
1228 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_block_til_ready()
1230 ch->ch_wopen++; dgnc_block_til_ready()
1240 if (ch->ch_bd->state == BOARD_FAILED) { dgnc_block_til_ready()
1258 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) { dgnc_block_til_ready()
1275 if (ch->ch_flags & CH_CD) dgnc_block_til_ready()
1278 if (ch->ch_flags & CH_FCAR) dgnc_block_til_ready()
1298 old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags; dgnc_block_til_ready()
1300 old_flags = ch->ch_flags; dgnc_block_til_ready()
1308 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_block_til_ready()
1315 (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags))); dgnc_block_til_ready()
1317 retval = wait_event_interruptible(ch->ch_flags_wait, dgnc_block_til_ready()
1318 (old_flags != ch->ch_flags)); dgnc_block_til_ready()
1324 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_block_til_ready()
1327 ch->ch_wopen--; dgnc_block_til_ready()
1329 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_block_til_ready()
1368 struct channel_t *ch; dgnc_tty_close() local
1380 ch = un->un_ch; dgnc_tty_close()
1381 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_close()
1384 bd = ch->ch_bd; dgnc_tty_close()
1390 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_close()
1417 ch->ch_open_count--; dgnc_tty_close()
1419 if (ch->ch_open_count && un->un_open_count) { dgnc_tty_close()
1420 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_close()
1434 if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) { dgnc_tty_close()
1436 ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI); dgnc_tty_close()
1441 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_tty_close()
1442 dgnc_wmove(ch, ch->ch_digi.digi_offstr, dgnc_tty_close()
1443 (int) ch->ch_digi.digi_offlen); dgnc_tty_close()
1444 ch->ch_flags &= ~CH_PRON; dgnc_tty_close()
1447 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_close()
1456 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_close()
1463 if (ch->ch_c_cflag & HUPCL) { dgnc_tty_close()
1466 ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS); dgnc_tty_close()
1467 bd->bd_ops->assert_modem_signals(ch); dgnc_tty_close()
1473 if (ch->ch_close_delay) { dgnc_tty_close()
1474 spin_unlock_irqrestore(&ch->ch_lock, dgnc_tty_close()
1476 dgnc_ms_sleep(ch->ch_close_delay); dgnc_tty_close()
1477 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_close()
1481 ch->ch_old_baud = 0; dgnc_tty_close()
1484 ch->ch_bd->bd_ops->uart_off(ch); dgnc_tty_close()
1489 if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_tty_close()
1490 dgnc_wmove(ch, ch->ch_digi.digi_offstr, dgnc_tty_close()
1491 (int) ch->ch_digi.digi_offlen); dgnc_tty_close()
1492 ch->ch_flags &= ~CH_PRON; dgnc_tty_close()
1499 wake_up_interruptible(&ch->ch_flags_wait); dgnc_tty_close()
1502 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_close()
1516 struct channel_t *ch = NULL; dgnc_tty_chars_in_buffer() local
1531 ch = un->un_ch; dgnc_tty_chars_in_buffer()
1532 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_chars_in_buffer()
1535 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_chars_in_buffer()
1538 thead = ch->ch_w_head & tmask; dgnc_tty_chars_in_buffer()
1539 ttail = ch->ch_w_tail & tmask; dgnc_tty_chars_in_buffer()
1541 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_chars_in_buffer()
1566 struct channel_t *ch = NULL; dgnc_maxcps_room() local
1576 ch = un->un_ch; dgnc_maxcps_room()
1577 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_maxcps_room()
1587 if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) { dgnc_maxcps_room()
1591 (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps; dgnc_maxcps_room()
1593 if (ch->ch_cpstime < current_time) { dgnc_maxcps_room()
1595 ch->ch_cpstime = current_time; /* reset ch_cpstime */ dgnc_maxcps_room()
1596 cps_limit = ch->ch_digi.digi_bufsize; dgnc_maxcps_room()
1597 } else if (ch->ch_cpstime < buffer_time) { dgnc_maxcps_room()
1599 cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ; dgnc_maxcps_room()
1619 struct channel_t *ch = NULL; dgnc_tty_write_room() local
1634 ch = un->un_ch; dgnc_tty_write_room()
1635 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_write_room()
1638 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_write_room()
1641 head = (ch->ch_w_head) & tmask; dgnc_tty_write_room()
1642 tail = (ch->ch_w_tail) & tmask; dgnc_tty_write_room()
1656 if (!(ch->ch_flags & CH_PRON)) dgnc_tty_write_room()
1657 ret -= ch->ch_digi.digi_onlen; dgnc_tty_write_room()
1658 ret -= ch->ch_digi.digi_offlen; dgnc_tty_write_room()
1660 if (ch->ch_flags & CH_PRON) dgnc_tty_write_room()
1661 ret -= ch->ch_digi.digi_offlen; dgnc_tty_write_room()
1667 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_write_room()
1676 * Put a character into ch->ch_buf
1699 struct channel_t *ch = NULL; dgnc_tty_write() local
1717 ch = un->un_ch; dgnc_tty_write()
1718 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_write()
1731 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_write()
1735 head = (ch->ch_w_head) & tmask; dgnc_tty_write()
1736 tail = (ch->ch_w_tail) & tmask; dgnc_tty_write()
1764 if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) { dgnc_tty_write()
1765 dgnc_wmove(ch, ch->ch_digi.digi_onstr, dgnc_tty_write()
1766 (int) ch->ch_digi.digi_onlen); dgnc_tty_write()
1767 head = (ch->ch_w_head) & tmask; dgnc_tty_write()
1768 ch->ch_flags |= CH_PRON; dgnc_tty_write()
1775 if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) { dgnc_tty_write()
1776 dgnc_wmove(ch, ch->ch_digi.digi_offstr, dgnc_tty_write()
1777 (int) ch->ch_digi.digi_offlen); dgnc_tty_write()
1778 head = (ch->ch_w_head) & tmask; dgnc_tty_write()
1779 ch->ch_flags &= ~CH_PRON; dgnc_tty_write()
1792 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_write()
1814 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_write()
1831 memcpy(ch->ch_wqueue + head, buf, remain); dgnc_tty_write()
1841 memcpy(ch->ch_wqueue + head, buf, remain); dgnc_tty_write()
1847 ch->ch_w_head = head; dgnc_tty_write()
1851 if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0) dgnc_tty_write()
1852 && (ch->ch_digi.digi_bufsize > 0)) { dgnc_tty_write()
1853 ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps; dgnc_tty_write()
1857 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_write()
1860 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_write()
1868 ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch); dgnc_tty_write()
1875 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_write()
1886 struct channel_t *ch; dgnc_tty_tiocmget() local
1899 ch = un->un_ch; dgnc_tty_tiocmget()
1900 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_tiocmget()
1903 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_tiocmget()
1905 mstat = (ch->ch_mostat | ch->ch_mistat); dgnc_tty_tiocmget()
1907 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_tiocmget()
1938 struct channel_t *ch; dgnc_tty_tiocmset() local
1950 ch = un->un_ch; dgnc_tty_tiocmset()
1951 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_tiocmset()
1954 bd = ch->ch_bd; dgnc_tty_tiocmset()
1958 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_tiocmset()
1961 ch->ch_mostat |= UART_MCR_RTS; dgnc_tty_tiocmset()
1964 ch->ch_mostat |= UART_MCR_DTR; dgnc_tty_tiocmset()
1967 ch->ch_mostat &= ~(UART_MCR_RTS); dgnc_tty_tiocmset()
1970 ch->ch_mostat &= ~(UART_MCR_DTR); dgnc_tty_tiocmset()
1972 ch->ch_bd->bd_ops->assert_modem_signals(ch); dgnc_tty_tiocmset()
1974 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_tiocmset()
1988 struct channel_t *ch; dgnc_tty_send_break() local
2000 ch = un->un_ch; dgnc_tty_send_break()
2001 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_send_break()
2004 bd = ch->ch_bd; dgnc_tty_send_break()
2019 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_send_break()
2021 ch->ch_bd->bd_ops->send_break(ch, msec); dgnc_tty_send_break()
2023 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_send_break()
2038 struct channel_t *ch; dgnc_tty_wait_until_sent() local
2049 ch = un->un_ch; dgnc_tty_wait_until_sent()
2050 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_wait_until_sent()
2053 bd = ch->ch_bd; dgnc_tty_wait_until_sent()
2069 struct channel_t *ch; dgnc_tty_send_xchar() local
2080 ch = un->un_ch; dgnc_tty_send_xchar()
2081 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_send_xchar()
2084 bd = ch->ch_bd; dgnc_tty_send_xchar()
2090 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_send_xchar()
2091 bd->bd_ops->send_immediate_char(ch, c); dgnc_tty_send_xchar()
2092 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_send_xchar()
2103 static inline int dgnc_get_mstat(struct channel_t *ch) dgnc_get_mstat() argument
2109 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_get_mstat()
2112 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_get_mstat()
2114 mstat = (ch->ch_mostat | ch->ch_mistat); dgnc_get_mstat()
2116 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_get_mstat()
2141 static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value) dgnc_get_modem_info() argument
2145 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_get_modem_info()
2148 result = dgnc_get_mstat(ch); dgnc_get_modem_info()
2165 struct channel_t *ch; dgnc_set_modem_info() local
2178 ch = un->un_ch; dgnc_set_modem_info()
2179 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_set_modem_info()
2182 bd = ch->ch_bd; dgnc_set_modem_info()
2193 ch->ch_mostat |= UART_MCR_RTS; dgnc_set_modem_info()
2196 ch->ch_mostat |= UART_MCR_DTR; dgnc_set_modem_info()
2202 ch->ch_mostat &= ~(UART_MCR_RTS); dgnc_set_modem_info()
2205 ch->ch_mostat &= ~(UART_MCR_DTR); dgnc_set_modem_info()
2212 ch->ch_mostat |= UART_MCR_RTS; dgnc_set_modem_info()
2214 ch->ch_mostat &= ~(UART_MCR_RTS); dgnc_set_modem_info()
2217 ch->ch_mostat |= UART_MCR_DTR; dgnc_set_modem_info()
2219 ch->ch_mostat &= ~(UART_MCR_DTR); dgnc_set_modem_info()
2227 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_set_modem_info()
2229 ch->ch_bd->bd_ops->assert_modem_signals(ch); dgnc_set_modem_info()
2231 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_set_modem_info()
2247 struct channel_t *ch; dgnc_tty_digigeta() local
2262 ch = un->un_ch; dgnc_tty_digigeta()
2263 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_digigeta()
2268 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_digigeta()
2269 memcpy(&tmp, &ch->ch_digi, sizeof(tmp)); dgnc_tty_digigeta()
2270 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_digigeta()
2290 struct channel_t *ch; dgnc_tty_digiseta() local
2302 ch = un->un_ch; dgnc_tty_digiseta()
2303 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_digiseta()
2306 bd = ch->ch_bd; dgnc_tty_digiseta()
2313 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_digiseta()
2318 if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE)) dgnc_tty_digiseta()
2319 ch->ch_mostat &= ~(UART_MCR_RTS); dgnc_tty_digiseta()
2320 if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE)) dgnc_tty_digiseta()
2321 ch->ch_mostat |= (UART_MCR_RTS); dgnc_tty_digiseta()
2326 if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE)) dgnc_tty_digiseta()
2327 ch->ch_mostat &= ~(UART_MCR_DTR); dgnc_tty_digiseta()
2328 if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE)) dgnc_tty_digiseta()
2329 ch->ch_mostat |= (UART_MCR_DTR); dgnc_tty_digiseta()
2331 memcpy(&ch->ch_digi, &new_digi, sizeof(new_digi)); dgnc_tty_digiseta()
2333 if (ch->ch_digi.digi_maxcps < 1) dgnc_tty_digiseta()
2334 ch->ch_digi.digi_maxcps = 1; dgnc_tty_digiseta()
2336 if (ch->ch_digi.digi_maxcps > 10000) dgnc_tty_digiseta()
2337 ch->ch_digi.digi_maxcps = 10000; dgnc_tty_digiseta()
2339 if (ch->ch_digi.digi_bufsize < 10) dgnc_tty_digiseta()
2340 ch->ch_digi.digi_bufsize = 10; dgnc_tty_digiseta()
2342 if (ch->ch_digi.digi_maxchar < 1) dgnc_tty_digiseta()
2343 ch->ch_digi.digi_maxchar = 1; dgnc_tty_digiseta()
2345 if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize) dgnc_tty_digiseta()
2346 ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize; dgnc_tty_digiseta()
2348 if (ch->ch_digi.digi_onlen > DIGI_PLEN) dgnc_tty_digiseta()
2349 ch->ch_digi.digi_onlen = DIGI_PLEN; dgnc_tty_digiseta()
2351 if (ch->ch_digi.digi_offlen > DIGI_PLEN) dgnc_tty_digiseta()
2352 ch->ch_digi.digi_offlen = DIGI_PLEN; dgnc_tty_digiseta()
2354 ch->ch_bd->bd_ops->param(tty); dgnc_tty_digiseta()
2356 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_digiseta()
2368 struct channel_t *ch; dgnc_tty_set_termios() local
2379 ch = un->un_ch; dgnc_tty_set_termios()
2380 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_set_termios()
2383 bd = ch->ch_bd; dgnc_tty_set_termios()
2387 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_set_termios()
2389 ch->ch_c_cflag = tty->termios.c_cflag; dgnc_tty_set_termios()
2390 ch->ch_c_iflag = tty->termios.c_iflag; dgnc_tty_set_termios()
2391 ch->ch_c_oflag = tty->termios.c_oflag; dgnc_tty_set_termios()
2392 ch->ch_c_lflag = tty->termios.c_lflag; dgnc_tty_set_termios()
2393 ch->ch_startc = tty->termios.c_cc[VSTART]; dgnc_tty_set_termios()
2394 ch->ch_stopc = tty->termios.c_cc[VSTOP]; dgnc_tty_set_termios()
2396 ch->ch_bd->bd_ops->param(tty); dgnc_tty_set_termios()
2397 dgnc_carrier(ch); dgnc_tty_set_termios()
2399 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_set_termios()
2405 struct channel_t *ch; dgnc_tty_throttle() local
2416 ch = un->un_ch; dgnc_tty_throttle()
2417 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_throttle()
2420 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_throttle()
2422 ch->ch_flags |= (CH_FORCED_STOPI); dgnc_tty_throttle()
2424 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_throttle()
2430 struct channel_t *ch; dgnc_tty_unthrottle() local
2441 ch = un->un_ch; dgnc_tty_unthrottle()
2442 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_unthrottle()
2445 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_unthrottle()
2447 ch->ch_flags &= ~(CH_FORCED_STOPI); dgnc_tty_unthrottle()
2449 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_unthrottle()
2456 struct channel_t *ch; dgnc_tty_start() local
2467 ch = un->un_ch; dgnc_tty_start()
2468 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_start()
2471 bd = ch->ch_bd; dgnc_tty_start()
2475 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_start()
2477 ch->ch_flags &= ~(CH_FORCED_STOP); dgnc_tty_start()
2479 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_start()
2486 struct channel_t *ch; dgnc_tty_stop() local
2497 ch = un->un_ch; dgnc_tty_stop()
2498 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_stop()
2501 bd = ch->ch_bd; dgnc_tty_stop()
2505 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_stop()
2507 ch->ch_flags |= (CH_FORCED_STOP); dgnc_tty_stop()
2509 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_stop()
2529 struct channel_t *ch; dgnc_tty_flush_chars() local
2540 ch = un->un_ch; dgnc_tty_flush_chars()
2541 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_flush_chars()
2544 bd = ch->ch_bd; dgnc_tty_flush_chars()
2548 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_flush_chars()
2552 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_flush_chars()
2564 struct channel_t *ch; dgnc_tty_flush_buffer() local
2575 ch = un->un_ch; dgnc_tty_flush_buffer()
2576 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_flush_buffer()
2579 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_flush_buffer()
2581 ch->ch_flags &= ~CH_STOP; dgnc_tty_flush_buffer()
2584 ch->ch_w_head = ch->ch_w_tail; dgnc_tty_flush_buffer()
2587 ch->ch_bd->bd_ops->flush_uart_write(ch); dgnc_tty_flush_buffer()
2589 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { dgnc_tty_flush_buffer()
2590 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); dgnc_tty_flush_buffer()
2591 wake_up_interruptible(&ch->ch_tun.un_flags_wait); dgnc_tty_flush_buffer()
2593 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { dgnc_tty_flush_buffer()
2594 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); dgnc_tty_flush_buffer()
2595 wake_up_interruptible(&ch->ch_pun.un_flags_wait); dgnc_tty_flush_buffer()
2598 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_flush_buffer()
2618 struct channel_t *ch; dgnc_tty_ioctl() local
2631 ch = un->un_ch; dgnc_tty_ioctl()
2632 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_ioctl()
2635 bd = ch->ch_bd; dgnc_tty_ioctl()
2639 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2642 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2660 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2664 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2669 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2672 ch->ch_bd->bd_ops->send_break(ch, 250); dgnc_tty_ioctl()
2674 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2686 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2690 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2694 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2696 ch->ch_bd->bd_ops->send_break(ch, 250); dgnc_tty_ioctl()
2698 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2704 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2708 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2712 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2714 ch->ch_bd->bd_ops->send_break(ch, 250); dgnc_tty_ioctl()
2716 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2722 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2727 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2734 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2739 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2741 ch->ch_bd->bd_ops->param(tty); dgnc_tty_ioctl()
2742 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2747 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2748 return dgnc_get_modem_info(ch, uarg); dgnc_tty_ioctl()
2753 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2772 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2777 ch->ch_r_head = ch->ch_r_tail; dgnc_tty_ioctl()
2778 ch->ch_bd->bd_ops->flush_uart_read(ch); dgnc_tty_ioctl()
2780 dgnc_check_queue_flow_control(ch); dgnc_tty_ioctl()
2785 ch->ch_w_head = ch->ch_w_tail; dgnc_tty_ioctl()
2786 ch->ch_bd->bd_ops->flush_uart_write(ch); dgnc_tty_ioctl()
2788 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { dgnc_tty_ioctl()
2789 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); dgnc_tty_ioctl()
2790 wake_up_interruptible(&ch->ch_tun.un_flags_wait); dgnc_tty_ioctl()
2793 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { dgnc_tty_ioctl()
2794 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); dgnc_tty_ioctl()
2795 wake_up_interruptible(&ch->ch_pun.un_flags_wait); dgnc_tty_ioctl()
2802 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2817 ch->ch_flags &= ~CH_STOP; dgnc_tty_ioctl()
2818 ch->ch_r_head = ch->ch_r_tail; dgnc_tty_ioctl()
2819 ch->ch_bd->bd_ops->flush_uart_read(ch); dgnc_tty_ioctl()
2821 dgnc_check_queue_flow_control(ch); dgnc_tty_ioctl()
2825 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2826 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2835 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2836 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2844 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2850 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2859 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2860 rc = ch->ch_bd->bd_ops->drain(tty, 0); dgnc_tty_ioctl()
2865 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2872 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2879 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2883 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2887 ch->ch_flags |= CH_LOOPBACK; dgnc_tty_ioctl()
2889 ch->ch_flags &= ~(CH_LOOPBACK); dgnc_tty_ioctl()
2891 ch->ch_bd->bd_ops->param(tty); dgnc_tty_ioctl()
2892 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2897 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2898 rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg); dgnc_tty_ioctl()
2905 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2909 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2910 dgnc_set_custom_speed(ch, new_rate); dgnc_tty_ioctl()
2911 ch->ch_bd->bd_ops->param(tty); dgnc_tty_ioctl()
2912 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2927 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2931 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
2932 ch->ch_bd->bd_ops->send_immediate_char(ch, c); dgnc_tty_ioctl()
2933 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2947 buf.norun = ch->ch_err_overrun; dgnc_tty_ioctl()
2949 buf.nframe = ch->ch_err_frame; dgnc_tty_ioctl()
2950 buf.nparity = ch->ch_err_parity; dgnc_tty_ioctl()
2951 buf.nbreak = ch->ch_err_break; dgnc_tty_ioctl()
2952 buf.rbytes = ch->ch_rxcount; dgnc_tty_ioctl()
2953 buf.tbytes = ch->ch_txcount; dgnc_tty_ioctl()
2955 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2974 if (ch->ch_flags & CH_BREAK_SENDING) dgnc_tty_ioctl()
2976 if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP)) dgnc_tty_ioctl()
2979 if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI)) dgnc_tty_ioctl()
2982 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
2999 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
3007 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_tty_ioctl()
3012 buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK; dgnc_tty_ioctl()
3013 buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK; dgnc_tty_ioctl()
3018 count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch); dgnc_tty_ioctl()
3045 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
3053 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_tty_ioctl()
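The dgnc_tty.c hits above keep a per-channel software transmit ring: ch_w_head and ch_w_tail are free-running indices masked on every use, so dgnc_tty_chars_in_buffer() and dgnc_tty_write_room() start from simple modular arithmetic on the masked values. A minimal sketch of that accounting, assuming a power-of-two queue; the EX_WQUEUESIZE value below is illustrative, not taken from the driver:

#define EX_WQUEUESIZE   0x2000                          /* assumed power of two */
#define EX_WQUEUEMASK   (EX_WQUEUESIZE - 1)

/* bytes queued but not yet copied to the UART */
static unsigned int ex_chars_in_buffer(unsigned int head, unsigned int tail)
{
        return (head - tail) & EX_WQUEUEMASK;
}

/* free space; one slot stays empty so head == tail means "empty" */
static unsigned int ex_write_room(unsigned int head, unsigned int tail)
{
        return (tail - head - 1) & EX_WQUEUEMASK;
}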
H A Ddgnc_mgmt.c167 struct channel_t *ch; dgnc_mgmt_ioctl() local
187 ch = dgnc_Board[board]->channels[channel]; dgnc_mgmt_ioctl()
189 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_mgmt_ioctl()
196 spin_lock_irqsave(&ch->ch_lock, flags); dgnc_mgmt_ioctl()
198 mstat = (ch->ch_mostat | ch->ch_mistat); dgnc_mgmt_ioctl()
223 ni.iflag = ch->ch_c_iflag; dgnc_mgmt_ioctl()
224 ni.oflag = ch->ch_c_oflag; dgnc_mgmt_ioctl()
225 ni.cflag = ch->ch_c_cflag; dgnc_mgmt_ioctl()
226 ni.lflag = ch->ch_c_lflag; dgnc_mgmt_ioctl()
228 if (ch->ch_digi.digi_flags & CTSPACE || dgnc_mgmt_ioctl()
229 ch->ch_c_cflag & CRTSCTS) dgnc_mgmt_ioctl()
234 if ((ch->ch_flags & CH_STOPI) || dgnc_mgmt_ioctl()
235 (ch->ch_flags & CH_FORCED_STOPI)) dgnc_mgmt_ioctl()
240 if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP)) dgnc_mgmt_ioctl()
245 ni.curtx = ch->ch_txcount; dgnc_mgmt_ioctl()
246 ni.currx = ch->ch_rxcount; dgnc_mgmt_ioctl()
248 ni.baud = ch->ch_old_baud; dgnc_mgmt_ioctl()
250 spin_unlock_irqrestore(&ch->ch_lock, flags); dgnc_mgmt_ioctl()
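dgnc_mgmt_ioctl(), dgnc_tty_tiocmget() and dgnc_get_mstat() all fold the driven and sampled modem lines into one snapshot with ch_mostat | ch_mistat. A hedged sketch of how such a snapshot is commonly translated into TIOCM_* bits for userspace; this mapping is an illustrative assumption, not code copied from the driver:

#include <linux/serial_reg.h>
#include <linux/tty.h>

static unsigned int ex_mstat_to_tiocm(unsigned char mstat)
{
        unsigned int result = 0;

        if (mstat & UART_MCR_DTR)               /* driven outputs */
                result |= TIOCM_DTR;
        if (mstat & UART_MCR_RTS)
                result |= TIOCM_RTS;
        if (mstat & UART_MSR_CTS)               /* sampled inputs */
                result |= TIOCM_CTS;
        if (mstat & UART_MSR_DSR)
                result |= TIOCM_DSR;
        if (mstat & UART_MSR_RI)
                result |= TIOCM_RI;
        if (mstat & UART_MSR_DCD)
                result |= TIOCM_CD;

        return result;
}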
H A Ddgnc_sysfs.c372 struct channel_t *ch; dgnc_tty_state_show() local
380 ch = un->un_ch; dgnc_tty_state_show()
381 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_state_show()
383 bd = ch->ch_bd; dgnc_tty_state_show()
397 struct channel_t *ch; dgnc_tty_baud_show() local
405 ch = un->un_ch; dgnc_tty_baud_show()
406 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_baud_show()
408 bd = ch->ch_bd; dgnc_tty_baud_show()
414 return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud); dgnc_tty_baud_show()
422 struct channel_t *ch; dgnc_tty_msignals_show() local
430 ch = un->un_ch; dgnc_tty_msignals_show()
431 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_msignals_show()
433 bd = ch->ch_bd; dgnc_tty_msignals_show()
439 if (ch->ch_open_count) { dgnc_tty_msignals_show()
441 (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "", dgnc_tty_msignals_show()
442 (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "", dgnc_tty_msignals_show()
443 (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "", dgnc_tty_msignals_show()
444 (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "", dgnc_tty_msignals_show()
445 (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "", dgnc_tty_msignals_show()
446 (ch->ch_mistat & UART_MSR_RI) ? "RI" : ""); dgnc_tty_msignals_show()
456 struct channel_t *ch; dgnc_tty_iflag_show() local
464 ch = un->un_ch; dgnc_tty_iflag_show()
465 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_iflag_show()
467 bd = ch->ch_bd; dgnc_tty_iflag_show()
473 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag); dgnc_tty_iflag_show()
481 struct channel_t *ch; dgnc_tty_cflag_show() local
489 ch = un->un_ch; dgnc_tty_cflag_show()
490 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_cflag_show()
492 bd = ch->ch_bd; dgnc_tty_cflag_show()
498 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag); dgnc_tty_cflag_show()
506 struct channel_t *ch; dgnc_tty_oflag_show() local
514 ch = un->un_ch; dgnc_tty_oflag_show()
515 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_oflag_show()
517 bd = ch->ch_bd; dgnc_tty_oflag_show()
523 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag); dgnc_tty_oflag_show()
531 struct channel_t *ch; dgnc_tty_lflag_show() local
539 ch = un->un_ch; dgnc_tty_lflag_show()
540 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_lflag_show()
542 bd = ch->ch_bd; dgnc_tty_lflag_show()
548 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag); dgnc_tty_lflag_show()
556 struct channel_t *ch; dgnc_tty_digi_flag_show() local
564 ch = un->un_ch; dgnc_tty_digi_flag_show()
565 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_digi_flag_show()
567 bd = ch->ch_bd; dgnc_tty_digi_flag_show()
573 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags); dgnc_tty_digi_flag_show()
581 struct channel_t *ch; dgnc_tty_rxcount_show() local
589 ch = un->un_ch; dgnc_tty_rxcount_show()
590 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_rxcount_show()
592 bd = ch->ch_bd; dgnc_tty_rxcount_show()
598 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount); dgnc_tty_rxcount_show()
606 struct channel_t *ch; dgnc_tty_txcount_show() local
614 ch = un->un_ch; dgnc_tty_txcount_show()
615 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_txcount_show()
617 bd = ch->ch_bd; dgnc_tty_txcount_show()
623 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount); dgnc_tty_txcount_show()
631 struct channel_t *ch; dgnc_tty_name_show() local
639 ch = un->un_ch; dgnc_tty_name_show()
640 if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) dgnc_tty_name_show()
642 bd = ch->ch_bd; dgnc_tty_name_show()
650 bd->boardnum + 1, 'a' + ch->ch_portnum); dgnc_tty_name_show()
H A Ddgnc_tty.h29 void dgnc_input(struct channel_t *ch);
30 void dgnc_carrier(struct channel_t *ch);
31 void dgnc_wakeup_writes(struct channel_t *ch);
32 void dgnc_check_queue_flow_control(struct channel_t *ch);
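Almost every dgnc entry point quoted above opens with the same guard ladder before taking ch_lock: validate the tty, then the unit, then the channel, each by its magic number. A compact sketch of that ladder; the struct layout and the DGNC_UNIT_MAGIC check are assumptions made for illustration:

static struct channel_t *ex_tty_to_channel(struct tty_struct *tty)
{
        struct un_t *un;
        struct channel_t *ch;

        if (!tty || tty->magic != TTY_MAGIC)
                return NULL;

        un = tty->driver_data;                  /* assumed: unit hangs off the tty */
        if (!un || un->magic != DGNC_UNIT_MAGIC)
                return NULL;

        ch = un->un_ch;
        if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
                return NULL;

        return ch;                              /* safe to take ch->ch_lock now */
}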
/linux-4.1.27/arch/mips/include/asm/mach-rc32434/
H A Ddma_v.h25 static inline int rc32434_halt_dma(struct dma_reg *ch) rc32434_halt_dma() argument
28 if (__raw_readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { rc32434_halt_dma()
29 __raw_writel(0, &ch->dmac); rc32434_halt_dma()
31 if (__raw_readl(&ch->dmas) & DMA_STAT_HALT) { rc32434_halt_dma()
32 __raw_writel(0, &ch->dmas); rc32434_halt_dma()
41 static inline void rc32434_start_dma(struct dma_reg *ch, u32 dma_addr) rc32434_start_dma() argument
43 __raw_writel(0, &ch->dmandptr); rc32434_start_dma()
44 __raw_writel(dma_addr, &ch->dmadptr); rc32434_start_dma()
47 static inline void rc32434_chain_dma(struct dma_reg *ch, u32 dma_addr) rc32434_chain_dma() argument
49 __raw_writel(dma_addr, &ch->dmandptr); rc32434_chain_dma()
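rc32434_halt_dma() above clears the RUN bit in the channel control register and then watches the status register for the HALT flag before acknowledging it. An illustrative reconstruction of that sequence; the retry bound is an assumption and error handling is trimmed:

static int ex_halt_dma(struct dma_reg *ch)
{
        int timeout = 10;                       /* assumed bound */

        if (!(__raw_readl(&ch->dmac) & DMA_CHAN_RUN_BIT))
                return 0;                       /* already stopped */

        __raw_writel(0, &ch->dmac);             /* drop RUN: request a halt */
        while (timeout--) {
                if (__raw_readl(&ch->dmas) & DMA_STAT_HALT) {
                        __raw_writel(0, &ch->dmas);     /* acknowledge the halt */
                        return 0;
                }
        }

        return 1;                               /* controller did not halt in time */
}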
/linux-4.1.27/drivers/clocksource/
H A Dsh_tmu.c88 static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) sh_tmu_read() argument
93 switch (ch->tmu->model) { sh_tmu_read()
95 return ioread8(ch->tmu->mapbase + 2); sh_tmu_read()
97 return ioread8(ch->tmu->mapbase + 4); sh_tmu_read()
104 return ioread16(ch->base + offs); sh_tmu_read()
106 return ioread32(ch->base + offs); sh_tmu_read()
109 static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, sh_tmu_write() argument
115 switch (ch->tmu->model) { sh_tmu_write()
117 return iowrite8(value, ch->tmu->mapbase + 2); sh_tmu_write()
119 return iowrite8(value, ch->tmu->mapbase + 4); sh_tmu_write()
126 iowrite16(value, ch->base + offs); sh_tmu_write()
128 iowrite32(value, ch->base + offs); sh_tmu_write()
131 static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) sh_tmu_start_stop_ch() argument
136 raw_spin_lock_irqsave(&ch->tmu->lock, flags); sh_tmu_start_stop_ch()
137 value = sh_tmu_read(ch, TSTR); sh_tmu_start_stop_ch()
140 value |= 1 << ch->index; sh_tmu_start_stop_ch()
142 value &= ~(1 << ch->index); sh_tmu_start_stop_ch()
144 sh_tmu_write(ch, TSTR, value); sh_tmu_start_stop_ch()
145 raw_spin_unlock_irqrestore(&ch->tmu->lock, flags); sh_tmu_start_stop_ch()
148 static int __sh_tmu_enable(struct sh_tmu_channel *ch) __sh_tmu_enable() argument
153 ret = clk_enable(ch->tmu->clk); __sh_tmu_enable()
155 dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n", __sh_tmu_enable()
156 ch->index); __sh_tmu_enable()
161 sh_tmu_start_stop_ch(ch, 0); __sh_tmu_enable()
164 sh_tmu_write(ch, TCOR, 0xffffffff); __sh_tmu_enable()
165 sh_tmu_write(ch, TCNT, 0xffffffff); __sh_tmu_enable()
168 ch->rate = clk_get_rate(ch->tmu->clk) / 4; __sh_tmu_enable()
169 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); __sh_tmu_enable()
172 sh_tmu_start_stop_ch(ch, 1); __sh_tmu_enable()
177 static int sh_tmu_enable(struct sh_tmu_channel *ch) sh_tmu_enable() argument
179 if (ch->enable_count++ > 0) sh_tmu_enable()
182 pm_runtime_get_sync(&ch->tmu->pdev->dev); sh_tmu_enable()
183 dev_pm_syscore_device(&ch->tmu->pdev->dev, true); sh_tmu_enable()
185 return __sh_tmu_enable(ch); sh_tmu_enable()
188 static void __sh_tmu_disable(struct sh_tmu_channel *ch) __sh_tmu_disable() argument
191 sh_tmu_start_stop_ch(ch, 0); __sh_tmu_disable()
194 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); __sh_tmu_disable()
197 clk_disable(ch->tmu->clk); __sh_tmu_disable()
200 static void sh_tmu_disable(struct sh_tmu_channel *ch) sh_tmu_disable() argument
202 if (WARN_ON(ch->enable_count == 0)) sh_tmu_disable()
205 if (--ch->enable_count > 0) sh_tmu_disable()
208 __sh_tmu_disable(ch); sh_tmu_disable()
210 dev_pm_syscore_device(&ch->tmu->pdev->dev, false); sh_tmu_disable()
211 pm_runtime_put(&ch->tmu->pdev->dev); sh_tmu_disable()
214 static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta, sh_tmu_set_next() argument
218 sh_tmu_start_stop_ch(ch, 0); sh_tmu_set_next()
221 sh_tmu_read(ch, TCR); sh_tmu_set_next()
224 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); sh_tmu_set_next()
228 sh_tmu_write(ch, TCOR, delta); sh_tmu_set_next()
230 sh_tmu_write(ch, TCOR, 0xffffffff); sh_tmu_set_next()
232 sh_tmu_write(ch, TCNT, delta); sh_tmu_set_next()
235 sh_tmu_start_stop_ch(ch, 1); sh_tmu_set_next()
240 struct sh_tmu_channel *ch = dev_id; sh_tmu_interrupt() local
243 if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) sh_tmu_interrupt()
244 sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); sh_tmu_interrupt()
246 sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); sh_tmu_interrupt()
249 ch->ced.event_handler(&ch->ced); sh_tmu_interrupt()
260 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); sh_tmu_clocksource_read() local
262 return sh_tmu_read(ch, TCNT) ^ 0xffffffff; sh_tmu_clocksource_read()
267 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); sh_tmu_clocksource_enable() local
270 if (WARN_ON(ch->cs_enabled)) sh_tmu_clocksource_enable()
273 ret = sh_tmu_enable(ch); sh_tmu_clocksource_enable()
275 __clocksource_update_freq_hz(cs, ch->rate); sh_tmu_clocksource_enable()
276 ch->cs_enabled = true; sh_tmu_clocksource_enable()
284 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); sh_tmu_clocksource_disable() local
286 if (WARN_ON(!ch->cs_enabled)) sh_tmu_clocksource_disable()
289 sh_tmu_disable(ch); sh_tmu_clocksource_disable()
290 ch->cs_enabled = false; sh_tmu_clocksource_disable()
295 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); sh_tmu_clocksource_suspend() local
297 if (!ch->cs_enabled) sh_tmu_clocksource_suspend()
300 if (--ch->enable_count == 0) { sh_tmu_clocksource_suspend()
301 __sh_tmu_disable(ch); sh_tmu_clocksource_suspend()
302 pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev); sh_tmu_clocksource_suspend()
308 struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); sh_tmu_clocksource_resume() local
310 if (!ch->cs_enabled) sh_tmu_clocksource_resume()
313 if (ch->enable_count++ == 0) { sh_tmu_clocksource_resume()
314 pm_genpd_syscore_poweron(&ch->tmu->pdev->dev); sh_tmu_clocksource_resume()
315 __sh_tmu_enable(ch); sh_tmu_clocksource_resume()
319 static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch, sh_tmu_register_clocksource() argument
322 struct clocksource *cs = &ch->cs; sh_tmu_register_clocksource()
334 dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", sh_tmu_register_clocksource()
335 ch->index); sh_tmu_register_clocksource()
347 static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic) sh_tmu_clock_event_start() argument
349 struct clock_event_device *ced = &ch->ced; sh_tmu_clock_event_start()
351 sh_tmu_enable(ch); sh_tmu_clock_event_start()
353 clockevents_config(ced, ch->rate); sh_tmu_clock_event_start()
356 ch->periodic = (ch->rate + HZ/2) / HZ; sh_tmu_clock_event_start()
357 sh_tmu_set_next(ch, ch->periodic, 1); sh_tmu_clock_event_start()
364 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); sh_tmu_clock_event_mode() local
371 sh_tmu_disable(ch); sh_tmu_clock_event_mode()
380 dev_info(&ch->tmu->pdev->dev, sh_tmu_clock_event_mode()
381 "ch%u: used for periodic clock events\n", ch->index); sh_tmu_clock_event_mode()
382 sh_tmu_clock_event_start(ch, 1); sh_tmu_clock_event_mode()
385 dev_info(&ch->tmu->pdev->dev, sh_tmu_clock_event_mode()
386 "ch%u: used for oneshot clock events\n", ch->index); sh_tmu_clock_event_mode()
387 sh_tmu_clock_event_start(ch, 0); sh_tmu_clock_event_mode()
391 sh_tmu_disable(ch); sh_tmu_clock_event_mode()
402 struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); sh_tmu_clock_event_next() local
407 sh_tmu_set_next(ch, delta, 0); sh_tmu_clock_event_next()
421 static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, sh_tmu_register_clockevent() argument
424 struct clock_event_device *ced = &ch->ced; sh_tmu_register_clockevent()
437 dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n", sh_tmu_register_clockevent()
438 ch->index); sh_tmu_register_clockevent()
442 ret = request_irq(ch->irq, sh_tmu_interrupt, sh_tmu_register_clockevent()
444 dev_name(&ch->tmu->pdev->dev), ch); sh_tmu_register_clockevent()
446 dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n", sh_tmu_register_clockevent()
447 ch->index, ch->irq); sh_tmu_register_clockevent()
452 static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name, sh_tmu_register() argument
456 ch->tmu->has_clockevent = true; sh_tmu_register()
457 sh_tmu_register_clockevent(ch, name); sh_tmu_register()
459 ch->tmu->has_clocksource = true; sh_tmu_register()
460 sh_tmu_register_clocksource(ch, name); sh_tmu_register()
466 static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, sh_tmu_channel_setup() argument
474 ch->tmu = tmu; sh_tmu_channel_setup()
475 ch->index = index; sh_tmu_channel_setup()
478 ch->base = tmu->mapbase + 4 + ch->index * 12; sh_tmu_channel_setup()
480 ch->base = tmu->mapbase + 8 + ch->index * 12; sh_tmu_channel_setup()
482 ch->irq = platform_get_irq(tmu->pdev, index); sh_tmu_channel_setup()
483 if (ch->irq < 0) { sh_tmu_channel_setup()
484 dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n", sh_tmu_channel_setup()
485 ch->index); sh_tmu_channel_setup()
486 return ch->irq; sh_tmu_channel_setup()
489 ch->cs_enabled = false; sh_tmu_channel_setup()
490 ch->enable_count = 0; sh_tmu_channel_setup()
492 return sh_tmu_register(ch, dev_name(&tmu->pdev->dev), sh_tmu_channel_setup()
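Pulling the scattered __sh_tmu_enable() hits together: the channel is stopped, TCOR (reload) and TCNT (count) are both loaded with the maximum value, the input clock divided by four is selected, and the channel is restarted. A condensed recap of that sequence with clock error handling omitted:

static void ex_tmu_enable(struct sh_tmu_channel *ch)
{
        clk_enable(ch->tmu->clk);

        sh_tmu_start_stop_ch(ch, 0);            /* make sure the channel is stopped */

        sh_tmu_write(ch, TCOR, 0xffffffff);     /* reload value: maximum period */
        sh_tmu_write(ch, TCNT, 0xffffffff);     /* current down-count */

        ch->rate = clk_get_rate(ch->tmu->clk) / 4;
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);   /* clk/4, match interrupt off */

        sh_tmu_start_stop_ch(ch, 1);            /* let it run */
}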
H A Dsh_cmt.c239 static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch) sh_cmt_read_cmstr() argument
241 if (ch->iostart) sh_cmt_read_cmstr()
242 return ch->cmt->info->read_control(ch->iostart, 0); sh_cmt_read_cmstr()
244 return ch->cmt->info->read_control(ch->cmt->mapbase, 0); sh_cmt_read_cmstr()
247 static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, sh_cmt_write_cmstr() argument
250 if (ch->iostart) sh_cmt_write_cmstr()
251 ch->cmt->info->write_control(ch->iostart, 0, value); sh_cmt_write_cmstr()
253 ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); sh_cmt_write_cmstr()
256 static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) sh_cmt_read_cmcsr() argument
258 return ch->cmt->info->read_control(ch->ioctrl, CMCSR); sh_cmt_read_cmcsr()
261 static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, sh_cmt_write_cmcsr() argument
264 ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); sh_cmt_write_cmcsr()
267 static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) sh_cmt_read_cmcnt() argument
269 return ch->cmt->info->read_count(ch->ioctrl, CMCNT); sh_cmt_read_cmcnt()
272 static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, sh_cmt_write_cmcnt() argument
275 ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); sh_cmt_write_cmcnt()
278 static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, sh_cmt_write_cmcor() argument
281 ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); sh_cmt_write_cmcor()
284 static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, sh_cmt_get_counter() argument
290 o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit; sh_cmt_get_counter()
295 v1 = sh_cmt_read_cmcnt(ch); sh_cmt_get_counter()
296 v2 = sh_cmt_read_cmcnt(ch); sh_cmt_get_counter()
297 v3 = sh_cmt_read_cmcnt(ch); sh_cmt_get_counter()
298 o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit; sh_cmt_get_counter()
306 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) sh_cmt_start_stop_ch() argument
311 raw_spin_lock_irqsave(&ch->cmt->lock, flags); sh_cmt_start_stop_ch()
312 value = sh_cmt_read_cmstr(ch); sh_cmt_start_stop_ch()
315 value |= 1 << ch->timer_bit; sh_cmt_start_stop_ch()
317 value &= ~(1 << ch->timer_bit); sh_cmt_start_stop_ch()
319 sh_cmt_write_cmstr(ch, value); sh_cmt_start_stop_ch()
320 raw_spin_unlock_irqrestore(&ch->cmt->lock, flags); sh_cmt_start_stop_ch()
323 static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) sh_cmt_enable() argument
327 pm_runtime_get_sync(&ch->cmt->pdev->dev); sh_cmt_enable()
328 dev_pm_syscore_device(&ch->cmt->pdev->dev, true); sh_cmt_enable()
331 ret = clk_enable(ch->cmt->clk); sh_cmt_enable()
333 dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n", sh_cmt_enable()
334 ch->index); sh_cmt_enable()
339 sh_cmt_start_stop_ch(ch, 0); sh_cmt_enable()
342 if (ch->cmt->info->width == 16) { sh_cmt_enable()
343 *rate = clk_get_rate(ch->cmt->clk) / 512; sh_cmt_enable()
344 sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | sh_cmt_enable()
347 *rate = clk_get_rate(ch->cmt->clk) / 8; sh_cmt_enable()
348 sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | sh_cmt_enable()
354 sh_cmt_write_cmcor(ch, 0xffffffff); sh_cmt_enable()
355 sh_cmt_write_cmcnt(ch, 0); sh_cmt_enable()
369 if (!sh_cmt_read_cmcnt(ch)) sh_cmt_enable()
374 if (sh_cmt_read_cmcnt(ch)) { sh_cmt_enable()
375 dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", sh_cmt_enable()
376 ch->index); sh_cmt_enable()
382 sh_cmt_start_stop_ch(ch, 1); sh_cmt_enable()
386 clk_disable(ch->cmt->clk); sh_cmt_enable()
392 static void sh_cmt_disable(struct sh_cmt_channel *ch) sh_cmt_disable() argument
395 sh_cmt_start_stop_ch(ch, 0); sh_cmt_disable()
398 sh_cmt_write_cmcsr(ch, 0); sh_cmt_disable()
401 clk_disable(ch->cmt->clk); sh_cmt_disable()
403 dev_pm_syscore_device(&ch->cmt->pdev->dev, false); sh_cmt_disable()
404 pm_runtime_put(&ch->cmt->pdev->dev); sh_cmt_disable()
414 static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch, sh_cmt_clock_event_program_verify() argument
418 unsigned long value = ch->next_match_value; sh_cmt_clock_event_program_verify()
423 now = sh_cmt_get_counter(ch, &has_wrapped); sh_cmt_clock_event_program_verify()
424 ch->flags |= FLAG_REPROGRAM; /* force reprogram */ sh_cmt_clock_event_program_verify()
431 ch->flags |= FLAG_SKIPEVENT; sh_cmt_clock_event_program_verify()
443 if (new_match > ch->max_match_value) sh_cmt_clock_event_program_verify()
444 new_match = ch->max_match_value; sh_cmt_clock_event_program_verify()
446 sh_cmt_write_cmcor(ch, new_match); sh_cmt_clock_event_program_verify()
448 now = sh_cmt_get_counter(ch, &has_wrapped); sh_cmt_clock_event_program_verify()
449 if (has_wrapped && (new_match > ch->match_value)) { sh_cmt_clock_event_program_verify()
456 ch->flags |= FLAG_SKIPEVENT; sh_cmt_clock_event_program_verify()
467 ch->match_value = new_match; sh_cmt_clock_event_program_verify()
478 ch->match_value = new_match; sh_cmt_clock_event_program_verify()
494 dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n", sh_cmt_clock_event_program_verify()
495 ch->index); sh_cmt_clock_event_program_verify()
500 static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) __sh_cmt_set_next() argument
502 if (delta > ch->max_match_value) __sh_cmt_set_next()
503 dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n", __sh_cmt_set_next()
504 ch->index); __sh_cmt_set_next()
506 ch->next_match_value = delta; __sh_cmt_set_next()
507 sh_cmt_clock_event_program_verify(ch, 0); __sh_cmt_set_next()
510 static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) sh_cmt_set_next() argument
514 raw_spin_lock_irqsave(&ch->lock, flags); sh_cmt_set_next()
515 __sh_cmt_set_next(ch, delta); sh_cmt_set_next()
516 raw_spin_unlock_irqrestore(&ch->lock, flags); sh_cmt_set_next()
521 struct sh_cmt_channel *ch = dev_id; sh_cmt_interrupt() local
524 sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & sh_cmt_interrupt()
525 ch->cmt->info->clear_bits); sh_cmt_interrupt()
531 if (ch->flags & FLAG_CLOCKSOURCE) sh_cmt_interrupt()
532 ch->total_cycles += ch->match_value + 1; sh_cmt_interrupt()
534 if (!(ch->flags & FLAG_REPROGRAM)) sh_cmt_interrupt()
535 ch->next_match_value = ch->max_match_value; sh_cmt_interrupt()
537 ch->flags |= FLAG_IRQCONTEXT; sh_cmt_interrupt()
539 if (ch->flags & FLAG_CLOCKEVENT) { sh_cmt_interrupt()
540 if (!(ch->flags & FLAG_SKIPEVENT)) { sh_cmt_interrupt()
541 if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) { sh_cmt_interrupt()
542 ch->next_match_value = ch->max_match_value; sh_cmt_interrupt()
543 ch->flags |= FLAG_REPROGRAM; sh_cmt_interrupt()
546 ch->ced.event_handler(&ch->ced); sh_cmt_interrupt()
550 ch->flags &= ~FLAG_SKIPEVENT; sh_cmt_interrupt()
552 if (ch->flags & FLAG_REPROGRAM) { sh_cmt_interrupt()
553 ch->flags &= ~FLAG_REPROGRAM; sh_cmt_interrupt()
554 sh_cmt_clock_event_program_verify(ch, 1); sh_cmt_interrupt()
556 if (ch->flags & FLAG_CLOCKEVENT) sh_cmt_interrupt()
557 if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) sh_cmt_interrupt()
558 || (ch->match_value == ch->next_match_value)) sh_cmt_interrupt()
559 ch->flags &= ~FLAG_REPROGRAM; sh_cmt_interrupt()
562 ch->flags &= ~FLAG_IRQCONTEXT; sh_cmt_interrupt()
567 static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag) sh_cmt_start() argument
572 raw_spin_lock_irqsave(&ch->lock, flags); sh_cmt_start()
574 if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) sh_cmt_start()
575 ret = sh_cmt_enable(ch, &ch->rate); sh_cmt_start()
579 ch->flags |= flag; sh_cmt_start()
582 if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT))) sh_cmt_start()
583 __sh_cmt_set_next(ch, ch->max_match_value); sh_cmt_start()
585 raw_spin_unlock_irqrestore(&ch->lock, flags); sh_cmt_start()
590 static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag) sh_cmt_stop() argument
595 raw_spin_lock_irqsave(&ch->lock, flags); sh_cmt_stop()
597 f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); sh_cmt_stop()
598 ch->flags &= ~flag; sh_cmt_stop()
600 if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) sh_cmt_stop()
601 sh_cmt_disable(ch); sh_cmt_stop()
604 if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE)) sh_cmt_stop()
605 __sh_cmt_set_next(ch, ch->max_match_value); sh_cmt_stop()
607 raw_spin_unlock_irqrestore(&ch->lock, flags); sh_cmt_stop()
617 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); sh_cmt_clocksource_read() local
622 raw_spin_lock_irqsave(&ch->lock, flags); sh_cmt_clocksource_read()
623 value = ch->total_cycles; sh_cmt_clocksource_read()
624 raw = sh_cmt_get_counter(ch, &has_wrapped); sh_cmt_clocksource_read()
627 raw += ch->match_value + 1; sh_cmt_clocksource_read()
628 raw_spin_unlock_irqrestore(&ch->lock, flags); sh_cmt_clocksource_read()
636 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); sh_cmt_clocksource_enable() local
638 WARN_ON(ch->cs_enabled); sh_cmt_clocksource_enable()
640 ch->total_cycles = 0; sh_cmt_clocksource_enable()
642 ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE); sh_cmt_clocksource_enable()
644 __clocksource_update_freq_hz(cs, ch->rate); sh_cmt_clocksource_enable()
645 ch->cs_enabled = true; sh_cmt_clocksource_enable()
652 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); sh_cmt_clocksource_disable() local
654 WARN_ON(!ch->cs_enabled); sh_cmt_clocksource_disable()
656 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); sh_cmt_clocksource_disable()
657 ch->cs_enabled = false; sh_cmt_clocksource_disable()
662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); sh_cmt_clocksource_suspend() local
664 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); sh_cmt_clocksource_suspend()
665 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); sh_cmt_clocksource_suspend()
670 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); sh_cmt_clocksource_resume() local
672 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); sh_cmt_clocksource_resume()
673 sh_cmt_start(ch, FLAG_CLOCKSOURCE); sh_cmt_clocksource_resume()
676 static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, sh_cmt_register_clocksource() argument
679 struct clocksource *cs = &ch->cs; sh_cmt_register_clocksource()
691 dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", sh_cmt_register_clocksource()
692 ch->index); sh_cmt_register_clocksource()
704 static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic) sh_cmt_clock_event_start() argument
706 struct clock_event_device *ced = &ch->ced; sh_cmt_clock_event_start()
708 sh_cmt_start(ch, FLAG_CLOCKEVENT); sh_cmt_clock_event_start()
713 ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift); sh_cmt_clock_event_start()
714 ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced); sh_cmt_clock_event_start()
718 sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1); sh_cmt_clock_event_start()
720 sh_cmt_set_next(ch, ch->max_match_value); sh_cmt_clock_event_start()
726 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); sh_cmt_clock_event_mode() local
732 sh_cmt_stop(ch, FLAG_CLOCKEVENT); sh_cmt_clock_event_mode()
740 dev_info(&ch->cmt->pdev->dev, sh_cmt_clock_event_mode()
741 "ch%u: used for periodic clock events\n", ch->index); sh_cmt_clock_event_mode()
742 sh_cmt_clock_event_start(ch, 1); sh_cmt_clock_event_mode()
745 dev_info(&ch->cmt->pdev->dev, sh_cmt_clock_event_mode()
746 "ch%u: used for oneshot clock events\n", ch->index); sh_cmt_clock_event_mode()
747 sh_cmt_clock_event_start(ch, 0); sh_cmt_clock_event_mode()
751 sh_cmt_stop(ch, FLAG_CLOCKEVENT); sh_cmt_clock_event_mode()
761 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); sh_cmt_clock_event_next() local
764 if (likely(ch->flags & FLAG_IRQCONTEXT)) sh_cmt_clock_event_next()
765 ch->next_match_value = delta - 1; sh_cmt_clock_event_next()
767 sh_cmt_set_next(ch, delta - 1); sh_cmt_clock_event_next()
774 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); sh_cmt_clock_event_suspend() local
776 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); sh_cmt_clock_event_suspend()
777 clk_unprepare(ch->cmt->clk); sh_cmt_clock_event_suspend()
782 struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); sh_cmt_clock_event_resume() local
784 clk_prepare(ch->cmt->clk); sh_cmt_clock_event_resume()
785 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); sh_cmt_clock_event_resume()
788 static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, sh_cmt_register_clockevent() argument
791 struct clock_event_device *ced = &ch->ced; sh_cmt_register_clockevent()
795 irq = platform_get_irq(ch->cmt->pdev, ch->index); sh_cmt_register_clockevent()
797 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", sh_cmt_register_clockevent()
798 ch->index); sh_cmt_register_clockevent()
804 dev_name(&ch->cmt->pdev->dev), ch); sh_cmt_register_clockevent()
806 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n", sh_cmt_register_clockevent()
807 ch->index, irq); sh_cmt_register_clockevent()
821 dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n", sh_cmt_register_clockevent()
822 ch->index); sh_cmt_register_clockevent()
828 static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name, sh_cmt_register() argument
834 ch->cmt->has_clockevent = true; sh_cmt_register()
835 ret = sh_cmt_register_clockevent(ch, name); sh_cmt_register()
841 ch->cmt->has_clocksource = true; sh_cmt_register()
842 sh_cmt_register_clocksource(ch, name); sh_cmt_register()
848 static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, sh_cmt_setup_channel() argument
858 ch->cmt = cmt; sh_cmt_setup_channel()
859 ch->index = index; sh_cmt_setup_channel()
860 ch->hwidx = hwidx; sh_cmt_setup_channel()
869 ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; sh_cmt_setup_channel()
873 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; sh_cmt_setup_channel()
880 ch->ioctrl = cmt->mapbase + 0x40; sh_cmt_setup_channel()
883 ch->iostart = cmt->mapbase + ch->hwidx * 0x100; sh_cmt_setup_channel()
884 ch->ioctrl = ch->iostart + 0x10; sh_cmt_setup_channel()
888 if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) sh_cmt_setup_channel()
889 ch->max_match_value = ~0; sh_cmt_setup_channel()
891 ch->max_match_value = (1 << cmt->info->width) - 1; sh_cmt_setup_channel()
893 ch->match_value = ch->max_match_value; sh_cmt_setup_channel()
894 raw_spin_lock_init(&ch->lock); sh_cmt_setup_channel()
896 ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx; sh_cmt_setup_channel()
898 ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), sh_cmt_setup_channel()
901 dev_err(&cmt->pdev->dev, "ch%u: registration failed\n", sh_cmt_setup_channel()
902 ch->index); sh_cmt_setup_channel()
905 ch->cs_enabled = false; sh_cmt_setup_channel()
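sh_cmt_clocksource_read() above builds the returned cycle count from two parts: total_cycles, which accumulates one full match period per compare-match interrupt, and the live counter; if the overflow flag was seen while sampling, one extra period is added. A condensed recap of that combination, with types taken as they appear in the hits:

static cycle_t ex_cmt_clocksource_read(struct sh_cmt_channel *ch)
{
        unsigned long flags, raw, value;
        int has_wrapped;

        raw_spin_lock_irqsave(&ch->lock, flags);
        value = ch->total_cycles;               /* completed match periods */
        raw = sh_cmt_get_counter(ch, &has_wrapped);
        if (unlikely(has_wrapped))
                raw += ch->match_value + 1;     /* wrap happened while sampling */
        raw_spin_unlock_irqrestore(&ch->lock, flags);

        return value + raw;
}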
H A Dsh_mtu2.c160 static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr) sh_mtu2_read() argument
165 return ioread8(ch->mtu->mapbase + 0x280); sh_mtu2_read()
170 return ioread16(ch->base + offs); sh_mtu2_read()
172 return ioread8(ch->base + offs); sh_mtu2_read()
175 static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr, sh_mtu2_write() argument
181 return iowrite8(value, ch->mtu->mapbase + 0x280); sh_mtu2_write()
186 iowrite16(value, ch->base + offs); sh_mtu2_write()
188 iowrite8(value, ch->base + offs); sh_mtu2_write()
191 static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) sh_mtu2_start_stop_ch() argument
196 raw_spin_lock_irqsave(&ch->mtu->lock, flags); sh_mtu2_start_stop_ch()
197 value = sh_mtu2_read(ch, TSTR); sh_mtu2_start_stop_ch()
200 value |= 1 << ch->index; sh_mtu2_start_stop_ch()
202 value &= ~(1 << ch->index); sh_mtu2_start_stop_ch()
204 sh_mtu2_write(ch, TSTR, value); sh_mtu2_start_stop_ch()
205 raw_spin_unlock_irqrestore(&ch->mtu->lock, flags); sh_mtu2_start_stop_ch()
208 static int sh_mtu2_enable(struct sh_mtu2_channel *ch) sh_mtu2_enable() argument
214 pm_runtime_get_sync(&ch->mtu->pdev->dev); sh_mtu2_enable()
215 dev_pm_syscore_device(&ch->mtu->pdev->dev, true); sh_mtu2_enable()
218 ret = clk_enable(ch->mtu->clk); sh_mtu2_enable()
220 dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", sh_mtu2_enable()
221 ch->index); sh_mtu2_enable()
226 sh_mtu2_start_stop_ch(ch, 0); sh_mtu2_enable()
228 rate = clk_get_rate(ch->mtu->clk) / 64; sh_mtu2_enable()
235 sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64); sh_mtu2_enable()
236 sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) | sh_mtu2_enable()
238 sh_mtu2_write(ch, TGR, periodic); sh_mtu2_enable()
239 sh_mtu2_write(ch, TCNT, 0); sh_mtu2_enable()
240 sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL); sh_mtu2_enable()
241 sh_mtu2_write(ch, TIER, TIER_TGIEA); sh_mtu2_enable()
244 sh_mtu2_start_stop_ch(ch, 1); sh_mtu2_enable()
249 static void sh_mtu2_disable(struct sh_mtu2_channel *ch) sh_mtu2_disable() argument
252 sh_mtu2_start_stop_ch(ch, 0); sh_mtu2_disable()
255 clk_disable(ch->mtu->clk); sh_mtu2_disable()
257 dev_pm_syscore_device(&ch->mtu->pdev->dev, false); sh_mtu2_disable()
258 pm_runtime_put(&ch->mtu->pdev->dev); sh_mtu2_disable()
263 struct sh_mtu2_channel *ch = dev_id; sh_mtu2_interrupt() local
266 sh_mtu2_read(ch, TSR); sh_mtu2_interrupt()
267 sh_mtu2_write(ch, TSR, ~TSR_TGFA); sh_mtu2_interrupt()
270 ch->ced.event_handler(&ch->ced); sh_mtu2_interrupt()
282 struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); sh_mtu2_clock_event_mode() local
288 sh_mtu2_disable(ch); sh_mtu2_clock_event_mode()
297 dev_info(&ch->mtu->pdev->dev, sh_mtu2_clock_event_mode()
298 "ch%u: used for periodic clock events\n", ch->index); sh_mtu2_clock_event_mode()
299 sh_mtu2_enable(ch); sh_mtu2_clock_event_mode()
303 sh_mtu2_disable(ch); sh_mtu2_clock_event_mode()
321 static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, sh_mtu2_register_clockevent() argument
324 struct clock_event_device *ced = &ch->ced; sh_mtu2_register_clockevent()
334 dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", sh_mtu2_register_clockevent()
335 ch->index); sh_mtu2_register_clockevent()
339 static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name) sh_mtu2_register() argument
341 ch->mtu->has_clockevent = true; sh_mtu2_register()
342 sh_mtu2_register_clockevent(ch, name); sh_mtu2_register()
347 static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, sh_mtu2_setup_channel() argument
357 ch->mtu = mtu; sh_mtu2_setup_channel()
368 dev_name(&ch->mtu->pdev->dev), ch); sh_mtu2_setup_channel()
370 dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", sh_mtu2_setup_channel()
375 ch->base = mtu->mapbase + channel_offsets[index]; sh_mtu2_setup_channel()
376 ch->index = index; sh_mtu2_setup_channel()
378 return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev)); sh_mtu2_setup_channel()
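The sh_mtu2_enable() hits amount to a standard periodic-tick setup: stop the channel, select the /64 prescaler with clear-on-TGRA-compare-match, program TGR with a per-jiffy compare value, zero TCNT, enable the TGRA interrupt, and start again. A condensed recap; the rounding used to derive the compare value is an assumption consistent with the /64 rate shown:

static void ex_mtu2_enable(struct sh_mtu2_channel *ch)
{
        unsigned long rate, periodic;

        clk_enable(ch->mtu->clk);
        sh_mtu2_start_stop_ch(ch, 0);           /* stop while reprogramming */

        rate = clk_get_rate(ch->mtu->clk) / 64;
        periodic = (rate + HZ / 2) / HZ;        /* assumed: ticks per jiffy, rounded */

        sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);  /* clear on TGRA match */
        sh_mtu2_write(ch, TGR, periodic);       /* compare value */
        sh_mtu2_write(ch, TCNT, 0);             /* count from zero */
        sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
        sh_mtu2_write(ch, TIER, TIER_TGIEA);    /* interrupt on TGRA match */

        sh_mtu2_start_stop_ch(ch, 1);
}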
/linux-4.1.27/drivers/scsi/
H A Dch.c87 #define ch_printk(prefix, ch, fmt, a...) \
88 sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a)
93 ch_printk(KERN_DEBUG, ch, fmt, ##arg); \
98 ch_printk(level, ch, fmt, ##arg); \
185 ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, ch_do_scsi() argument
197 result = scsi_execute_req(ch->device, cmd, direction, buffer, ch_do_scsi()
203 scsi_print_sense_hdr(ch->device, ch->name, &sshdr); ch_do_scsi()
208 ch->unit_attention = 1; ch_do_scsi()
220 ch_elem_to_typecode(scsi_changer *ch, u_int elem) ch_elem_to_typecode() argument
225 if (elem >= ch->firsts[i] && ch_elem_to_typecode()
226 elem < ch->firsts[i] + ch_elem_to_typecode()
227 ch->counts[i]) ch_elem_to_typecode()
234 ch_read_element_status(scsi_changer *ch, u_int elem, char *data) ch_read_element_status() argument
247 cmd[1] = ((ch->device->lun & 0x7) << 5) | ch_read_element_status()
248 (ch->voltags ? 0x10 : 0) | ch_read_element_status()
249 ch_elem_to_typecode(ch,elem); ch_read_element_status()
254 if (0 == (result = ch_do_scsi(ch, cmd, 12, ch_read_element_status()
264 if (ch->voltags) { ch_read_element_status()
265 ch->voltags = 0; ch_read_element_status()
276 ch_init_elem(scsi_changer *ch) ch_init_elem() argument
284 cmd[1] = (ch->device->lun & 0x7) << 5; ch_init_elem()
285 err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE); ch_init_elem()
291 ch_readconfig(scsi_changer *ch) ch_readconfig() argument
304 cmd[1] = (ch->device->lun & 0x7) << 5; ch_readconfig()
307 result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); ch_readconfig()
310 result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); ch_readconfig()
313 ch->firsts[CHET_MT] = ch_readconfig()
315 ch->counts[CHET_MT] = ch_readconfig()
317 ch->firsts[CHET_ST] = ch_readconfig()
319 ch->counts[CHET_ST] = ch_readconfig()
321 ch->firsts[CHET_IE] = ch_readconfig()
323 ch->counts[CHET_IE] = ch_readconfig()
325 ch->firsts[CHET_DT] = ch_readconfig()
327 ch->counts[CHET_DT] = ch_readconfig()
330 ch->firsts[CHET_MT], ch_readconfig()
331 ch->counts[CHET_MT]); ch_readconfig()
333 ch->firsts[CHET_ST], ch_readconfig()
334 ch->counts[CHET_ST]); ch_readconfig()
336 ch->firsts[CHET_IE], ch_readconfig()
337 ch->counts[CHET_IE]); ch_readconfig()
339 ch->firsts[CHET_DT], ch_readconfig()
340 ch->counts[CHET_DT]); ch_readconfig()
351 ch->firsts[CHET_V1+i] = vendor_firsts[i]; ch_readconfig()
352 ch->counts[CHET_V1+i] = vendor_counts[i]; ch_readconfig()
359 ch->dt = kcalloc(ch->counts[CHET_DT], sizeof(*ch->dt), ch_readconfig()
362 if (!ch->dt) { ch_readconfig()
367 for (elem = 0; elem < ch->counts[CHET_DT]; elem++) { ch_readconfig()
374 elem+ch->firsts[CHET_DT]); ch_readconfig()
376 (ch,elem+ch->firsts[CHET_DT],data)) { ch_readconfig()
378 elem+ch->firsts[CHET_DT]); ch_readconfig()
380 VPRINTK(KERN_INFO, "dt 0x%x: ",elem+ch->firsts[CHET_DT]); ch_readconfig()
383 ch->dt[elem] = NULL; ch_readconfig()
386 ch->dt[elem] = NULL; ch_readconfig()
388 id = ch->device->id; ch_readconfig()
396 ch->dt[elem] = ch_readconfig()
397 scsi_device_lookup(ch->device->host, ch_readconfig()
398 ch->device->channel, ch_readconfig()
400 if (!ch->dt[elem]) { ch_readconfig()
405 ch->dt[elem]->vendor, ch_readconfig()
406 ch->dt[elem]->model, ch_readconfig()
407 ch->dt[elem]->rev); ch_readconfig()
411 ch->voltags = 1; ch_readconfig()
420 ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) ch_position() argument
426 trans = ch->firsts[CHET_MT]; ch_position()
429 cmd[1] = (ch->device->lun & 0x7) << 5; ch_position()
435 return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE); ch_position()
439 ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) ch_move() argument
445 trans = ch->firsts[CHET_MT]; ch_move()
448 cmd[1] = (ch->device->lun & 0x7) << 5; ch_move()
456 return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE); ch_move()
460 ch_exchange(scsi_changer *ch, u_int trans, u_int src, ch_exchange() argument
468 trans = ch->firsts[CHET_MT]; ch_exchange()
471 cmd[1] = (ch->device->lun & 0x7) << 5; ch_exchange()
482 return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE); ch_exchange()
502 ch_set_voltag(scsi_changer *ch, u_int elem, ch_set_voltag() argument
519 cmd[1] = ((ch->device->lun & 0x7) << 5) | ch_set_voltag()
520 ch_elem_to_typecode(ch,elem); ch_set_voltag()
532 result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE); ch_set_voltag()
537 static int ch_gstatus(scsi_changer *ch, int type, unsigned char __user *dest) ch_gstatus() argument
543 mutex_lock(&ch->lock); ch_gstatus()
544 for (i = 0; i < ch->counts[type]; i++) { ch_gstatus()
546 (ch, ch->firsts[type]+i,data)) { ch_gstatus()
553 ch->firsts[type]+i, ch_gstatus()
556 (ch, ch->firsts[type]+i,data); ch_gstatus()
560 mutex_unlock(&ch->lock); ch_gstatus()
569 scsi_changer *ch = file->private_data; ch_release() local
571 scsi_device_put(ch->device); ch_release()
579 scsi_changer *ch; ch_open() local
584 ch = idr_find(&ch_index_idr, minor); ch_open()
586 if (NULL == ch || scsi_device_get(ch->device)) { ch_open()
593 file->private_data = ch; ch_open()
599 ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit) ch_checkrange() argument
601 if (type >= CH_TYPES || unit >= ch->counts[type]) ch_checkrange()
609 scsi_changer *ch = file->private_data; ch_ioctl() local
613 retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, ch_ioctl()
624 params.cp_npickers = ch->counts[CHET_MT]; ch_ioctl()
625 params.cp_nslots = ch->counts[CHET_ST]; ch_ioctl()
626 params.cp_nportals = ch->counts[CHET_IE]; ch_ioctl()
627 params.cp_ndrives = ch->counts[CHET_DT]; ch_ioctl()
638 if (ch->counts[CHET_V1]) { ch_ioctl()
639 vparams.cvp_n1 = ch->counts[CHET_V1]; ch_ioctl()
642 if (ch->counts[CHET_V2]) { ch_ioctl()
643 vparams.cvp_n2 = ch->counts[CHET_V2]; ch_ioctl()
646 if (ch->counts[CHET_V3]) { ch_ioctl()
647 vparams.cvp_n3 = ch->counts[CHET_V3]; ch_ioctl()
650 if (ch->counts[CHET_V4]) { ch_ioctl()
651 vparams.cvp_n4 = ch->counts[CHET_V4]; ch_ioctl()
666 if (0 != ch_checkrange(ch, pos.cp_type, pos.cp_unit)) { ch_ioctl()
670 mutex_lock(&ch->lock); ch_ioctl()
671 retval = ch_position(ch,0, ch_ioctl()
672 ch->firsts[pos.cp_type] + pos.cp_unit, ch_ioctl()
674 mutex_unlock(&ch->lock); ch_ioctl()
685 if (0 != ch_checkrange(ch, mv.cm_fromtype, mv.cm_fromunit) || ch_ioctl()
686 0 != ch_checkrange(ch, mv.cm_totype, mv.cm_tounit )) { ch_ioctl()
691 mutex_lock(&ch->lock); ch_ioctl()
692 retval = ch_move(ch,0, ch_ioctl()
693 ch->firsts[mv.cm_fromtype] + mv.cm_fromunit, ch_ioctl()
694 ch->firsts[mv.cm_totype] + mv.cm_tounit, ch_ioctl()
696 mutex_unlock(&ch->lock); ch_ioctl()
707 if (0 != ch_checkrange(ch, mv.ce_srctype, mv.ce_srcunit ) || ch_ioctl()
708 0 != ch_checkrange(ch, mv.ce_fdsttype, mv.ce_fdstunit) || ch_ioctl()
709 0 != ch_checkrange(ch, mv.ce_sdsttype, mv.ce_sdstunit)) { ch_ioctl()
714 mutex_lock(&ch->lock); ch_ioctl()
716 (ch,0, ch_ioctl()
717 ch->firsts[mv.ce_srctype] + mv.ce_srcunit, ch_ioctl()
718 ch->firsts[mv.ce_fdsttype] + mv.ce_fdstunit, ch_ioctl()
719 ch->firsts[mv.ce_sdsttype] + mv.ce_sdstunit, ch_ioctl()
721 mutex_unlock(&ch->lock); ch_ioctl()
734 return ch_gstatus(ch, ces.ces_type, ces.ces_data); ch_ioctl()
748 if (0 != ch_checkrange(ch, cge.cge_type, cge.cge_unit)) ch_ioctl()
750 elem = ch->firsts[cge.cge_type] + cge.cge_unit; ch_ioctl()
755 mutex_lock(&ch->lock); ch_ioctl()
760 ch_cmd[1] = ((ch->device->lun & 0x7) << 5) | ch_ioctl()
761 (ch->voltags ? 0x10 : 0) | ch_ioctl()
762 ch_elem_to_typecode(ch,elem); ch_ioctl()
768 result = ch_do_scsi(ch, ch_cmd, 12, ch_ioctl()
782 if (elem >= ch->firsts[i] && ch_ioctl()
783 elem < ch->firsts[i] + ch->counts[i]) { ch_ioctl()
785 cge.cge_srcunit = elem-ch->firsts[i]; ch_ioctl()
802 } else if (ch->voltags) { ch_ioctl()
803 ch->voltags = 0; ch_ioctl()
808 mutex_unlock(&ch->lock); ch_ioctl()
817 mutex_lock(&ch->lock); ch_ioctl()
818 retval = ch_init_elem(ch); ch_ioctl()
819 mutex_unlock(&ch->lock); ch_ioctl()
831 if (0 != ch_checkrange(ch, csv.csv_type, csv.csv_unit)) { ch_ioctl()
835 elem = ch->firsts[csv.csv_type] + csv.csv_unit; ch_ioctl()
836 mutex_lock(&ch->lock); ch_ioctl()
837 retval = ch_set_voltag(ch, elem, ch_ioctl()
841 mutex_unlock(&ch->lock); ch_ioctl()
846 return scsi_ioctl(ch->device, cmd, argp); ch_ioctl()
862 scsi_changer *ch = file->private_data; ch_ioctl_compat() local
886 return ch_gstatus(ch, ces32.ces_type, data); ch_ioctl_compat()
889 // return scsi_ioctl_compat(ch->device, cmd, (void*)arg); ch_ioctl_compat()
903 scsi_changer *ch; ch_probe() local
908 ch = kzalloc(sizeof(*ch), GFP_KERNEL); ch_probe()
909 if (NULL == ch) ch_probe()
914 ret = idr_alloc(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT); ch_probe()
924 ch->minor = ret; ch_probe()
925 sprintf(ch->name,"ch%d",ch->minor); ch_probe()
928 MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch, ch_probe()
929 "s%s", ch->name); ch_probe()
931 sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n", ch_probe()
932 ch->minor); ch_probe()
937 mutex_init(&ch->lock); ch_probe()
938 ch->device = sd; ch_probe()
939 ch_readconfig(ch); ch_probe()
941 ch_init_elem(ch); ch_probe()
943 dev_set_drvdata(dev, ch); ch_probe()
944 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); ch_probe()
948 idr_remove(&ch_index_idr, ch->minor); ch_probe()
950 kfree(ch); ch_probe()
956 scsi_changer *ch = dev_get_drvdata(dev); ch_remove() local
959 idr_remove(&ch_index_idr, ch->minor); ch_remove()
962 device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); ch_remove()
963 kfree(ch->dt); ch_remove()
964 kfree(ch); ch_remove()
970 .name = "ch",
998 rc = register_chrdev(SCSI_CHANGER_MAJOR,"ch",&changer_fops); init_ch_module()
1010 unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); init_ch_module()
1019 unregister_chrdev(SCSI_CHANGER_MAJOR, "ch"); exit_ch_module()
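The ch_ioctl() paths listed above translate a user-supplied (type, unit) pair into an absolute element address with ch->firsts[] and guard it with ch_checkrange() before issuing the SCSI command under ch->lock. Below is a minimal stand-alone C sketch of that translation; the element-map values are invented for illustration and are not taken from the driver.

#include <stdio.h>

enum { CHET_MT, CHET_ST, CHET_IE, CHET_DT, CH_TYPES };

/* Invented element map: first address and count per element type. */
static const unsigned int firsts[CH_TYPES] = { 0, 16, 240, 256 };
static const unsigned int counts[CH_TYPES] = { 1, 200, 4, 8 };

/* Mirrors the range check done before any POSITION/MOVE command. */
static int checkrange(unsigned int type, unsigned int unit)
{
    if (type >= CH_TYPES || unit >= counts[type])
        return -1;
    return 0;
}

int main(void)
{
    unsigned int type = CHET_ST, unit = 5;

    if (checkrange(type, unit)) {
        fprintf(stderr, "bad element\n");
        return 1;
    }
    /* Absolute element address, as placed in the changer CDBs. */
    printf("element address = %u\n", firsts[type] + unit);
    return 0;
}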
/linux-4.1.27/drivers/misc/sgi-xp/
H A Dxpc_channel.c27 xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) xpc_process_connect() argument
31 DBUG_ON(!spin_is_locked(&ch->lock)); xpc_process_connect()
33 if (!(ch->flags & XPC_C_OPENREQUEST) || xpc_process_connect()
34 !(ch->flags & XPC_C_ROPENREQUEST)) { xpc_process_connect()
38 DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); xpc_process_connect()
40 if (!(ch->flags & XPC_C_SETUP)) { xpc_process_connect()
41 spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_process_connect()
42 ret = xpc_arch_ops.setup_msg_structures(ch); xpc_process_connect()
43 spin_lock_irqsave(&ch->lock, *irq_flags); xpc_process_connect()
46 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); xpc_process_connect()
48 ch->flags |= XPC_C_SETUP; xpc_process_connect()
50 if (ch->flags & XPC_C_DISCONNECTING) xpc_process_connect()
54 if (!(ch->flags & XPC_C_OPENREPLY)) { xpc_process_connect()
55 ch->flags |= XPC_C_OPENREPLY; xpc_process_connect()
56 xpc_arch_ops.send_chctl_openreply(ch, irq_flags); xpc_process_connect()
59 if (!(ch->flags & XPC_C_ROPENREPLY)) xpc_process_connect()
62 if (!(ch->flags & XPC_C_OPENCOMPLETE)) { xpc_process_connect()
63 ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED); xpc_process_connect()
64 xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags); xpc_process_connect()
67 if (!(ch->flags & XPC_C_ROPENCOMPLETE)) xpc_process_connect()
71 ch->number, ch->partid); xpc_process_connect()
73 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ xpc_process_connect()
80 xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) xpc_process_disconnect() argument
82 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_process_disconnect()
83 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); xpc_process_disconnect()
85 DBUG_ON(!spin_is_locked(&ch->lock)); xpc_process_disconnect()
87 if (!(ch->flags & XPC_C_DISCONNECTING)) xpc_process_disconnect()
90 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); xpc_process_disconnect()
94 if (atomic_read(&ch->kthreads_assigned) > 0 || xpc_process_disconnect()
95 atomic_read(&ch->references) > 0) { xpc_process_disconnect()
98 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && xpc_process_disconnect()
99 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); xpc_process_disconnect()
103 if (xpc_arch_ops.partition_engaged(ch->partid)) xpc_process_disconnect()
110 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) xpc_process_disconnect()
113 if (!(ch->flags & XPC_C_CLOSEREPLY)) { xpc_process_disconnect()
114 ch->flags |= XPC_C_CLOSEREPLY; xpc_process_disconnect()
115 xpc_arch_ops.send_chctl_closereply(ch, irq_flags); xpc_process_disconnect()
118 if (!(ch->flags & XPC_C_RCLOSEREPLY)) xpc_process_disconnect()
123 if (atomic_read(&ch->n_to_notify) > 0) { xpc_process_disconnect()
124 /* we do callout while holding ch->lock, callout can't block */ xpc_process_disconnect()
125 xpc_arch_ops.notify_senders_of_disconnect(ch); xpc_process_disconnect()
130 if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { xpc_process_disconnect()
131 spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_process_disconnect()
132 xpc_disconnect_callout(ch, xpDisconnected); xpc_process_disconnect()
133 spin_lock_irqsave(&ch->lock, *irq_flags); xpc_process_disconnect()
136 DBUG_ON(atomic_read(&ch->n_to_notify) != 0); xpc_process_disconnect()
139 xpc_arch_ops.teardown_msg_structures(ch); xpc_process_disconnect()
141 ch->func = NULL; xpc_process_disconnect()
142 ch->key = NULL; xpc_process_disconnect()
143 ch->entry_size = 0; xpc_process_disconnect()
144 ch->local_nentries = 0; xpc_process_disconnect()
145 ch->remote_nentries = 0; xpc_process_disconnect()
146 ch->kthreads_assigned_limit = 0; xpc_process_disconnect()
147 ch->kthreads_idle_limit = 0; xpc_process_disconnect()
155 ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); xpc_process_disconnect()
161 "reason=%d\n", ch->number, ch->partid, ch->reason); xpc_process_disconnect()
164 if (ch->flags & XPC_C_WDISCONNECT) { xpc_process_disconnect()
165 /* we won't lose the CPU since we're holding ch->lock */ xpc_process_disconnect()
166 complete(&ch->wdisconnect_wait); xpc_process_disconnect()
167 } else if (ch->delayed_chctl_flags) { xpc_process_disconnect()
171 part->chctl.flags[ch->number] |= xpc_process_disconnect()
172 ch->delayed_chctl_flags; xpc_process_disconnect()
175 ch->delayed_chctl_flags = 0; xpc_process_disconnect()
189 struct xpc_channel *ch = &part->channels[ch_number]; xpc_process_openclose_chctl_flags() local
194 spin_lock_irqsave(&ch->lock, irq_flags); xpc_process_openclose_chctl_flags()
198 if ((ch->flags & XPC_C_DISCONNECTED) && xpc_process_openclose_chctl_flags()
199 (ch->flags & XPC_C_WDISCONNECT)) { xpc_process_openclose_chctl_flags()
204 ch->delayed_chctl_flags |= chctl_flags; xpc_process_openclose_chctl_flags()
212 ch->partid, ch->number); xpc_process_openclose_chctl_flags()
220 if (ch->flags & XPC_C_RCLOSEREQUEST) { xpc_process_openclose_chctl_flags()
221 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); xpc_process_openclose_chctl_flags()
222 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); xpc_process_openclose_chctl_flags()
223 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); xpc_process_openclose_chctl_flags()
224 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); xpc_process_openclose_chctl_flags()
228 ch->flags |= XPC_C_RCLOSEREPLY; xpc_process_openclose_chctl_flags()
231 xpc_process_disconnect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
232 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); xpc_process_openclose_chctl_flags()
236 if (ch->flags & XPC_C_DISCONNECTED) { xpc_process_openclose_chctl_flags()
241 DBUG_ON(ch->delayed_chctl_flags != 0); xpc_process_openclose_chctl_flags()
250 XPC_SET_REASON(ch, 0, 0); xpc_process_openclose_chctl_flags()
251 ch->flags &= ~XPC_C_DISCONNECTED; xpc_process_openclose_chctl_flags()
254 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); xpc_process_openclose_chctl_flags()
265 ch->flags |= XPC_C_RCLOSEREQUEST; xpc_process_openclose_chctl_flags()
267 if (!(ch->flags & XPC_C_DISCONNECTING)) { xpc_process_openclose_chctl_flags()
274 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); xpc_process_openclose_chctl_flags()
280 xpc_process_disconnect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
286 "%d, channel=%d\n", ch->partid, ch->number); xpc_process_openclose_chctl_flags()
288 if (ch->flags & XPC_C_DISCONNECTED) { xpc_process_openclose_chctl_flags()
293 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); xpc_process_openclose_chctl_flags()
295 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { xpc_process_openclose_chctl_flags()
299 DBUG_ON(ch->delayed_chctl_flags != 0); xpc_process_openclose_chctl_flags()
308 ch->flags |= XPC_C_RCLOSEREPLY; xpc_process_openclose_chctl_flags()
310 if (ch->flags & XPC_C_CLOSEREPLY) { xpc_process_openclose_chctl_flags()
312 xpc_process_disconnect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
321 ch->partid, ch->number); xpc_process_openclose_chctl_flags()
324 (ch->flags & XPC_C_ROPENREQUEST)) { xpc_process_openclose_chctl_flags()
328 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { xpc_process_openclose_chctl_flags()
329 ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; xpc_process_openclose_chctl_flags()
332 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | xpc_process_openclose_chctl_flags()
334 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | xpc_process_openclose_chctl_flags()
347 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); xpc_process_openclose_chctl_flags()
348 ch->remote_nentries = args->local_nentries; xpc_process_openclose_chctl_flags()
350 if (ch->flags & XPC_C_OPENREQUEST) { xpc_process_openclose_chctl_flags()
351 if (args->entry_size != ch->entry_size) { xpc_process_openclose_chctl_flags()
352 XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, xpc_process_openclose_chctl_flags()
357 ch->entry_size = args->entry_size; xpc_process_openclose_chctl_flags()
359 XPC_SET_REASON(ch, 0, 0); xpc_process_openclose_chctl_flags()
360 ch->flags &= ~XPC_C_DISCONNECTED; xpc_process_openclose_chctl_flags()
365 xpc_process_connect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
374 args->remote_nentries, ch->partid, ch->number); xpc_process_openclose_chctl_flags()
376 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) xpc_process_openclose_chctl_flags()
379 if (!(ch->flags & XPC_C_OPENREQUEST)) { xpc_process_openclose_chctl_flags()
380 XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, xpc_process_openclose_chctl_flags()
385 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); xpc_process_openclose_chctl_flags()
386 DBUG_ON(ch->flags & XPC_C_CONNECTED); xpc_process_openclose_chctl_flags()
399 ret = xpc_arch_ops.save_remote_msgqueue_pa(ch, xpc_process_openclose_chctl_flags()
402 XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); xpc_process_openclose_chctl_flags()
405 ch->flags |= XPC_C_ROPENREPLY; xpc_process_openclose_chctl_flags()
407 if (args->local_nentries < ch->remote_nentries) { xpc_process_openclose_chctl_flags()
411 args->local_nentries, ch->remote_nentries, xpc_process_openclose_chctl_flags()
412 ch->partid, ch->number); xpc_process_openclose_chctl_flags()
414 ch->remote_nentries = args->local_nentries; xpc_process_openclose_chctl_flags()
416 if (args->remote_nentries < ch->local_nentries) { xpc_process_openclose_chctl_flags()
420 args->remote_nentries, ch->local_nentries, xpc_process_openclose_chctl_flags()
421 ch->partid, ch->number); xpc_process_openclose_chctl_flags()
423 ch->local_nentries = args->remote_nentries; xpc_process_openclose_chctl_flags()
426 xpc_process_connect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
432 "partid=%d, channel=%d\n", ch->partid, ch->number); xpc_process_openclose_chctl_flags()
434 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) xpc_process_openclose_chctl_flags()
437 if (!(ch->flags & XPC_C_OPENREQUEST) || xpc_process_openclose_chctl_flags()
438 !(ch->flags & XPC_C_OPENREPLY)) { xpc_process_openclose_chctl_flags()
439 XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, xpc_process_openclose_chctl_flags()
444 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); xpc_process_openclose_chctl_flags()
445 DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY)); xpc_process_openclose_chctl_flags()
446 DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); xpc_process_openclose_chctl_flags()
448 ch->flags |= XPC_C_ROPENCOMPLETE; xpc_process_openclose_chctl_flags()
450 xpc_process_connect(ch, &irq_flags); xpc_process_openclose_chctl_flags()
455 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_process_openclose_chctl_flags()
458 xpc_create_kthreads(ch, 1, 0); xpc_process_openclose_chctl_flags()
465 xpc_connect_channel(struct xpc_channel *ch) xpc_connect_channel() argument
468 struct xpc_registration *registration = &xpc_registrations[ch->number]; xpc_connect_channel()
473 if (!XPC_CHANNEL_REGISTERED(ch->number)) { xpc_connect_channel()
478 spin_lock_irqsave(&ch->lock, irq_flags); xpc_connect_channel()
480 DBUG_ON(ch->flags & XPC_C_CONNECTED); xpc_connect_channel()
481 DBUG_ON(ch->flags & XPC_C_OPENREQUEST); xpc_connect_channel()
483 if (ch->flags & XPC_C_DISCONNECTING) { xpc_connect_channel()
484 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_connect_channel()
486 return ch->reason; xpc_connect_channel()
491 ch->kthreads_assigned_limit = registration->assigned_limit; xpc_connect_channel()
492 ch->kthreads_idle_limit = registration->idle_limit; xpc_connect_channel()
493 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); xpc_connect_channel()
494 DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); xpc_connect_channel()
495 DBUG_ON(atomic_read(&ch->kthreads_active) != 0); xpc_connect_channel()
497 ch->func = registration->func; xpc_connect_channel()
499 ch->key = registration->key; xpc_connect_channel()
501 ch->local_nentries = registration->nentries; xpc_connect_channel()
503 if (ch->flags & XPC_C_ROPENREQUEST) { xpc_connect_channel()
504 if (registration->entry_size != ch->entry_size) { xpc_connect_channel()
517 XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, xpc_connect_channel()
519 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_connect_channel()
523 ch->entry_size = registration->entry_size; xpc_connect_channel()
525 XPC_SET_REASON(ch, 0, 0); xpc_connect_channel()
526 ch->flags &= ~XPC_C_DISCONNECTED; xpc_connect_channel()
528 atomic_inc(&xpc_partitions[ch->partid].nchannels_active); xpc_connect_channel()
535 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); xpc_connect_channel()
536 xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags); xpc_connect_channel()
538 xpc_process_connect(ch, &irq_flags); xpc_connect_channel()
540 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_connect_channel()
550 struct xpc_channel *ch; xpc_process_sent_chctl_flags() local
564 ch = &part->channels[ch_number]; xpc_process_sent_chctl_flags()
576 ch_flags = ch->flags; /* need an atomic snapshot of flags */ xpc_process_sent_chctl_flags()
579 spin_lock_irqsave(&ch->lock, irq_flags); xpc_process_sent_chctl_flags()
580 xpc_process_disconnect(ch, &irq_flags); xpc_process_sent_chctl_flags()
581 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_process_sent_chctl_flags()
591 (void)xpc_connect_channel(ch); xpc_process_sent_chctl_flags()
621 struct xpc_channel *ch; xpc_partition_going_down() local
634 ch = &part->channels[ch_number]; xpc_partition_going_down()
636 xpc_msgqueue_ref(ch); xpc_partition_going_down()
637 spin_lock_irqsave(&ch->lock, irq_flags); xpc_partition_going_down()
639 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); xpc_partition_going_down()
641 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_partition_going_down()
642 xpc_msgqueue_deref(ch); xpc_partition_going_down()
659 struct xpc_channel *ch; xpc_initiate_connect() local
667 ch = &part->channels[ch_number]; xpc_initiate_connect()
680 xpc_connected_callout(struct xpc_channel *ch) xpc_connected_callout() argument
684 if (ch->func != NULL) { xpc_connected_callout()
685 dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " xpc_connected_callout()
686 "partid=%d, channel=%d\n", ch->partid, ch->number); xpc_connected_callout()
688 ch->func(xpConnected, ch->partid, ch->number, xpc_connected_callout()
689 (void *)(u64)ch->local_nentries, ch->key); xpc_connected_callout()
691 dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " xpc_connected_callout()
692 "partid=%d, channel=%d\n", ch->partid, ch->number); xpc_connected_callout()
715 struct xpc_channel *ch; xpc_initiate_disconnect() local
724 ch = &part->channels[ch_number]; xpc_initiate_disconnect()
725 xpc_msgqueue_ref(ch); xpc_initiate_disconnect()
727 spin_lock_irqsave(&ch->lock, irq_flags); xpc_initiate_disconnect()
729 if (!(ch->flags & XPC_C_DISCONNECTED)) { xpc_initiate_disconnect()
730 ch->flags |= XPC_C_WDISCONNECT; xpc_initiate_disconnect()
732 XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, xpc_initiate_disconnect()
736 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_initiate_disconnect()
738 xpc_msgqueue_deref(ch); xpc_initiate_disconnect()
756 xpc_disconnect_channel(const int line, struct xpc_channel *ch, xpc_disconnect_channel() argument
759 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); xpc_disconnect_channel()
761 DBUG_ON(!spin_is_locked(&ch->lock)); xpc_disconnect_channel()
763 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) xpc_disconnect_channel()
766 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); xpc_disconnect_channel()
769 reason, line, ch->partid, ch->number); xpc_disconnect_channel()
771 XPC_SET_REASON(ch, reason, line); xpc_disconnect_channel()
773 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); xpc_disconnect_channel()
775 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | xpc_disconnect_channel()
779 xpc_arch_ops.send_chctl_closerequest(ch, irq_flags); xpc_disconnect_channel()
782 ch->flags |= XPC_C_WASCONNECTED; xpc_disconnect_channel()
784 spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_disconnect_channel()
787 if (atomic_read(&ch->kthreads_idle) > 0) { xpc_disconnect_channel()
788 wake_up_all(&ch->idle_wq); xpc_disconnect_channel()
790 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && xpc_disconnect_channel()
791 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { xpc_disconnect_channel()
793 xpc_create_kthreads(ch, 1, 1); xpc_disconnect_channel()
797 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) xpc_disconnect_channel()
798 wake_up(&ch->msg_allocate_wq); xpc_disconnect_channel()
800 spin_lock_irqsave(&ch->lock, *irq_flags); xpc_disconnect_channel()
804 xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) xpc_disconnect_callout() argument
812 if (ch->func != NULL) { xpc_disconnect_callout()
813 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " xpc_disconnect_callout()
814 "channel=%d\n", reason, ch->partid, ch->number); xpc_disconnect_callout()
816 ch->func(reason, ch->partid, ch->number, NULL, ch->key); xpc_disconnect_callout()
818 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " xpc_disconnect_callout()
819 "channel=%d\n", reason, ch->partid, ch->number); xpc_disconnect_callout()
828 xpc_allocate_msg_wait(struct xpc_channel *ch) xpc_allocate_msg_wait() argument
833 if (ch->flags & XPC_C_DISCONNECTING) { xpc_allocate_msg_wait()
834 DBUG_ON(ch->reason == xpInterrupted); xpc_allocate_msg_wait()
835 return ch->reason; xpc_allocate_msg_wait()
838 atomic_inc(&ch->n_on_msg_allocate_wq); xpc_allocate_msg_wait()
839 prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); xpc_allocate_msg_wait()
841 finish_wait(&ch->msg_allocate_wq, &wait); xpc_allocate_msg_wait()
842 atomic_dec(&ch->n_on_msg_allocate_wq); xpc_allocate_msg_wait()
844 if (ch->flags & XPC_C_DISCONNECTING) { xpc_allocate_msg_wait()
845 ret = ch->reason; xpc_allocate_msg_wait()
846 DBUG_ON(ch->reason == xpInterrupted); xpc_allocate_msg_wait()
952 xpc_deliver_payload(struct xpc_channel *ch) xpc_deliver_payload() argument
956 payload = xpc_arch_ops.get_deliverable_payload(ch); xpc_deliver_payload()
964 xpc_msgqueue_ref(ch); xpc_deliver_payload()
966 atomic_inc(&ch->kthreads_active); xpc_deliver_payload()
968 if (ch->func != NULL) { xpc_deliver_payload()
969 dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " xpc_deliver_payload()
970 "partid=%d channel=%d\n", payload, ch->partid, xpc_deliver_payload()
971 ch->number); xpc_deliver_payload()
974 ch->func(xpMsgReceived, ch->partid, ch->number, payload, xpc_deliver_payload()
975 ch->key); xpc_deliver_payload()
977 dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " xpc_deliver_payload()
978 "partid=%d channel=%d\n", payload, ch->partid, xpc_deliver_payload()
979 ch->number); xpc_deliver_payload()
982 atomic_dec(&ch->kthreads_active); xpc_deliver_payload()
1004 struct xpc_channel *ch; xpc_initiate_received() local
1009 ch = &part->channels[ch_number]; xpc_initiate_received()
1010 xpc_arch_ops.received_payload(ch, payload); xpc_initiate_received()
1013 xpc_msgqueue_deref(ch); xpc_initiate_received()
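xpc_process_connect() above advances the channel only once both the local XPC_C_OPENREQUEST and the remote XPC_C_ROPENREQUEST bits are present, recording each reply and completion as a further flag. The following sketch shows that flag-driven handshake in isolation; the bit values and the helper are invented, and the real code runs under ch->lock with spin_lock_irqsave().

#include <stdio.h>

/* Invented flag values; the kernel defines its own XPC_C_* bits. */
#define C_OPENREQUEST   0x01  /* we asked to open */
#define C_ROPENREQUEST  0x02  /* remote asked to open */
#define C_OPENREPLY     0x04  /* we replied */
#define C_ROPENREPLY    0x08  /* remote replied */
#define C_CONNECTED     0x10

/* One handshake step: advance only when both sides have asked to open. */
static unsigned int process_connect(unsigned int flags)
{
    if (!(flags & C_OPENREQUEST) || !(flags & C_ROPENREQUEST))
        return flags;            /* nothing to do yet */
    if (!(flags & C_OPENREPLY))
        flags |= C_OPENREPLY;    /* the driver would send its openreply here */
    if (!(flags & C_ROPENREPLY))
        return flags;            /* still waiting for the remote reply */
    return flags | C_CONNECTED;
}

int main(void)
{
    unsigned int flags = C_OPENREQUEST | C_ROPENREQUEST | C_ROPENREPLY;

    flags = process_connect(flags);
    printf("connected: %s\n", (flags & C_CONNECTED) ? "yes" : "no");
    return 0;
}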
H A Dxpc_sn2.c345 xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag, xpc_send_notify_IRQ_sn2() argument
348 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_send_notify_IRQ_sn2()
354 chctl.flags[ch->number] = chctl_flag; xpc_send_notify_IRQ_sn2()
361 chctl_flag_string, ch->partid, ch->number, ret); xpc_send_notify_IRQ_sn2()
364 spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_send_notify_IRQ_sn2()
367 spin_lock_irqsave(&ch->lock, *irq_flags); xpc_send_notify_IRQ_sn2()
381 xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag, xpc_send_local_notify_IRQ_sn2() argument
384 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_send_local_notify_IRQ_sn2()
387 chctl.flags[ch->number] = chctl_flag; xpc_send_local_notify_IRQ_sn2()
391 chctl_flag_string, ch->partid, ch->number); xpc_send_local_notify_IRQ_sn2()
398 xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch, xpc_send_chctl_closerequest_sn2() argument
401 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; xpc_send_chctl_closerequest_sn2()
403 args->reason = ch->reason; xpc_send_chctl_closerequest_sn2()
404 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags); xpc_send_chctl_closerequest_sn2()
408 xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_closereply_sn2() argument
410 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags); xpc_send_chctl_closereply_sn2()
414 xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_openrequest_sn2() argument
416 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; xpc_send_chctl_openrequest_sn2()
418 args->entry_size = ch->entry_size; xpc_send_chctl_openrequest_sn2()
419 args->local_nentries = ch->local_nentries; xpc_send_chctl_openrequest_sn2()
420 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags); xpc_send_chctl_openrequest_sn2()
424 xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_openreply_sn2() argument
426 struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; xpc_send_chctl_openreply_sn2()
428 args->remote_nentries = ch->remote_nentries; xpc_send_chctl_openreply_sn2()
429 args->local_nentries = ch->local_nentries; xpc_send_chctl_openreply_sn2()
430 args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue); xpc_send_chctl_openreply_sn2()
431 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags); xpc_send_chctl_openreply_sn2()
435 xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch, xpc_send_chctl_opencomplete_sn2() argument
438 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags); xpc_send_chctl_opencomplete_sn2()
442 xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch) xpc_send_chctl_msgrequest_sn2() argument
444 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL); xpc_send_chctl_msgrequest_sn2()
448 xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch) xpc_send_chctl_local_msgrequest_sn2() argument
450 XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST); xpc_send_chctl_local_msgrequest_sn2()
454 xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch, xpc_save_remote_msgqueue_pa_sn2() argument
457 ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa; xpc_save_remote_msgqueue_pa_sn2()
1273 /* setup of ch structures failed */ xpc_setup_ch_structures_sn2()
1561 xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) xpc_allocate_local_msgqueue_sn2() argument
1563 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_allocate_local_msgqueue_sn2()
1568 for (nentries = ch->local_nentries; nentries > 0; nentries--) { xpc_allocate_local_msgqueue_sn2()
1570 nbytes = nentries * ch->entry_size; xpc_allocate_local_msgqueue_sn2()
1585 spin_lock_irqsave(&ch->lock, irq_flags); xpc_allocate_local_msgqueue_sn2()
1586 if (nentries < ch->local_nentries) { xpc_allocate_local_msgqueue_sn2()
1589 ch->local_nentries, ch->partid, ch->number); xpc_allocate_local_msgqueue_sn2()
1591 ch->local_nentries = nentries; xpc_allocate_local_msgqueue_sn2()
1593 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_allocate_local_msgqueue_sn2()
1598 "queue, partid=%d, channel=%d\n", ch->partid, ch->number); xpc_allocate_local_msgqueue_sn2()
1606 xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) xpc_allocate_remote_msgqueue_sn2() argument
1608 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_allocate_remote_msgqueue_sn2()
1613 DBUG_ON(ch->remote_nentries <= 0); xpc_allocate_remote_msgqueue_sn2()
1615 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { xpc_allocate_remote_msgqueue_sn2()
1617 nbytes = nentries * ch->entry_size; xpc_allocate_remote_msgqueue_sn2()
1624 spin_lock_irqsave(&ch->lock, irq_flags); xpc_allocate_remote_msgqueue_sn2()
1625 if (nentries < ch->remote_nentries) { xpc_allocate_remote_msgqueue_sn2()
1628 ch->remote_nentries, ch->partid, ch->number); xpc_allocate_remote_msgqueue_sn2()
1630 ch->remote_nentries = nentries; xpc_allocate_remote_msgqueue_sn2()
1632 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_allocate_remote_msgqueue_sn2()
1637 "partid=%d, channel=%d\n", ch->partid, ch->number); xpc_allocate_remote_msgqueue_sn2()
1647 xpc_setup_msg_structures_sn2(struct xpc_channel *ch) xpc_setup_msg_structures_sn2() argument
1649 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_setup_msg_structures_sn2()
1652 DBUG_ON(ch->flags & XPC_C_SETUP); xpc_setup_msg_structures_sn2()
1654 ret = xpc_allocate_local_msgqueue_sn2(ch); xpc_setup_msg_structures_sn2()
1657 ret = xpc_allocate_remote_msgqueue_sn2(ch); xpc_setup_msg_structures_sn2()
1673 xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) xpc_teardown_msg_structures_sn2() argument
1675 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_teardown_msg_structures_sn2()
1677 DBUG_ON(!spin_is_locked(&ch->lock)); xpc_teardown_msg_structures_sn2()
1691 if (ch->flags & XPC_C_SETUP) { xpc_teardown_msg_structures_sn2()
1692 dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", xpc_teardown_msg_structures_sn2()
1693 ch->flags, ch->partid, ch->number); xpc_teardown_msg_structures_sn2()
1708 xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) xpc_notify_senders_sn2() argument
1712 s64 get = ch->sn.sn2.w_remote_GP.get - 1; xpc_notify_senders_sn2()
1714 while (++get < put && atomic_read(&ch->n_to_notify) > 0) { xpc_notify_senders_sn2()
1716 notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries]; xpc_notify_senders_sn2()
1732 atomic_dec(&ch->n_to_notify); xpc_notify_senders_sn2()
1737 (void *)notify, get, ch->partid, ch->number); xpc_notify_senders_sn2()
1739 notify->func(reason, ch->partid, ch->number, xpc_notify_senders_sn2()
1744 (void *)notify, get, ch->partid, ch->number); xpc_notify_senders_sn2()
1750 xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch) xpc_notify_senders_of_disconnect_sn2() argument
1752 xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put); xpc_notify_senders_of_disconnect_sn2()
1759 xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) xpc_clear_local_msgqueue_flags_sn2() argument
1761 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_clear_local_msgqueue_flags_sn2()
1768 (get % ch->local_nentries) * xpc_clear_local_msgqueue_flags_sn2()
1769 ch->entry_size); xpc_clear_local_msgqueue_flags_sn2()
1779 xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) xpc_clear_remote_msgqueue_flags_sn2() argument
1781 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_clear_remote_msgqueue_flags_sn2()
1783 s64 put, remote_nentries = ch->remote_nentries; xpc_clear_remote_msgqueue_flags_sn2()
1793 ch->entry_size); xpc_clear_remote_msgqueue_flags_sn2()
1802 xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch) xpc_n_of_deliverable_payloads_sn2() argument
1804 return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; xpc_n_of_deliverable_payloads_sn2()
1810 struct xpc_channel *ch = &part->channels[ch_number]; xpc_process_msg_chctl_flags_sn2() local
1811 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_process_msg_chctl_flags_sn2()
1818 xpc_msgqueue_ref(ch); xpc_process_msg_chctl_flags_sn2()
1823 xpc_msgqueue_deref(ch); xpc_process_msg_chctl_flags_sn2()
1827 if (!(ch->flags & XPC_C_CONNECTED)) { xpc_process_msg_chctl_flags_sn2()
1828 xpc_msgqueue_deref(ch); xpc_process_msg_chctl_flags_sn2()
1847 if (atomic_read(&ch->n_to_notify) > 0) { xpc_process_msg_chctl_flags_sn2()
1852 xpc_notify_senders_sn2(ch, xpMsgDelivered, xpc_process_msg_chctl_flags_sn2()
1860 xpc_clear_local_msgqueue_flags_sn2(ch); xpc_process_msg_chctl_flags_sn2()
1865 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid, xpc_process_msg_chctl_flags_sn2()
1866 ch->number); xpc_process_msg_chctl_flags_sn2()
1872 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) xpc_process_msg_chctl_flags_sn2()
1873 wake_up(&ch->msg_allocate_wq); xpc_process_msg_chctl_flags_sn2()
1886 xpc_clear_remote_msgqueue_flags_sn2(ch); xpc_process_msg_chctl_flags_sn2()
1892 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid, xpc_process_msg_chctl_flags_sn2()
1893 ch->number); xpc_process_msg_chctl_flags_sn2()
1895 npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch); xpc_process_msg_chctl_flags_sn2()
1899 npayloads_sent, ch->partid, ch->number); xpc_process_msg_chctl_flags_sn2()
1901 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) xpc_process_msg_chctl_flags_sn2()
1902 xpc_activate_kthreads(ch, npayloads_sent); xpc_process_msg_chctl_flags_sn2()
1906 xpc_msgqueue_deref(ch); xpc_process_msg_chctl_flags_sn2()
1910 xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) xpc_pull_remote_msg_sn2() argument
1912 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_pull_remote_msg_sn2()
1913 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_pull_remote_msg_sn2()
1930 msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries; xpc_pull_remote_msg_sn2()
1934 if (msg_index + nmsgs > ch->remote_nentries) { xpc_pull_remote_msg_sn2()
1936 nmsgs = ch->remote_nentries - msg_index; xpc_pull_remote_msg_sn2()
1939 msg_offset = msg_index * ch->entry_size; xpc_pull_remote_msg_sn2()
1945 nmsgs * ch->entry_size); xpc_pull_remote_msg_sn2()
1951 ch->partid, ch->number, ret); xpc_pull_remote_msg_sn2()
1965 msg_offset = (get % ch->remote_nentries) * ch->entry_size; xpc_pull_remote_msg_sn2()
1975 xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) xpc_get_deliverable_payload_sn2() argument
1977 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_get_deliverable_payload_sn2()
1983 if (ch->flags & XPC_C_DISCONNECTING) xpc_get_deliverable_payload_sn2()
2003 ch->partid, ch->number); xpc_get_deliverable_payload_sn2()
2007 msg = xpc_pull_remote_msg_sn2(ch, get); xpc_get_deliverable_payload_sn2()
2030 xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) xpc_send_msgs_sn2() argument
2032 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_send_msgs_sn2()
2045 ch->local_nentries) * xpc_send_msgs_sn2()
2046 ch->entry_size); xpc_send_msgs_sn2()
2069 "channel=%d\n", put, ch->partid, ch->number); xpc_send_msgs_sn2()
2082 xpc_send_chctl_msgrequest_sn2(ch); xpc_send_msgs_sn2()
2090 xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, xpc_allocate_msg_sn2() argument
2093 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_allocate_msg_sn2()
2109 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) { xpc_allocate_msg_sn2()
2135 xpc_send_chctl_local_msgrequest_sn2(ch); xpc_allocate_msg_sn2()
2140 ret = xpc_allocate_msg_wait(ch); xpc_allocate_msg_sn2()
2147 (put % ch->local_nentries) * xpc_allocate_msg_sn2()
2148 ch->entry_size); xpc_allocate_msg_sn2()
2155 (void *)msg, msg->number, ch->partid, ch->number); xpc_allocate_msg_sn2()
2167 xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, xpc_send_payload_sn2() argument
2172 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_send_payload_sn2()
2180 if (XPC_MSG_SIZE(payload_size) > ch->entry_size) xpc_send_payload_sn2()
2183 xpc_msgqueue_ref(ch); xpc_send_payload_sn2()
2185 if (ch->flags & XPC_C_DISCONNECTING) { xpc_send_payload_sn2()
2186 ret = ch->reason; xpc_send_payload_sn2()
2189 if (!(ch->flags & XPC_C_CONNECTED)) { xpc_send_payload_sn2()
2194 ret = xpc_allocate_msg_sn2(ch, flags, &msg); xpc_send_payload_sn2()
2207 atomic_inc(&ch->n_to_notify); xpc_send_payload_sn2()
2209 notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries]; xpc_send_payload_sn2()
2216 if (ch->flags & XPC_C_DISCONNECTING) { xpc_send_payload_sn2()
2226 atomic_dec(&ch->n_to_notify); xpc_send_payload_sn2()
2227 ret = ch->reason; xpc_send_payload_sn2()
2247 xpc_send_msgs_sn2(ch, put); xpc_send_payload_sn2()
2250 xpc_msgqueue_deref(ch); xpc_send_payload_sn2()
2263 xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) xpc_acknowledge_msgs_sn2() argument
2265 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; xpc_acknowledge_msgs_sn2()
2278 ch->remote_nentries) * xpc_acknowledge_msgs_sn2()
2279 ch->entry_size); xpc_acknowledge_msgs_sn2()
2303 "channel=%d\n", get, ch->partid, ch->number); xpc_acknowledge_msgs_sn2()
2316 xpc_send_chctl_msgrequest_sn2(ch); xpc_acknowledge_msgs_sn2()
2320 xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) xpc_received_payload_sn2() argument
2330 (void *)msg, msg_number, ch->partid, ch->number); xpc_received_payload_sn2()
2332 DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) / ch->entry_size) != xpc_received_payload_sn2()
2333 msg_number % ch->remote_nentries); xpc_received_payload_sn2()
2349 get = ch->sn.sn2.local_GP->get; xpc_received_payload_sn2()
2351 xpc_acknowledge_msgs_sn2(ch, get, msg->flags); xpc_received_payload_sn2()
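The sn2 message paths above keep monotonically increasing get/put counters and map them onto a fixed ring with (counter % nentries) * entry_size, and a sender may allocate only while put stays less than one full ring ahead of get (see xpc_allocate_msg_sn2()). The sketch below reproduces just that ring arithmetic with invented queue dimensions; it is not the driver's GP bookkeeping.

#include <stdint.h>
#include <stdio.h>

#define NENTRIES   8    /* invented ring size */
#define ENTRY_SIZE 128  /* invented per-message size in bytes */

/* Byte offset of the slot backing an ever-increasing message counter. */
static size_t slot_offset(int64_t counter)
{
    return (size_t)(counter % NENTRIES) * ENTRY_SIZE;
}

/* Allocation is allowed only while put never gets a full ring ahead of get. */
static int can_allocate(int64_t put, int64_t get)
{
    return (put - get) < NENTRIES;
}

int main(void)
{
    int64_t get = 5, put = 12;

    printf("offset of msg %lld = %zu\n", (long long)put, slot_offset(put));
    printf("room for another send: %s\n",
           can_allocate(put, get) ? "yes" : "no");
    return 0;
}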
H A Dxpc_main.c412 struct xpc_channel *ch; xpc_setup_ch_structures() local
451 ch = &part->channels[ch_number]; xpc_setup_ch_structures()
453 ch->partid = partid; xpc_setup_ch_structures()
454 ch->number = ch_number; xpc_setup_ch_structures()
455 ch->flags = XPC_C_DISCONNECTED; xpc_setup_ch_structures()
457 atomic_set(&ch->kthreads_assigned, 0); xpc_setup_ch_structures()
458 atomic_set(&ch->kthreads_idle, 0); xpc_setup_ch_structures()
459 atomic_set(&ch->kthreads_active, 0); xpc_setup_ch_structures()
461 atomic_set(&ch->references, 0); xpc_setup_ch_structures()
462 atomic_set(&ch->n_to_notify, 0); xpc_setup_ch_structures()
464 spin_lock_init(&ch->lock); xpc_setup_ch_structures()
465 init_completion(&ch->wdisconnect_wait); xpc_setup_ch_structures()
467 atomic_set(&ch->n_on_msg_allocate_wq, 0); xpc_setup_ch_structures()
468 init_waitqueue_head(&ch->msg_allocate_wq); xpc_setup_ch_structures()
469 init_waitqueue_head(&ch->idle_wq); xpc_setup_ch_structures()
484 /* setup of ch structures failed */ xpc_setup_ch_structures()
617 xpc_activate_kthreads(struct xpc_channel *ch, int needed) xpc_activate_kthreads() argument
619 int idle = atomic_read(&ch->kthreads_idle); xpc_activate_kthreads()
620 int assigned = atomic_read(&ch->kthreads_assigned); xpc_activate_kthreads()
630 "channel=%d\n", wakeup, ch->partid, ch->number); xpc_activate_kthreads()
633 wake_up_nr(&ch->idle_wq, wakeup); xpc_activate_kthreads()
639 if (needed + assigned > ch->kthreads_assigned_limit) { xpc_activate_kthreads()
640 needed = ch->kthreads_assigned_limit - assigned; xpc_activate_kthreads()
646 needed, ch->partid, ch->number); xpc_activate_kthreads()
648 xpc_create_kthreads(ch, needed, 0); xpc_activate_kthreads()
655 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) xpc_kthread_waitmsgs() argument
663 while (n_of_deliverable_payloads(ch) > 0 && xpc_kthread_waitmsgs()
664 !(ch->flags & XPC_C_DISCONNECTING)) { xpc_kthread_waitmsgs()
665 xpc_deliver_payload(ch); xpc_kthread_waitmsgs()
668 if (atomic_inc_return(&ch->kthreads_idle) > xpc_kthread_waitmsgs()
669 ch->kthreads_idle_limit) { xpc_kthread_waitmsgs()
671 atomic_dec(&ch->kthreads_idle); xpc_kthread_waitmsgs()
678 (void)wait_event_interruptible_exclusive(ch->idle_wq, xpc_kthread_waitmsgs()
679 (n_of_deliverable_payloads(ch) > 0 || xpc_kthread_waitmsgs()
680 (ch->flags & XPC_C_DISCONNECTING))); xpc_kthread_waitmsgs()
682 atomic_dec(&ch->kthreads_idle); xpc_kthread_waitmsgs()
684 } while (!(ch->flags & XPC_C_DISCONNECTING)); xpc_kthread_waitmsgs()
693 struct xpc_channel *ch; xpc_kthread_start() local
702 ch = &part->channels[ch_number]; xpc_kthread_start()
704 if (!(ch->flags & XPC_C_DISCONNECTING)) { xpc_kthread_start()
708 spin_lock_irqsave(&ch->lock, irq_flags); xpc_kthread_start()
709 if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) { xpc_kthread_start()
710 ch->flags |= XPC_C_CONNECTEDCALLOUT; xpc_kthread_start()
711 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_kthread_start()
713 xpc_connected_callout(ch); xpc_kthread_start()
715 spin_lock_irqsave(&ch->lock, irq_flags); xpc_kthread_start()
716 ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE; xpc_kthread_start()
717 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_kthread_start()
726 n_needed = n_of_deliverable_payloads(ch) - 1; xpc_kthread_start()
727 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) xpc_kthread_start()
728 xpc_activate_kthreads(ch, n_needed); xpc_kthread_start()
731 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_kthread_start()
734 xpc_kthread_waitmsgs(part, ch); xpc_kthread_start()
739 spin_lock_irqsave(&ch->lock, irq_flags); xpc_kthread_start()
740 if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && xpc_kthread_start()
741 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { xpc_kthread_start()
742 ch->flags |= XPC_C_DISCONNECTINGCALLOUT; xpc_kthread_start()
743 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_kthread_start()
745 xpc_disconnect_callout(ch, xpDisconnecting); xpc_kthread_start()
747 spin_lock_irqsave(&ch->lock, irq_flags); xpc_kthread_start()
748 ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; xpc_kthread_start()
750 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_kthread_start()
752 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && xpc_kthread_start()
757 xpc_msgqueue_deref(ch); xpc_kthread_start()
779 xpc_create_kthreads(struct xpc_channel *ch, int needed, xpc_create_kthreads() argument
783 u64 args = XPC_PACK_ARGS(ch->partid, ch->number); xpc_create_kthreads()
784 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_create_kthreads()
797 if (!atomic_inc_not_zero(&ch->kthreads_assigned)) { xpc_create_kthreads()
799 BUG_ON(!(ch->flags & xpc_create_kthreads()
804 } else if (ch->flags & XPC_C_DISCONNECTING) { xpc_create_kthreads()
807 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && xpc_create_kthreads()
812 xpc_msgqueue_ref(ch); xpc_create_kthreads()
815 "xpc%02dc%d", ch->partid, ch->number); xpc_create_kthreads()
821 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true, xpc_create_kthreads()
829 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && xpc_create_kthreads()
833 xpc_msgqueue_deref(ch); xpc_create_kthreads()
836 if (atomic_read(&ch->kthreads_assigned) < xpc_create_kthreads()
837 ch->kthreads_idle_limit) { xpc_create_kthreads()
843 spin_lock_irqsave(&ch->lock, irq_flags); xpc_create_kthreads()
844 XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, xpc_create_kthreads()
846 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_create_kthreads()
859 struct xpc_channel *ch; xpc_disconnect_wait() local
869 ch = &part->channels[ch_number]; xpc_disconnect_wait()
871 if (!(ch->flags & XPC_C_WDISCONNECT)) { xpc_disconnect_wait()
876 wait_for_completion(&ch->wdisconnect_wait); xpc_disconnect_wait()
878 spin_lock_irqsave(&ch->lock, irq_flags); xpc_disconnect_wait()
879 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); xpc_disconnect_wait()
882 if (ch->delayed_chctl_flags) { xpc_disconnect_wait()
885 part->chctl.flags[ch->number] |= xpc_disconnect_wait()
886 ch->delayed_chctl_flags; xpc_disconnect_wait()
890 ch->delayed_chctl_flags = 0; xpc_disconnect_wait()
893 ch->flags &= ~XPC_C_WDISCONNECT; xpc_disconnect_wait()
894 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_disconnect_wait()
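xpc_activate_kthreads() above first wakes idle workers and then clamps how many new kthreads may still be created so the assigned count never exceeds kthreads_assigned_limit. The following arithmetic-only sketch mirrors that throttling; the numbers are invented, and the real code uses atomic counters and wake_up_nr().

#include <stdio.h>

/* Given the demand and the current pool state, decide how many idle
 * workers to wake and how many new workers may still be created. */
static void plan_workers(int needed, int idle, int assigned, int limit,
                         int *wakeup, int *create)
{
    *wakeup = (needed > idle) ? idle : needed;
    needed -= *wakeup;

    if (needed + assigned > limit)
        needed = limit - assigned;   /* never exceed the assigned limit */
    *create = (needed > 0) ? needed : 0;
}

int main(void)
{
    int wakeup, create;

    /* 10 deliverable payloads, 3 idle workers, 6 assigned, limit of 8. */
    plan_workers(10, 3, 6, 8, &wakeup, &create);
    printf("wake %d idle, create %d new\n", wakeup, create);
    return 0;
}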
H A Dxpc_uv.c714 /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */ xpc_send_activate_IRQ_uv()
739 xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, xpc_send_activate_IRQ_ch_uv() argument
742 struct xpc_partition *part = &xpc_partitions[ch->partid]; xpc_send_activate_IRQ_ch_uv()
748 spin_unlock_irqrestore(&ch->lock, *irq_flags); xpc_send_activate_IRQ_ch_uv()
753 spin_lock_irqsave(&ch->lock, *irq_flags); xpc_send_activate_IRQ_ch_uv()
1082 xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch) xpc_allocate_send_msg_slot_uv() argument
1084 struct xpc_channel_uv *ch_uv = &ch->sn.uv; xpc_allocate_send_msg_slot_uv()
1091 for (nentries = ch->local_nentries; nentries > 0; nentries--) { xpc_allocate_send_msg_slot_uv()
1105 spin_lock_irqsave(&ch->lock, irq_flags); xpc_allocate_send_msg_slot_uv()
1106 if (nentries < ch->local_nentries) xpc_allocate_send_msg_slot_uv()
1107 ch->local_nentries = nentries; xpc_allocate_send_msg_slot_uv()
1108 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_allocate_send_msg_slot_uv()
1116 xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch) xpc_allocate_recv_msg_slot_uv() argument
1118 struct xpc_channel_uv *ch_uv = &ch->sn.uv; xpc_allocate_recv_msg_slot_uv()
1125 for (nentries = ch->remote_nentries; nentries > 0; nentries--) { xpc_allocate_recv_msg_slot_uv()
1126 nbytes = nentries * ch->entry_size; xpc_allocate_recv_msg_slot_uv()
1133 entry * ch->entry_size; xpc_allocate_recv_msg_slot_uv()
1138 spin_lock_irqsave(&ch->lock, irq_flags); xpc_allocate_recv_msg_slot_uv()
1139 if (nentries < ch->remote_nentries) xpc_allocate_recv_msg_slot_uv()
1140 ch->remote_nentries = nentries; xpc_allocate_recv_msg_slot_uv()
1141 spin_unlock_irqrestore(&ch->lock, irq_flags); xpc_allocate_recv_msg_slot_uv()
1152 xpc_setup_msg_structures_uv(struct xpc_channel *ch) xpc_setup_msg_structures_uv() argument
1155 struct xpc_channel_uv *ch_uv = &ch->sn.uv; xpc_setup_msg_structures_uv()
1157 DBUG_ON(ch->flags & XPC_C_SETUP); xpc_setup_msg_structures_uv()
1165 ret = xpc_allocate_send_msg_slot_uv(ch); xpc_setup_msg_structures_uv()
1168 ret = xpc_allocate_recv_msg_slot_uv(ch); xpc_setup_msg_structures_uv()
1182 xpc_teardown_msg_structures_uv(struct xpc_channel *ch) xpc_teardown_msg_structures_uv() argument
1184 struct xpc_channel_uv *ch_uv = &ch->sn.uv; xpc_teardown_msg_structures_uv()
1186 DBUG_ON(!spin_is_locked(&ch->lock)); xpc_teardown_msg_structures_uv()
1191 if (ch->flags & XPC_C_SETUP) { xpc_teardown_msg_structures_uv()
1200 xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_closerequest_uv() argument
1204 msg.ch_number = ch->number; xpc_send_chctl_closerequest_uv()
1205 msg.reason = ch->reason; xpc_send_chctl_closerequest_uv()
1206 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_closerequest_uv()
1211 xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_closereply_uv() argument
1215 msg.ch_number = ch->number; xpc_send_chctl_closereply_uv()
1216 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_closereply_uv()
1221 xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_openrequest_uv() argument
1225 msg.ch_number = ch->number; xpc_send_chctl_openrequest_uv()
1226 msg.entry_size = ch->entry_size; xpc_send_chctl_openrequest_uv()
1227 msg.local_nentries = ch->local_nentries; xpc_send_chctl_openrequest_uv()
1228 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_openrequest_uv()
1233 xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_openreply_uv() argument
1237 msg.ch_number = ch->number; xpc_send_chctl_openreply_uv()
1238 msg.local_nentries = ch->local_nentries; xpc_send_chctl_openreply_uv()
1239 msg.remote_nentries = ch->remote_nentries; xpc_send_chctl_openreply_uv()
1241 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_openreply_uv()
1246 xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags) xpc_send_chctl_opencomplete_uv() argument
1250 msg.ch_number = ch->number; xpc_send_chctl_opencomplete_uv()
1251 xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), xpc_send_chctl_opencomplete_uv()
1268 xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, xpc_save_remote_msgqueue_pa_uv() argument
1271 struct xpc_channel_uv *ch_uv = &ch->sn.uv; xpc_save_remote_msgqueue_pa_uv()
1328 xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags, xpc_allocate_msg_slot_uv() argument
1336 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); xpc_allocate_msg_slot_uv()
1343 ret = xpc_allocate_msg_wait(ch); xpc_allocate_msg_slot_uv()
1354 xpc_free_msg_slot_uv(struct xpc_channel *ch, xpc_free_msg_slot_uv() argument
1357 xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); xpc_free_msg_slot_uv()
1360 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) xpc_free_msg_slot_uv()
1361 wake_up(&ch->msg_allocate_wq); xpc_free_msg_slot_uv()
1365 xpc_notify_sender_uv(struct xpc_channel *ch, xpc_notify_sender_uv() argument
1373 atomic_dec(&ch->n_to_notify); xpc_notify_sender_uv()
1377 msg_slot->msg_slot_number, ch->partid, ch->number); xpc_notify_sender_uv()
1379 func(reason, ch->partid, ch->number, msg_slot->key); xpc_notify_sender_uv()
1383 msg_slot->msg_slot_number, ch->partid, ch->number); xpc_notify_sender_uv()
1388 xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, xpc_handle_notify_mq_ack_uv() argument
1392 int entry = msg->hdr.msg_slot_number % ch->local_nentries; xpc_handle_notify_mq_ack_uv()
1394 msg_slot = &ch->sn.uv.send_msg_slots[entry]; xpc_handle_notify_mq_ack_uv()
1397 msg_slot->msg_slot_number += ch->local_nentries; xpc_handle_notify_mq_ack_uv()
1400 xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered); xpc_handle_notify_mq_ack_uv()
1402 xpc_free_msg_slot_uv(ch, msg_slot); xpc_handle_notify_mq_ack_uv()
1410 struct xpc_channel *ch; xpc_handle_notify_mq_msg_uv() local
1433 ch = &part->channels[ch_number]; xpc_handle_notify_mq_msg_uv()
1434 xpc_msgqueue_ref(ch); xpc_handle_notify_mq_msg_uv()
1436 if (!(ch->flags & XPC_C_CONNECTED)) { xpc_handle_notify_mq_msg_uv()
1437 xpc_msgqueue_deref(ch); xpc_handle_notify_mq_msg_uv()
1443 xpc_handle_notify_mq_ack_uv(ch, msg); xpc_handle_notify_mq_msg_uv()
1444 xpc_msgqueue_deref(ch); xpc_handle_notify_mq_msg_uv()
1449 ch_uv = &ch->sn.uv; xpc_handle_notify_mq_msg_uv()
1452 (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size; xpc_handle_notify_mq_msg_uv()
1460 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { xpc_handle_notify_mq_msg_uv()
1466 if (atomic_read(&ch->kthreads_idle) > 0) xpc_handle_notify_mq_msg_uv()
1467 wake_up_nr(&ch->idle_wq, 1); xpc_handle_notify_mq_msg_uv()
1469 xpc_send_chctl_local_msgrequest_uv(part, ch->number); xpc_handle_notify_mq_msg_uv()
1471 xpc_msgqueue_deref(ch); xpc_handle_notify_mq_msg_uv()
1504 xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch) xpc_n_of_deliverable_payloads_uv() argument
1506 return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); xpc_n_of_deliverable_payloads_uv()
1512 struct xpc_channel *ch = &part->channels[ch_number]; xpc_process_msg_chctl_flags_uv() local
1515 xpc_msgqueue_ref(ch); xpc_process_msg_chctl_flags_uv()
1517 ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch); xpc_process_msg_chctl_flags_uv()
1520 (ch->flags & XPC_C_CONNECTED) && xpc_process_msg_chctl_flags_uv()
1521 (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { xpc_process_msg_chctl_flags_uv()
1523 xpc_activate_kthreads(ch, ndeliverable_payloads); xpc_process_msg_chctl_flags_uv()
1526 xpc_msgqueue_deref(ch); xpc_process_msg_chctl_flags_uv()
1530 xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, xpc_send_payload_uv() argument
1543 if (msg_size > ch->entry_size) xpc_send_payload_uv()
1546 xpc_msgqueue_ref(ch); xpc_send_payload_uv()
1548 if (ch->flags & XPC_C_DISCONNECTING) { xpc_send_payload_uv()
1549 ret = ch->reason; xpc_send_payload_uv()
1552 if (!(ch->flags & XPC_C_CONNECTED)) { xpc_send_payload_uv()
1557 ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); xpc_send_payload_uv()
1562 atomic_inc(&ch->n_to_notify); xpc_send_payload_uv()
1568 if (ch->flags & XPC_C_DISCONNECTING) { xpc_send_payload_uv()
1569 ret = ch->reason; xpc_send_payload_uv()
1576 msg->hdr.ch_number = ch->number; xpc_send_payload_uv()
1581 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, xpc_send_payload_uv()
1586 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); xpc_send_payload_uv()
1605 atomic_dec(&ch->n_to_notify); xpc_send_payload_uv()
1607 xpc_free_msg_slot_uv(ch, msg_slot); xpc_send_payload_uv()
1609 xpc_msgqueue_deref(ch); xpc_send_payload_uv()
1621 xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch) xpc_notify_senders_of_disconnect_uv() argument
1626 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); xpc_notify_senders_of_disconnect_uv()
1628 for (entry = 0; entry < ch->local_nentries; entry++) { xpc_notify_senders_of_disconnect_uv()
1630 if (atomic_read(&ch->n_to_notify) == 0) xpc_notify_senders_of_disconnect_uv()
1633 msg_slot = &ch->sn.uv.send_msg_slots[entry]; xpc_notify_senders_of_disconnect_uv()
1635 xpc_notify_sender_uv(ch, msg_slot, ch->reason); xpc_notify_senders_of_disconnect_uv()
1643 xpc_get_deliverable_payload_uv(struct xpc_channel *ch) xpc_get_deliverable_payload_uv() argument
1649 if (!(ch->flags & XPC_C_DISCONNECTING)) { xpc_get_deliverable_payload_uv()
1650 entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); xpc_get_deliverable_payload_uv()
1661 xpc_received_payload_uv(struct xpc_channel *ch, void *payload) xpc_received_payload_uv() argument
1673 ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg, xpc_received_payload_uv()
1676 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); xpc_received_payload_uv()
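In xpc_handle_notify_mq_ack_uv() above, the ever-growing msg_slot_number is folded back onto the fixed send_msg_slots[] array with % local_nentries, and the slot's number is advanced by local_nentries once acknowledged so it names the next message that slot will carry. The sketch below replays that bookkeeping with an invented slot count; it is an illustration, not the driver's slot management.

#include <stdio.h>

#define LOCAL_NENTRIES 4  /* invented number of send slots */

struct slot {
    unsigned int slot_number;  /* message number this slot carries next */
};

int main(void)
{
    struct slot slots[LOCAL_NENTRIES];
    struct slot *s;
    unsigned int i;
    unsigned int acked = 6;  /* pretend message 6 was just acknowledged */

    /* Slots are used round-robin, so slot i starts with message i. */
    for (i = 0; i < LOCAL_NENTRIES; i++)
        slots[i].slot_number = i;
    /* By the time message 6 is in flight, slot 6 % 4 == 2 carries it. */
    slots[acked % LOCAL_NENTRIES].slot_number = acked;

    /* On ack: locate the slot and advance it to its next message number. */
    s = &slots[acked % LOCAL_NENTRIES];
    s->slot_number += LOCAL_NENTRIES;

    printf("slot %u will next carry message %u\n",
           acked % LOCAL_NENTRIES, s->slot_number);
    return 0;
}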
/linux-4.1.27/drivers/tty/serial/jsm/
H A Djsm_neo.c44 static void neo_set_cts_flow_control(struct jsm_channel *ch) neo_set_cts_flow_control() argument
47 ier = readb(&ch->ch_neo_uart->ier); neo_set_cts_flow_control()
48 efr = readb(&ch->ch_neo_uart->efr); neo_set_cts_flow_control()
50 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting CTSFLOW\n"); neo_set_cts_flow_control()
60 writeb(0, &ch->ch_neo_uart->efr); neo_set_cts_flow_control()
63 writeb(efr, &ch->ch_neo_uart->efr); neo_set_cts_flow_control()
66 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); neo_set_cts_flow_control()
69 writeb(8, &ch->ch_neo_uart->tfifo); neo_set_cts_flow_control()
70 ch->ch_t_tlevel = 8; neo_set_cts_flow_control()
72 writeb(ier, &ch->ch_neo_uart->ier); neo_set_cts_flow_control()
75 static void neo_set_rts_flow_control(struct jsm_channel *ch) neo_set_rts_flow_control() argument
78 ier = readb(&ch->ch_neo_uart->ier); neo_set_rts_flow_control()
79 efr = readb(&ch->ch_neo_uart->efr); neo_set_rts_flow_control()
81 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting RTSFLOW\n"); neo_set_rts_flow_control()
92 writeb(0, &ch->ch_neo_uart->efr); neo_set_rts_flow_control()
95 writeb(efr, &ch->ch_neo_uart->efr); neo_set_rts_flow_control()
97 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr); neo_set_rts_flow_control()
98 ch->ch_r_watermark = 4; neo_set_rts_flow_control()
100 writeb(56, &ch->ch_neo_uart->rfifo); neo_set_rts_flow_control()
101 ch->ch_r_tlevel = 56; neo_set_rts_flow_control()
103 writeb(ier, &ch->ch_neo_uart->ier); neo_set_rts_flow_control()
111 ch->ch_mostat |= (UART_MCR_RTS); neo_set_rts_flow_control()
115 static void neo_set_ixon_flow_control(struct jsm_channel *ch) neo_set_ixon_flow_control() argument
118 ier = readb(&ch->ch_neo_uart->ier); neo_set_ixon_flow_control()
119 efr = readb(&ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
121 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting IXON FLOW\n"); neo_set_ixon_flow_control()
131 writeb(0, &ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
134 writeb(efr, &ch->ch_neo_uart->efr); neo_set_ixon_flow_control()
136 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_ixon_flow_control()
137 ch->ch_r_watermark = 4; neo_set_ixon_flow_control()
139 writeb(32, &ch->ch_neo_uart->rfifo); neo_set_ixon_flow_control()
140 ch->ch_r_tlevel = 32; neo_set_ixon_flow_control()
143 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_ixon_flow_control()
144 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_ixon_flow_control()
146 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_ixon_flow_control()
147 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_ixon_flow_control()
149 writeb(ier, &ch->ch_neo_uart->ier); neo_set_ixon_flow_control()
152 static void neo_set_ixoff_flow_control(struct jsm_channel *ch) neo_set_ixoff_flow_control() argument
155 ier = readb(&ch->ch_neo_uart->ier); neo_set_ixoff_flow_control()
156 efr = readb(&ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
158 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n"); neo_set_ixoff_flow_control()
169 writeb(0, &ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
172 writeb(efr, &ch->ch_neo_uart->efr); neo_set_ixoff_flow_control()
175 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_ixoff_flow_control()
177 writeb(8, &ch->ch_neo_uart->tfifo); neo_set_ixoff_flow_control()
178 ch->ch_t_tlevel = 8; neo_set_ixoff_flow_control()
181 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_ixoff_flow_control()
182 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_ixoff_flow_control()
184 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_ixoff_flow_control()
185 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_ixoff_flow_control()
187 writeb(ier, &ch->ch_neo_uart->ier); neo_set_ixoff_flow_control()
190 static void neo_set_no_input_flow_control(struct jsm_channel *ch) neo_set_no_input_flow_control() argument
193 ier = readb(&ch->ch_neo_uart->ier); neo_set_no_input_flow_control()
194 efr = readb(&ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
196 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Unsetting Input FLOW\n"); neo_set_no_input_flow_control()
204 if (ch->ch_c_iflag & IXON) neo_set_no_input_flow_control()
210 writeb(0, &ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
213 writeb(efr, &ch->ch_neo_uart->efr); neo_set_no_input_flow_control()
216 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_no_input_flow_control()
218 ch->ch_r_watermark = 0; neo_set_no_input_flow_control()
220 writeb(16, &ch->ch_neo_uart->tfifo); neo_set_no_input_flow_control()
221 ch->ch_t_tlevel = 16; neo_set_no_input_flow_control()
223 writeb(16, &ch->ch_neo_uart->rfifo); neo_set_no_input_flow_control()
224 ch->ch_r_tlevel = 16; neo_set_no_input_flow_control()
226 writeb(ier, &ch->ch_neo_uart->ier); neo_set_no_input_flow_control()
229 static void neo_set_no_output_flow_control(struct jsm_channel *ch) neo_set_no_output_flow_control() argument
232 ier = readb(&ch->ch_neo_uart->ier); neo_set_no_output_flow_control()
233 efr = readb(&ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
235 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n"); neo_set_no_output_flow_control()
242 if (ch->ch_c_iflag & IXOFF) neo_set_no_output_flow_control()
248 writeb(0, &ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
251 writeb(efr, &ch->ch_neo_uart->efr); neo_set_no_output_flow_control()
254 writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr); neo_set_no_output_flow_control()
256 ch->ch_r_watermark = 0; neo_set_no_output_flow_control()
258 writeb(16, &ch->ch_neo_uart->tfifo); neo_set_no_output_flow_control()
259 ch->ch_t_tlevel = 16; neo_set_no_output_flow_control()
261 writeb(16, &ch->ch_neo_uart->rfifo); neo_set_no_output_flow_control()
262 ch->ch_r_tlevel = 16; neo_set_no_output_flow_control()
264 writeb(ier, &ch->ch_neo_uart->ier); neo_set_no_output_flow_control()
267 static inline void neo_set_new_start_stop_chars(struct jsm_channel *ch) neo_set_new_start_stop_chars() argument
271 if (ch->ch_c_cflag & CRTSCTS) neo_set_new_start_stop_chars()
274 jsm_dbg(PARAM, &ch->ch_bd->pci_dev, "start\n"); neo_set_new_start_stop_chars()
277 writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1); neo_set_new_start_stop_chars()
278 writeb(0, &ch->ch_neo_uart->xonchar2); neo_set_new_start_stop_chars()
280 writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1); neo_set_new_start_stop_chars()
281 writeb(0, &ch->ch_neo_uart->xoffchar2); neo_set_new_start_stop_chars()
284 static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch) neo_copy_data_from_uart_to_queue() argument
294 if (!ch) neo_copy_data_from_uart_to_queue()
298 head = ch->ch_r_head & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
299 tail = ch->ch_r_tail & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
302 linestatus = ch->ch_cached_lsr; neo_copy_data_from_uart_to_queue()
303 ch->ch_cached_lsr = 0; neo_copy_data_from_uart_to_queue()
316 if (!(ch->ch_flags & CH_FIFO_ENABLED)) neo_copy_data_from_uart_to_queue()
319 total = readb(&ch->ch_neo_uart->rfifo); neo_copy_data_from_uart_to_queue()
345 linestatus = readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_uart_to_queue()
372 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_uart_to_queue()
377 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n); neo_copy_data_from_uart_to_queue()
383 memset(ch->ch_equeue + head, 0, n); neo_copy_data_from_uart_to_queue()
389 ch->ch_rxcount += n; neo_copy_data_from_uart_to_queue()
396 if (ch->ch_c_iflag & IGNBRK) neo_copy_data_from_uart_to_queue()
409 linestatus |= readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_uart_to_queue()
417 ch->ch_cached_lsr = linestatus; neo_copy_data_from_uart_to_queue()
431 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_uart_to_queue()
440 memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1); neo_copy_data_from_uart_to_queue()
453 jsm_dbg(READ, &ch->ch_bd->pci_dev, neo_copy_data_from_uart_to_queue()
455 ch->ch_rqueue[tail], ch->ch_equeue[tail]); neo_copy_data_from_uart_to_queue()
457 ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
458 ch->ch_err_overrun++; neo_copy_data_from_uart_to_queue()
462 memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1); neo_copy_data_from_uart_to_queue()
463 ch->ch_equeue[head] = (u8) linestatus; neo_copy_data_from_uart_to_queue()
465 jsm_dbg(READ, &ch->ch_bd->pci_dev, "DATA/LSR pair: %x %x\n", neo_copy_data_from_uart_to_queue()
466 ch->ch_rqueue[head], ch->ch_equeue[head]); neo_copy_data_from_uart_to_queue()
475 ch->ch_rxcount++; neo_copy_data_from_uart_to_queue()
481 ch->ch_r_head = head & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
482 ch->ch_e_head = head & EQUEUEMASK; neo_copy_data_from_uart_to_queue()
483 jsm_input(ch); neo_copy_data_from_uart_to_queue()
486 static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) neo_copy_data_from_queue_to_uart() argument
496 if (!ch) neo_copy_data_from_queue_to_uart()
499 circ = &ch->uart_port.state->xmit; neo_copy_data_from_queue_to_uart()
506 if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) neo_copy_data_from_queue_to_uart()
511 if (!(ch->ch_flags & CH_FIFO_ENABLED)) { neo_copy_data_from_queue_to_uart()
512 u8 lsrbits = readb(&ch->ch_neo_uart->lsr); neo_copy_data_from_queue_to_uart()
514 ch->ch_cached_lsr |= lsrbits; neo_copy_data_from_queue_to_uart()
515 if (ch->ch_cached_lsr & UART_LSR_THRE) { neo_copy_data_from_queue_to_uart()
516 ch->ch_cached_lsr &= ~(UART_LSR_THRE); neo_copy_data_from_queue_to_uart()
518 writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx); neo_copy_data_from_queue_to_uart()
519 jsm_dbg(WRITE, &ch->ch_bd->pci_dev, neo_copy_data_from_queue_to_uart()
522 ch->ch_txcount++; neo_copy_data_from_queue_to_uart()
530 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) neo_copy_data_from_queue_to_uart()
533 n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; neo_copy_data_from_queue_to_uart()
551 memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s); neo_copy_data_from_queue_to_uart()
555 ch->ch_txcount += s; neo_copy_data_from_queue_to_uart()
562 if (len_written >= ch->ch_t_tlevel) neo_copy_data_from_queue_to_uart()
563 ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_copy_data_from_queue_to_uart()
566 uart_write_wakeup(&ch->uart_port); neo_copy_data_from_queue_to_uart()
569 static void neo_parse_modem(struct jsm_channel *ch, u8 signals) neo_parse_modem() argument
573 jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, neo_parse_modem()
575 ch->ch_portnum, msignals); neo_parse_modem()
582 uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); neo_parse_modem()
584 uart_handle_cts_change(&ch->uart_port, msignals & UART_MSR_CTS); neo_parse_modem()
586 ch->ch_mistat |= UART_MSR_DCD; neo_parse_modem()
588 ch->ch_mistat &= ~UART_MSR_DCD; neo_parse_modem()
591 ch->ch_mistat |= UART_MSR_DSR; neo_parse_modem()
593 ch->ch_mistat &= ~UART_MSR_DSR; neo_parse_modem()
596 ch->ch_mistat |= UART_MSR_RI; neo_parse_modem()
598 ch->ch_mistat &= ~UART_MSR_RI; neo_parse_modem()
601 ch->ch_mistat |= UART_MSR_CTS; neo_parse_modem()
603 ch->ch_mistat &= ~UART_MSR_CTS; neo_parse_modem()
605 jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, neo_parse_modem()
607 ch->ch_portnum, neo_parse_modem()
608 !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), neo_parse_modem()
609 !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), neo_parse_modem()
610 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), neo_parse_modem()
611 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), neo_parse_modem()
612 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), neo_parse_modem()
613 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); neo_parse_modem()
617 static void neo_assert_modem_signals(struct jsm_channel *ch) neo_assert_modem_signals() argument
619 if (!ch) neo_assert_modem_signals()
622 writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); neo_assert_modem_signals()
625 neo_pci_posting_flush(ch->ch_bd); neo_assert_modem_signals()
633 static void neo_flush_uart_write(struct jsm_channel *ch) neo_flush_uart_write() argument
638 if (!ch) neo_flush_uart_write()
641 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); neo_flush_uart_write()
646 tmp = readb(&ch->ch_neo_uart->isr_fcr); neo_flush_uart_write()
648 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, neo_flush_uart_write()
656 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_flush_uart_write()
665 static void neo_flush_uart_read(struct jsm_channel *ch) neo_flush_uart_read() argument
670 if (!ch) neo_flush_uart_read()
673 writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr); neo_flush_uart_read()
678 tmp = readb(&ch->ch_neo_uart->isr_fcr); neo_flush_uart_read()
680 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, neo_flush_uart_read()
692 static void neo_clear_break(struct jsm_channel *ch) neo_clear_break() argument
696 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_clear_break()
699 if (ch->ch_flags & CH_BREAK_SENDING) { neo_clear_break()
700 u8 temp = readb(&ch->ch_neo_uart->lcr); neo_clear_break()
701 writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr); neo_clear_break()
703 ch->ch_flags &= ~(CH_BREAK_SENDING); neo_clear_break()
704 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, neo_clear_break()
709 neo_pci_posting_flush(ch->ch_bd); neo_clear_break()
711 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_clear_break()
719 struct jsm_channel *ch; neo_parse_isr() local
730 ch = brd->channels[port]; neo_parse_isr()
731 if (!ch) neo_parse_isr()
737 isr = readb(&ch->ch_neo_uart->isr_fcr); neo_parse_isr()
748 jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d isr: %x\n", neo_parse_isr()
753 neo_copy_data_from_uart_to_queue(ch); neo_parse_isr()
756 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_isr()
757 jsm_check_queue_flow_control(ch); neo_parse_isr()
758 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_isr()
763 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_isr()
764 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_isr()
765 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_isr()
766 neo_copy_data_from_queue_to_uart(ch); neo_parse_isr()
770 cause = readb(&ch->ch_neo_uart->xoffchar1); neo_parse_isr()
772 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_isr()
781 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_isr()
785 ch->ch_flags &= ~(CH_STOP); neo_parse_isr()
787 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_isr()
793 ch->ch_flags |= CH_STOP; neo_parse_isr()
794 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_isr()
797 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_isr()
801 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_isr()
809 cause = readb(&ch->ch_neo_uart->mcr); neo_parse_isr()
812 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_isr()
815 ch->ch_mostat |= UART_MCR_RTS; neo_parse_isr()
817 ch->ch_mostat &= ~(UART_MCR_RTS); neo_parse_isr()
820 ch->ch_mostat |= UART_MCR_DTR; neo_parse_isr()
822 ch->ch_mostat &= ~(UART_MCR_DTR); neo_parse_isr()
824 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_isr()
828 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_isr()
830 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); neo_parse_isr()
836 struct jsm_channel *ch; neo_parse_lsr() local
846 ch = brd->channels[port]; neo_parse_lsr()
847 if (!ch) neo_parse_lsr()
850 linestatus = readb(&ch->ch_neo_uart->lsr); neo_parse_lsr()
852 jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d port: %d linestatus: %x\n", neo_parse_lsr()
855 ch->ch_cached_lsr |= linestatus; neo_parse_lsr()
857 if (ch->ch_cached_lsr & UART_LSR_DR) { neo_parse_lsr()
859 neo_copy_data_from_uart_to_queue(ch); neo_parse_lsr()
860 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_lsr()
861 jsm_check_queue_flow_control(ch); neo_parse_lsr()
862 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_lsr()
872 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_lsr()
882 ch->ch_err_parity++; neo_parse_lsr()
883 jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d Port: %d. PAR ERR!\n", neo_parse_lsr()
888 ch->ch_err_frame++; neo_parse_lsr()
889 jsm_dbg(INTR, &ch->ch_bd->pci_dev, "%s:%d Port: %d. FRM ERR!\n", neo_parse_lsr()
894 ch->ch_err_break++; neo_parse_lsr()
895 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_lsr()
907 ch->ch_err_overrun++; neo_parse_lsr()
908 jsm_dbg(INTR, &ch->ch_bd->pci_dev, neo_parse_lsr()
914 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_lsr()
915 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_lsr()
916 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_lsr()
919 neo_copy_data_from_queue_to_uart(ch); neo_parse_lsr()
922 spin_lock_irqsave(&ch->ch_lock, lock_flags); neo_parse_lsr()
923 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_parse_lsr()
924 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); neo_parse_lsr()
927 neo_copy_data_from_queue_to_uart(ch); neo_parse_lsr()
935 static void neo_param(struct jsm_channel *ch) neo_param() argument
943 bd = ch->ch_bd; neo_param()
950 if ((ch->ch_c_cflag & (CBAUD)) == 0) { neo_param()
951 ch->ch_r_head = ch->ch_r_tail = 0; neo_param()
952 ch->ch_e_head = ch->ch_e_tail = 0; neo_param()
954 neo_flush_uart_write(ch); neo_param()
955 neo_flush_uart_read(ch); neo_param()
957 ch->ch_flags |= (CH_BAUD0); neo_param()
958 ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); neo_param()
959 neo_assert_modem_signals(ch); neo_param()
990 cflag = C_BAUD(ch->uart_port.state->port.tty); neo_param()
999 if (ch->ch_flags & CH_BAUD0) neo_param()
1000 ch->ch_flags &= ~(CH_BAUD0); neo_param()
1003 if (ch->ch_c_cflag & PARENB) neo_param()
1006 if (!(ch->ch_c_cflag & PARODD)) neo_param()
1014 if (ch->ch_c_cflag & CMSPAR) neo_param()
1018 if (ch->ch_c_cflag & CSTOPB) neo_param()
1021 switch (ch->ch_c_cflag & CSIZE) { neo_param()
1037 ier = readb(&ch->ch_neo_uart->ier); neo_param()
1038 uart_lcr = readb(&ch->ch_neo_uart->lcr); neo_param()
1040 quot = ch->ch_bd->bd_dividend / baud; neo_param()
1043 writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr); neo_param()
1044 writeb((quot & 0xff), &ch->ch_neo_uart->txrx); neo_param()
1045 writeb((quot >> 8), &ch->ch_neo_uart->ier); neo_param()
1046 writeb(lcr, &ch->ch_neo_uart->lcr); neo_param()
1050 writeb(lcr, &ch->ch_neo_uart->lcr); neo_param()
1052 if (ch->ch_c_cflag & CREAD) neo_param()
1057 writeb(ier, &ch->ch_neo_uart->ier); neo_param()
1060 neo_set_new_start_stop_chars(ch); neo_param()
1062 if (ch->ch_c_cflag & CRTSCTS) neo_param()
1063 neo_set_cts_flow_control(ch); neo_param()
1064 else if (ch->ch_c_iflag & IXON) { neo_param()
1066 if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_param()
1067 neo_set_no_output_flow_control(ch); neo_param()
1069 neo_set_ixon_flow_control(ch); neo_param()
1072 neo_set_no_output_flow_control(ch); neo_param()
1074 if (ch->ch_c_cflag & CRTSCTS) neo_param()
1075 neo_set_rts_flow_control(ch); neo_param()
1076 else if (ch->ch_c_iflag & IXOFF) { neo_param()
1078 if ((ch->ch_startc == __DISABLED_CHAR) || (ch->ch_stopc == __DISABLED_CHAR)) neo_param()
1079 neo_set_no_input_flow_control(ch); neo_param()
1081 neo_set_ixoff_flow_control(ch); neo_param()
1084 neo_set_no_input_flow_control(ch); neo_param()
1091 writeb(1, &ch->ch_neo_uart->rfifo); neo_param()
1092 ch->ch_r_tlevel = 1; neo_param()
1095 neo_assert_modem_signals(ch); neo_param()
1098 neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr)); neo_param()
1110 struct jsm_channel *ch; neo_intr() local
1186 ch = brd->channels[port]; neo_intr()
1187 neo_copy_data_from_uart_to_queue(ch); neo_intr()
1190 spin_lock_irqsave(&ch->ch_lock, lock_flags2); neo_intr()
1191 jsm_check_queue_flow_control(ch); neo_intr()
1192 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); neo_intr()
1251 static void neo_disable_receiver(struct jsm_channel *ch) neo_disable_receiver() argument
1253 u8 tmp = readb(&ch->ch_neo_uart->ier); neo_disable_receiver()
1255 writeb(tmp, &ch->ch_neo_uart->ier); neo_disable_receiver()
1258 neo_pci_posting_flush(ch->ch_bd); neo_disable_receiver()
1267 static void neo_enable_receiver(struct jsm_channel *ch) neo_enable_receiver() argument
1269 u8 tmp = readb(&ch->ch_neo_uart->ier); neo_enable_receiver()
1271 writeb(tmp, &ch->ch_neo_uart->ier); neo_enable_receiver()
1274 neo_pci_posting_flush(ch->ch_bd); neo_enable_receiver()
1277 static void neo_send_start_character(struct jsm_channel *ch) neo_send_start_character() argument
1279 if (!ch) neo_send_start_character()
1282 if (ch->ch_startc != __DISABLED_CHAR) { neo_send_start_character()
1283 ch->ch_xon_sends++; neo_send_start_character()
1284 writeb(ch->ch_startc, &ch->ch_neo_uart->txrx); neo_send_start_character()
1287 neo_pci_posting_flush(ch->ch_bd); neo_send_start_character()
1291 static void neo_send_stop_character(struct jsm_channel *ch) neo_send_stop_character() argument
1293 if (!ch) neo_send_stop_character()
1296 if (ch->ch_stopc != __DISABLED_CHAR) { neo_send_stop_character()
1297 ch->ch_xoff_sends++; neo_send_stop_character()
1298 writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx); neo_send_stop_character()
1301 neo_pci_posting_flush(ch->ch_bd); neo_send_stop_character()
1308 static void neo_uart_init(struct jsm_channel *ch) neo_uart_init() argument
1310 writeb(0, &ch->ch_neo_uart->ier); neo_uart_init()
1311 writeb(0, &ch->ch_neo_uart->efr); neo_uart_init()
1312 writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr); neo_uart_init()
1315 readb(&ch->ch_neo_uart->txrx); neo_uart_init()
1316 writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr); neo_uart_init()
1317 readb(&ch->ch_neo_uart->lsr); neo_uart_init()
1318 readb(&ch->ch_neo_uart->msr); neo_uart_init()
1320 ch->ch_flags |= CH_FIFO_ENABLED; neo_uart_init()
1323 writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr); neo_uart_init()
1329 static void neo_uart_off(struct jsm_channel *ch) neo_uart_off() argument
1332 writeb(0, &ch->ch_neo_uart->efr); neo_uart_off()
1335 writeb(0, &ch->ch_neo_uart->ier); neo_uart_off()
1338 static u32 neo_get_uart_bytes_left(struct jsm_channel *ch) neo_get_uart_bytes_left() argument
1341 u8 lsr = readb(&ch->ch_neo_uart->lsr); neo_get_uart_bytes_left()
1344 ch->ch_cached_lsr |= lsr; neo_get_uart_bytes_left()
1350 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); neo_get_uart_bytes_left()
1358 static void neo_send_break(struct jsm_channel *ch) neo_send_break() argument
1367 if (!(ch->ch_flags & CH_BREAK_SENDING)) { neo_send_break()
1368 u8 temp = readb(&ch->ch_neo_uart->lcr); neo_send_break()
1369 writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr); neo_send_break()
1370 ch->ch_flags |= (CH_BREAK_SENDING); neo_send_break()
1373 neo_pci_posting_flush(ch->ch_bd); neo_send_break()
1385 static void neo_send_immediate_char(struct jsm_channel *ch, unsigned char c) neo_send_immediate_char() argument
1387 if (!ch) neo_send_immediate_char()
1390 writeb(c, &ch->ch_neo_uart->txrx); neo_send_immediate_char()
1393 neo_pci_posting_flush(ch->ch_bd); neo_send_immediate_char()
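The jsm_neo.c hits above cluster around neo_param(), which picks a flow-control routine from the channel's termios flags: CRTSCTS selects hardware CTS/RTS handling, otherwise IXON/IXOFF selects software XON/XOFF unless either the start or stop character is the disabled value, otherwise flow control is switched off. The following standalone sketch mirrors only that decision tree for the output direction; it is not the driver itself, and the names flow_mode, pick_output_flow and DISABLED_CHAR are invented for the example.

/*
 * Illustrative sketch, not jsm code: the same priority order as
 * neo_param() -- hardware flow control wins, XON/XOFF is skipped when a
 * start/stop character is disabled, otherwise no output flow control.
 */
#include <stdio.h>
#include <stdbool.h>

#define DISABLED_CHAR '\0'                 /* stand-in for __DISABLED_CHAR */

enum flow_mode { FLOW_NONE, FLOW_HW_CTS_RTS, FLOW_SW_XON_XOFF };

static enum flow_mode pick_output_flow(bool crtscts, bool ixon,
                                       unsigned char startc, unsigned char stopc)
{
	if (crtscts)
		return FLOW_HW_CTS_RTS;    /* cf. neo_set_cts_flow_control() */
	if (ixon && startc != DISABLED_CHAR && stopc != DISABLED_CHAR)
		return FLOW_SW_XON_XOFF;   /* cf. neo_set_ixon_flow_control() */
	return FLOW_NONE;                  /* cf. neo_set_no_output_flow_control() */
}

int main(void)
{
	printf("%d\n", pick_output_flow(false, true, 0x11, 0x13));          /* XON/XOFF */
	printf("%d\n", pick_output_flow(true, true, 0x11, 0x13));           /* CTS/RTS  */
	printf("%d\n", pick_output_flow(false, true, DISABLED_CHAR, 0x13)); /* none     */
	return 0;
}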
H A Djsm_cls.c61 static void cls_set_cts_flow_control(struct jsm_channel *ch) cls_set_cts_flow_control() argument
63 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
64 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_cts_flow_control()
71 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
73 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
79 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
82 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_cts_flow_control()
90 writeb(ier, &ch->ch_cls_uart->ier); cls_set_cts_flow_control()
93 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
97 &ch->ch_cls_uart->isr_fcr); cls_set_cts_flow_control()
99 ch->ch_t_tlevel = 16; cls_set_cts_flow_control()
102 static void cls_set_ixon_flow_control(struct jsm_channel *ch) cls_set_ixon_flow_control() argument
104 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
105 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_ixon_flow_control()
112 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
114 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
120 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
123 writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); cls_set_ixon_flow_control()
124 writeb(0, &ch->ch_cls_uart->lsr); cls_set_ixon_flow_control()
125 writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); cls_set_ixon_flow_control()
126 writeb(0, &ch->ch_cls_uart->spr); cls_set_ixon_flow_control()
129 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_ixon_flow_control()
137 writeb(ier, &ch->ch_cls_uart->ier); cls_set_ixon_flow_control()
140 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
144 &ch->ch_cls_uart->isr_fcr); cls_set_ixon_flow_control()
147 static void cls_set_no_output_flow_control(struct jsm_channel *ch) cls_set_no_output_flow_control() argument
149 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
150 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_no_output_flow_control()
157 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
159 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
165 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
168 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_no_output_flow_control()
176 writeb(ier, &ch->ch_cls_uart->ier); cls_set_no_output_flow_control()
179 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
183 &ch->ch_cls_uart->isr_fcr); cls_set_no_output_flow_control()
185 ch->ch_r_watermark = 0; cls_set_no_output_flow_control()
186 ch->ch_t_tlevel = 16; cls_set_no_output_flow_control()
187 ch->ch_r_tlevel = 16; cls_set_no_output_flow_control()
190 static void cls_set_rts_flow_control(struct jsm_channel *ch) cls_set_rts_flow_control() argument
192 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
193 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_rts_flow_control()
200 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
202 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
208 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
211 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_rts_flow_control()
215 writeb(ier, &ch->ch_cls_uart->ier); cls_set_rts_flow_control()
218 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
222 &ch->ch_cls_uart->isr_fcr); cls_set_rts_flow_control()
224 ch->ch_r_watermark = 4; cls_set_rts_flow_control()
225 ch->ch_r_tlevel = 8; cls_set_rts_flow_control()
228 static void cls_set_ixoff_flow_control(struct jsm_channel *ch) cls_set_ixoff_flow_control() argument
230 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
231 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_ixoff_flow_control()
238 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
240 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
246 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
249 writeb(ch->ch_startc, &ch->ch_cls_uart->mcr); cls_set_ixoff_flow_control()
250 writeb(0, &ch->ch_cls_uart->lsr); cls_set_ixoff_flow_control()
251 writeb(ch->ch_stopc, &ch->ch_cls_uart->msr); cls_set_ixoff_flow_control()
252 writeb(0, &ch->ch_cls_uart->spr); cls_set_ixoff_flow_control()
255 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_ixoff_flow_control()
259 writeb(ier, &ch->ch_cls_uart->ier); cls_set_ixoff_flow_control()
262 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
266 &ch->ch_cls_uart->isr_fcr); cls_set_ixoff_flow_control()
269 static void cls_set_no_input_flow_control(struct jsm_channel *ch) cls_set_no_input_flow_control() argument
271 u8 lcrb = readb(&ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
272 u8 ier = readb(&ch->ch_cls_uart->ier); cls_set_no_input_flow_control()
279 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
281 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
287 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
290 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_set_no_input_flow_control()
294 writeb(ier, &ch->ch_cls_uart->ier); cls_set_no_input_flow_control()
297 writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
301 &ch->ch_cls_uart->isr_fcr); cls_set_no_input_flow_control()
303 ch->ch_t_tlevel = 16; cls_set_no_input_flow_control()
304 ch->ch_r_tlevel = 16; cls_set_no_input_flow_control()
314 static void cls_clear_break(struct jsm_channel *ch) cls_clear_break() argument
318 spin_lock_irqsave(&ch->ch_lock, lock_flags); cls_clear_break()
321 if (ch->ch_flags & CH_BREAK_SENDING) { cls_clear_break()
322 u8 temp = readb(&ch->ch_cls_uart->lcr); cls_clear_break()
324 writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr); cls_clear_break()
326 ch->ch_flags &= ~(CH_BREAK_SENDING); cls_clear_break()
327 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, cls_clear_break()
331 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); cls_clear_break()
334 static void cls_disable_receiver(struct jsm_channel *ch) cls_disable_receiver() argument
336 u8 tmp = readb(&ch->ch_cls_uart->ier); cls_disable_receiver()
339 writeb(tmp, &ch->ch_cls_uart->ier); cls_disable_receiver()
342 static void cls_enable_receiver(struct jsm_channel *ch) cls_enable_receiver() argument
344 u8 tmp = readb(&ch->ch_cls_uart->ier); cls_enable_receiver()
347 writeb(tmp, &ch->ch_cls_uart->ier); cls_enable_receiver()
351 static void cls_assert_modem_signals(struct jsm_channel *ch) cls_assert_modem_signals() argument
353 if (!ch) cls_assert_modem_signals()
356 writeb(ch->ch_mostat, &ch->ch_cls_uart->mcr); cls_assert_modem_signals()
359 static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch) cls_copy_data_from_uart_to_queue() argument
368 if (!ch) cls_copy_data_from_uart_to_queue()
371 spin_lock_irqsave(&ch->ch_lock, flags); cls_copy_data_from_uart_to_queue()
374 head = ch->ch_r_head & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
375 tail = ch->ch_r_tail & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
378 linestatus = ch->ch_cached_lsr; cls_copy_data_from_uart_to_queue()
379 ch->ch_cached_lsr = 0; cls_copy_data_from_uart_to_queue()
390 if (ch->ch_c_iflag & IGNBRK) cls_copy_data_from_uart_to_queue()
398 linestatus = readb(&ch->ch_cls_uart->lsr); cls_copy_data_from_uart_to_queue()
412 discard = readb(&ch->ch_cls_uart->txrx); cls_copy_data_from_uart_to_queue()
426 ch->ch_r_tail = tail; cls_copy_data_from_uart_to_queue()
427 ch->ch_err_overrun++; cls_copy_data_from_uart_to_queue()
431 ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE cls_copy_data_from_uart_to_queue()
433 ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx); cls_copy_data_from_uart_to_queue()
437 if (ch->ch_equeue[head] & UART_LSR_PE) cls_copy_data_from_uart_to_queue()
438 ch->ch_err_parity++; cls_copy_data_from_uart_to_queue()
439 if (ch->ch_equeue[head] & UART_LSR_BI) cls_copy_data_from_uart_to_queue()
440 ch->ch_err_break++; cls_copy_data_from_uart_to_queue()
441 if (ch->ch_equeue[head] & UART_LSR_FE) cls_copy_data_from_uart_to_queue()
442 ch->ch_err_frame++; cls_copy_data_from_uart_to_queue()
446 ch->ch_rxcount++; cls_copy_data_from_uart_to_queue()
452 ch->ch_r_head = head & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
453 ch->ch_e_head = head & EQUEUEMASK; cls_copy_data_from_uart_to_queue()
455 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_copy_data_from_uart_to_queue()
458 static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch) cls_copy_data_from_queue_to_uart() argument
466 if (!ch) cls_copy_data_from_queue_to_uart()
469 circ = &ch->uart_port.state->xmit; cls_copy_data_from_queue_to_uart()
476 if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) cls_copy_data_from_queue_to_uart()
480 if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) cls_copy_data_from_queue_to_uart()
493 writeb(circ->buf[tail], &ch->ch_cls_uart->txrx); cls_copy_data_from_queue_to_uart()
496 ch->ch_txcount++; cls_copy_data_from_queue_to_uart()
503 if (len_written > ch->ch_t_tlevel) cls_copy_data_from_queue_to_uart()
504 ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_copy_data_from_queue_to_uart()
507 uart_write_wakeup(&ch->uart_port); cls_copy_data_from_queue_to_uart()
510 static void cls_parse_modem(struct jsm_channel *ch, u8 signals) cls_parse_modem() argument
514 jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, cls_parse_modem()
516 ch->ch_portnum, msignals); cls_parse_modem()
526 uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_DCD); cls_parse_modem()
528 uart_handle_dcd_change(&ch->uart_port, msignals & UART_MSR_CTS); cls_parse_modem()
531 ch->ch_mistat |= UART_MSR_DCD; cls_parse_modem()
533 ch->ch_mistat &= ~UART_MSR_DCD; cls_parse_modem()
536 ch->ch_mistat |= UART_MSR_DSR; cls_parse_modem()
538 ch->ch_mistat &= ~UART_MSR_DSR; cls_parse_modem()
541 ch->ch_mistat |= UART_MSR_RI; cls_parse_modem()
543 ch->ch_mistat &= ~UART_MSR_RI; cls_parse_modem()
546 ch->ch_mistat |= UART_MSR_CTS; cls_parse_modem()
548 ch->ch_mistat &= ~UART_MSR_CTS; cls_parse_modem()
550 jsm_dbg(MSIGS, &ch->ch_bd->pci_dev, cls_parse_modem()
552 ch->ch_portnum, cls_parse_modem()
553 !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR), cls_parse_modem()
554 !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS), cls_parse_modem()
555 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS), cls_parse_modem()
556 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR), cls_parse_modem()
557 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI), cls_parse_modem()
558 !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)); cls_parse_modem()
564 struct jsm_channel *ch; cls_parse_isr() local
576 ch = brd->channels[port]; cls_parse_isr()
577 if (!ch) cls_parse_isr()
582 isr = readb(&ch->ch_cls_uart->isr_fcr); cls_parse_isr()
591 cls_copy_data_from_uart_to_queue(ch); cls_parse_isr()
592 jsm_check_queue_flow_control(ch); cls_parse_isr()
598 spin_lock_irqsave(&ch->ch_lock, flags); cls_parse_isr()
599 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_parse_isr()
600 spin_unlock_irqrestore(&ch->ch_lock, flags); cls_parse_isr()
601 cls_copy_data_from_queue_to_uart(ch); cls_parse_isr()
611 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); cls_parse_isr()
616 static void cls_flush_uart_write(struct jsm_channel *ch) cls_flush_uart_write() argument
621 if (!ch) cls_flush_uart_write()
625 &ch->ch_cls_uart->isr_fcr); cls_flush_uart_write()
629 tmp = readb(&ch->ch_cls_uart->isr_fcr); cls_flush_uart_write()
631 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, cls_flush_uart_write()
638 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_flush_uart_write()
642 static void cls_flush_uart_read(struct jsm_channel *ch) cls_flush_uart_read() argument
644 if (!ch) cls_flush_uart_read()
661 static void cls_send_start_character(struct jsm_channel *ch) cls_send_start_character() argument
663 if (!ch) cls_send_start_character()
666 if (ch->ch_startc != __DISABLED_CHAR) { cls_send_start_character()
667 ch->ch_xon_sends++; cls_send_start_character()
668 writeb(ch->ch_startc, &ch->ch_cls_uart->txrx); cls_send_start_character()
672 static void cls_send_stop_character(struct jsm_channel *ch) cls_send_stop_character() argument
674 if (!ch) cls_send_stop_character()
677 if (ch->ch_stopc != __DISABLED_CHAR) { cls_send_stop_character()
678 ch->ch_xoff_sends++; cls_send_stop_character()
679 writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx); cls_send_stop_character()
687 static void cls_param(struct jsm_channel *ch) cls_param() argument
698 bd = ch->ch_bd; cls_param()
705 if ((ch->ch_c_cflag & (CBAUD)) == 0) { cls_param()
706 ch->ch_r_head = 0; cls_param()
707 ch->ch_r_tail = 0; cls_param()
708 ch->ch_e_head = 0; cls_param()
709 ch->ch_e_tail = 0; cls_param()
711 cls_flush_uart_write(ch); cls_param()
712 cls_flush_uart_read(ch); cls_param()
715 ch->ch_flags |= (CH_BAUD0); cls_param()
716 ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR); cls_param()
717 cls_assert_modem_signals(ch); cls_param()
721 cflag = C_BAUD(ch->uart_port.state->port.tty); cls_param()
730 if (ch->ch_flags & CH_BAUD0) cls_param()
731 ch->ch_flags &= ~(CH_BAUD0); cls_param()
733 if (ch->ch_c_cflag & PARENB) cls_param()
736 if (!(ch->ch_c_cflag & PARODD)) cls_param()
744 if (ch->ch_c_cflag & CMSPAR) cls_param()
748 if (ch->ch_c_cflag & CSTOPB) cls_param()
751 switch (ch->ch_c_cflag & CSIZE) { cls_param()
767 ier = readb(&ch->ch_cls_uart->ier); cls_param()
768 uart_lcr = readb(&ch->ch_cls_uart->lcr); cls_param()
770 quot = ch->ch_bd->bd_dividend / baud; cls_param()
773 writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr); cls_param()
774 writeb((quot & 0xff), &ch->ch_cls_uart->txrx); cls_param()
775 writeb((quot >> 8), &ch->ch_cls_uart->ier); cls_param()
776 writeb(lcr, &ch->ch_cls_uart->lcr); cls_param()
780 writeb(lcr, &ch->ch_cls_uart->lcr); cls_param()
782 if (ch->ch_c_cflag & CREAD) cls_param()
787 writeb(ier, &ch->ch_cls_uart->ier); cls_param()
789 if (ch->ch_c_cflag & CRTSCTS) cls_param()
790 cls_set_cts_flow_control(ch); cls_param()
791 else if (ch->ch_c_iflag & IXON) { cls_param()
796 if ((ch->ch_startc == __DISABLED_CHAR) || cls_param()
797 (ch->ch_stopc == __DISABLED_CHAR)) cls_param()
798 cls_set_no_output_flow_control(ch); cls_param()
800 cls_set_ixon_flow_control(ch); cls_param()
802 cls_set_no_output_flow_control(ch); cls_param()
804 if (ch->ch_c_cflag & CRTSCTS) cls_param()
805 cls_set_rts_flow_control(ch); cls_param()
806 else if (ch->ch_c_iflag & IXOFF) { cls_param()
811 if ((ch->ch_startc == __DISABLED_CHAR) || cls_param()
812 (ch->ch_stopc == __DISABLED_CHAR)) cls_param()
813 cls_set_no_input_flow_control(ch); cls_param()
815 cls_set_ixoff_flow_control(ch); cls_param()
817 cls_set_no_input_flow_control(ch); cls_param()
819 cls_assert_modem_signals(ch); cls_param()
822 cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr)); cls_param()
868 static void cls_uart_init(struct jsm_channel *ch) cls_uart_init() argument
870 unsigned char lcrb = readb(&ch->ch_cls_uart->lcr); cls_uart_init()
873 writeb(0, &ch->ch_cls_uart->ier); cls_uart_init()
879 writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr); cls_uart_init()
881 isr_fcr = readb(&ch->ch_cls_uart->isr_fcr); cls_uart_init()
886 writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr); cls_uart_init()
889 writeb(lcrb, &ch->ch_cls_uart->lcr); cls_uart_init()
892 readb(&ch->ch_cls_uart->txrx); cls_uart_init()
895 &ch->ch_cls_uart->isr_fcr); cls_uart_init()
898 ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_uart_init()
900 readb(&ch->ch_cls_uart->lsr); cls_uart_init()
901 readb(&ch->ch_cls_uart->msr); cls_uart_init()
907 static void cls_uart_off(struct jsm_channel *ch) cls_uart_off() argument
910 writeb(0, &ch->ch_cls_uart->ier); cls_uart_off()
919 static u32 cls_get_uart_bytes_left(struct jsm_channel *ch) cls_get_uart_bytes_left() argument
922 u8 lsr = readb(&ch->ch_cls_uart->lsr); cls_get_uart_bytes_left()
928 ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); cls_get_uart_bytes_left()
941 static void cls_send_break(struct jsm_channel *ch) cls_send_break() argument
944 if (!(ch->ch_flags & CH_BREAK_SENDING)) { cls_send_break()
945 u8 temp = readb(&ch->ch_cls_uart->lcr); cls_send_break()
947 writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr); cls_send_break()
948 ch->ch_flags |= (CH_BREAK_SENDING); cls_send_break()
959 static void cls_send_immediate_char(struct jsm_channel *ch, unsigned char c) cls_send_immediate_char() argument
961 writeb(c, &ch->ch_cls_uart->txrx); cls_send_immediate_char()
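A pattern that repeats through the jsm_cls.c hits above is register banking: each cls_set_*_flow_control() routine saves LCR (and IER), writes UART_EXAR654_ENHANCED_REGISTER_SET to LCR to expose the enhanced register set, modifies isr_fcr, then restores the saved LCR. The sketch below reproduces only the LCR save/unlock/modify/restore sequence with a fake register file; mmio_read8/mmio_write8 and the offsets are hypothetical stand-ins for readb()/writeb() on the ch_cls_uart registers.

/*
 * Minimal sketch of the save / unlock / modify / restore sequence used
 * around the Exar 654 enhanced register set.  Demo only; the "register
 * file" is an array, not real MMIO.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_ISR_FCR  2
#define REG_LCR      3
#define ENHANCED_REGISTER_SET 0xBF        /* stand-in magic LCR value */

static uint8_t regs[8];                   /* fake register file for the demo */
static uint8_t mmio_read8(int off)             { return regs[off]; }
static void    mmio_write8(int off, uint8_t v) { regs[off] = v; }

static void set_flow_bits(uint8_t bits)
{
	uint8_t lcr = mmio_read8(REG_LCR);           /* save current LCR        */

	mmio_write8(REG_LCR, ENHANCED_REGISTER_SET); /* unlock enhanced regs    */
	mmio_write8(REG_ISR_FCR, mmio_read8(REG_ISR_FCR) | bits);
	mmio_write8(REG_LCR, lcr);                   /* restore the normal map  */
}

int main(void)
{
	regs[REG_LCR] = 0x03;                        /* pretend 8N1 setting     */
	set_flow_bits(0x40);
	printf("lcr=%#x isr_fcr=%#x\n", regs[REG_LCR], regs[REG_ISR_FCR]);
	return 0;
}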
H A Djsm_tty.c34 static void jsm_carrier(struct jsm_channel *ch);
36 static inline int jsm_get_mstat(struct jsm_channel *ch) jsm_get_mstat() argument
41 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n"); jsm_get_mstat()
43 mstat = (ch->ch_mostat | ch->ch_mistat); jsm_get_mstat()
60 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n"); jsm_get_mstat()
156 static void jsm_tty_send_xchar(struct uart_port *port, char ch) jsm_tty_send_xchar() argument
165 if (ch == termios->c_cc[VSTART]) jsm_tty_send_xchar()
168 if (ch == termios->c_cc[VSTOP]) jsm_tty_send_xchar()
377 struct jsm_channel *ch; jsm_tty_init() local
410 ch = brd->channels[0]; jsm_tty_init()
414 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { jsm_tty_init()
419 spin_lock_init(&ch->ch_lock); jsm_tty_init()
422 ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i); jsm_tty_init()
424 ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i); jsm_tty_init()
426 ch->ch_bd = brd; jsm_tty_init()
427 ch->ch_portnum = i; jsm_tty_init()
430 ch->ch_close_delay = 250; jsm_tty_init()
432 init_waitqueue_head(&ch->ch_flags_wait); jsm_tty_init()
443 struct jsm_channel *ch; jsm_uart_port_init() local
457 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { jsm_uart_port_init()
492 struct jsm_channel *ch; jsm_remove_uart_port() local
511 ch = brd->channels[i]; jsm_remove_uart_port()
513 clear_bit(ch->uart_port.line, linemap); jsm_remove_uart_port()
521 void jsm_input(struct jsm_channel *ch) jsm_input() argument
536 jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n"); jsm_input()
538 if (!ch) jsm_input()
541 port = &ch->uart_port.state->port; jsm_input()
544 bd = ch->ch_bd; jsm_input()
548 spin_lock_irqsave(&ch->ch_lock, lock_flags); jsm_input()
557 head = ch->ch_r_head & rmask; jsm_input()
558 tail = ch->ch_r_tail & rmask; jsm_input()
562 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); jsm_input()
566 jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n"); jsm_input()
575 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_input()
577 data_len, ch->ch_portnum); jsm_input()
578 ch->ch_r_head = tail; jsm_input()
581 jsm_check_queue_flow_control(ch); jsm_input()
583 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); jsm_input()
590 if (ch->ch_flags & CH_STOPI) { jsm_input()
591 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); jsm_input()
592 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_input()
594 ch->ch_portnum, head, tail); jsm_input()
598 jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n"); jsm_input()
628 if (*(ch->ch_equeue +tail +i) & UART_LSR_BI) jsm_input()
629 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_BREAK); jsm_input()
630 else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE) jsm_input()
631 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_PARITY); jsm_input()
632 else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE) jsm_input()
633 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_FRAME); jsm_input()
635 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_NORMAL); jsm_input()
638 tty_insert_flip_string(port, ch->ch_rqueue + tail, s); jsm_input()
646 ch->ch_r_tail = tail & rmask; jsm_input()
647 ch->ch_e_tail = tail & rmask; jsm_input()
648 jsm_check_queue_flow_control(ch); jsm_input()
649 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); jsm_input()
654 jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n"); jsm_input()
657 static void jsm_carrier(struct jsm_channel *ch) jsm_carrier() argument
664 jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n"); jsm_carrier()
665 if (!ch) jsm_carrier()
668 bd = ch->ch_bd; jsm_carrier()
673 if (ch->ch_mistat & UART_MSR_DCD) { jsm_carrier()
674 jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n", jsm_carrier()
675 ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD); jsm_carrier()
679 if (ch->ch_c_cflag & CLOCAL) jsm_carrier()
682 jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n", jsm_carrier()
688 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { jsm_carrier()
695 jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n"); jsm_carrier()
697 if (waitqueue_active(&(ch->ch_flags_wait))) jsm_carrier()
698 wake_up_interruptible(&ch->ch_flags_wait); jsm_carrier()
704 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { jsm_carrier()
711 jsm_dbg(CARR, &ch->ch_bd->pci_dev, jsm_carrier()
714 if (waitqueue_active(&(ch->ch_flags_wait))) jsm_carrier()
715 wake_up_interruptible(&ch->ch_flags_wait); jsm_carrier()
727 if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) jsm_carrier()
741 if (waitqueue_active(&(ch->ch_flags_wait))) jsm_carrier()
742 wake_up_interruptible(&ch->ch_flags_wait); jsm_carrier()
749 ch->ch_flags |= CH_FCAR; jsm_carrier()
751 ch->ch_flags &= ~CH_FCAR; jsm_carrier()
754 ch->ch_flags |= CH_CD; jsm_carrier()
756 ch->ch_flags &= ~CH_CD; jsm_carrier()
760 void jsm_check_queue_flow_control(struct jsm_channel *ch) jsm_check_queue_flow_control() argument
762 struct board_ops *bd_ops = ch->ch_bd->bd_ops; jsm_check_queue_flow_control()
766 if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0) jsm_check_queue_flow_control()
786 if (ch->ch_c_cflag & CRTSCTS) { jsm_check_queue_flow_control()
787 if(!(ch->ch_flags & CH_RECEIVER_OFF)) { jsm_check_queue_flow_control()
788 bd_ops->disable_receiver(ch); jsm_check_queue_flow_control()
789 ch->ch_flags |= (CH_RECEIVER_OFF); jsm_check_queue_flow_control()
790 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_check_queue_flow_control()
796 else if (ch->ch_c_iflag & IXOFF) { jsm_check_queue_flow_control()
797 if (ch->ch_stops_sent <= MAX_STOPS_SENT) { jsm_check_queue_flow_control()
798 bd_ops->send_stop_character(ch); jsm_check_queue_flow_control()
799 ch->ch_stops_sent++; jsm_check_queue_flow_control()
800 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_check_queue_flow_control()
802 ch->ch_stops_sent); jsm_check_queue_flow_control()
824 if (ch->ch_c_cflag & CRTSCTS) { jsm_check_queue_flow_control()
825 if (ch->ch_flags & CH_RECEIVER_OFF) { jsm_check_queue_flow_control()
826 bd_ops->enable_receiver(ch); jsm_check_queue_flow_control()
827 ch->ch_flags &= ~(CH_RECEIVER_OFF); jsm_check_queue_flow_control()
828 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_check_queue_flow_control()
834 else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) { jsm_check_queue_flow_control()
835 ch->ch_stops_sent = 0; jsm_check_queue_flow_control()
836 bd_ops->send_start_character(ch); jsm_check_queue_flow_control()
837 jsm_dbg(READ, &ch->ch_bd->pci_dev, jsm_check_queue_flow_control()
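jsm_input() above works on a receive queue addressed by head and tail indices that are always masked with RQUEUEMASK before use. The sketch below shows one plausible way such masked indices yield the amount of queued data when the queue size is a power of two; RQUEUESIZE here is an assumption for the example, not a value taken from jsm.h.

/*
 * Sketch of masked head/tail ring arithmetic.  With a power-of-two
 * queue, (head - tail) & mask stays correct even after the indices
 * wrap around.
 */
#include <stdio.h>

#define RQUEUESIZE 8192u                  /* hypothetical power-of-two size */
#define RQUEUEMASK (RQUEUESIZE - 1)

static unsigned int queued_bytes(unsigned int head, unsigned int tail)
{
	head &= RQUEUEMASK;
	tail &= RQUEUEMASK;
	return (head - tail) & RQUEUEMASK;
}

int main(void)
{
	printf("%u\n", queued_bytes(100, 40));   /* 60 bytes waiting            */
	printf("%u\n", queued_bytes(10, 8180));  /* producer wrapped: 22 bytes  */
	return 0;
}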
H A Djsm.h114 void (*uart_init)(struct jsm_channel *ch);
115 void (*uart_off)(struct jsm_channel *ch);
116 void (*param)(struct jsm_channel *ch);
117 void (*assert_modem_signals)(struct jsm_channel *ch);
118 void (*flush_uart_write)(struct jsm_channel *ch);
119 void (*flush_uart_read)(struct jsm_channel *ch);
120 void (*disable_receiver)(struct jsm_channel *ch);
121 void (*enable_receiver)(struct jsm_channel *ch);
122 void (*send_break)(struct jsm_channel *ch);
123 void (*clear_break)(struct jsm_channel *ch);
124 void (*send_start_character)(struct jsm_channel *ch);
125 void (*send_stop_character)(struct jsm_channel *ch);
126 void (*copy_data_from_queue_to_uart)(struct jsm_channel *ch);
127 u32 (*get_uart_bytes_left)(struct jsm_channel *ch);
128 void (*send_immediate_char)(struct jsm_channel *ch, unsigned char);
444 void jsm_input(struct jsm_channel *ch);
445 void jsm_check_queue_flow_control(struct jsm_channel *ch);
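The jsm.h hits show struct board_ops, a table of function pointers that lets the core driver call whichever UART backend is present (the neo_* or cls_* routines listed earlier) without naming it, e.g. bd_ops->disable_receiver(ch) in jsm_check_queue_flow_control(). The self-contained sketch below illustrates that dispatch style with an invented channel struct and one trivial backend; it is a pattern demo, not the jsm structures.

/*
 * Sketch of an ops-table dispatch: core code calls through function
 * pointers, so swapping the backend only means pointing bd_ops at a
 * different table.
 */
#include <stdio.h>

struct channel;

struct board_ops {
	void (*uart_init)(struct channel *ch);
	void (*send_break)(struct channel *ch);
};

struct channel {
	const char *name;
	const struct board_ops *bd_ops;
};

static void neo_like_init(struct channel *ch)  { printf("%s: init\n", ch->name); }
static void neo_like_break(struct channel *ch) { printf("%s: break\n", ch->name); }

static const struct board_ops neo_like_ops = {
	.uart_init  = neo_like_init,
	.send_break = neo_like_break,
};

int main(void)
{
	struct channel ch = { .name = "ttyn0", .bd_ops = &neo_like_ops };

	ch.bd_ops->uart_init(&ch);    /* core never names the backend directly */
	ch.bd_ops->send_break(&ch);
	return 0;
}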
/linux-4.1.27/drivers/dma/
H A Dmic_x100_dma.c54 static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch) mic_dma_hw_ring_inc_head() argument
56 ch->head = mic_dma_hw_ring_inc(ch->head); mic_dma_hw_ring_inc_head()
88 static void mic_dma_cleanup(struct mic_dma_chan *ch) mic_dma_cleanup() argument
94 spin_lock(&ch->cleanup_lock); mic_dma_cleanup()
95 tail = mic_dma_read_cmp_cnt(ch); mic_dma_cleanup()
102 for (last_tail = ch->last_tail; tail != last_tail;) { mic_dma_cleanup()
103 tx = &ch->tx_array[last_tail]; mic_dma_cleanup()
115 ch->last_tail = last_tail; mic_dma_cleanup()
116 spin_unlock(&ch->cleanup_lock); mic_dma_cleanup()
131 static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required) mic_dma_avail_desc_ring_space() argument
133 struct device *dev = mic_dma_ch_to_device(ch); mic_dma_avail_desc_ring_space()
136 count = mic_dma_ring_count(ch->head, ch->last_tail); mic_dma_avail_desc_ring_space()
138 mic_dma_cleanup(ch); mic_dma_avail_desc_ring_space()
139 count = mic_dma_ring_count(ch->head, ch->last_tail); mic_dma_avail_desc_ring_space()
153 static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src, mic_dma_prog_memcpy_desc() argument
157 size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size; mic_dma_prog_memcpy_desc()
165 ret = mic_dma_avail_desc_ring_space(ch, num_desc); mic_dma_prog_memcpy_desc()
170 mic_dma_memcpy_desc(&ch->desc_ring[ch->head], mic_dma_prog_memcpy_desc()
172 mic_dma_hw_ring_inc_head(ch); mic_dma_prog_memcpy_desc()
181 static void mic_dma_prog_intr(struct mic_dma_chan *ch) mic_dma_prog_intr() argument
183 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, mic_dma_prog_intr()
184 ch->status_dest_micpa, false); mic_dma_prog_intr()
185 mic_dma_hw_ring_inc_head(ch); mic_dma_prog_intr()
186 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, mic_dma_prog_intr()
187 ch->status_dest_micpa, true); mic_dma_prog_intr()
188 mic_dma_hw_ring_inc_head(ch); mic_dma_prog_intr()
192 static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src, mic_dma_do_dma() argument
195 if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) mic_dma_do_dma()
199 mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, mic_dma_do_dma()
200 ch->status_dest_micpa, false); mic_dma_do_dma()
201 mic_dma_hw_ring_inc_head(ch); mic_dma_do_dma()
205 mic_dma_prog_intr(ch); mic_dma_do_dma()
210 static inline void mic_dma_issue_pending(struct dma_chan *ch) mic_dma_issue_pending() argument
212 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_issue_pending()
235 static inline void mic_dma_update_pending(struct mic_dma_chan *ch) mic_dma_update_pending() argument
237 if (mic_dma_ring_count(ch->issued, ch->submitted) mic_dma_update_pending()
239 mic_dma_issue_pending(&ch->api_ch); mic_dma_update_pending()
262 allocate_tx(struct mic_dma_chan *ch) allocate_tx() argument
264 u32 idx = mic_dma_hw_ring_dec(ch->head); allocate_tx()
265 struct dma_async_tx_descriptor *tx = &ch->tx_array[idx]; allocate_tx()
267 dma_async_tx_descriptor_init(tx, &ch->api_ch); allocate_tx()
278 mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest, mic_dma_prep_memcpy_lock() argument
281 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_prep_memcpy_lock()
298 mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags) mic_dma_prep_interrupt_lock() argument
300 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_prep_interrupt_lock()
313 mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie, mic_dma_tx_status() argument
316 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_tx_status()
318 if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate)) mic_dma_tx_status()
321 return dma_cookie_status(ch, cookie, txstate); mic_dma_tx_status()
332 struct mic_dma_chan *ch = ((struct mic_dma_chan *)data); mic_dma_intr_handler() local
334 mic_dma_ack_interrupt(ch); mic_dma_intr_handler()
338 static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch) mic_dma_alloc_desc_ring() argument
340 u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring); mic_dma_alloc_desc_ring()
341 struct device *dev = &to_mbus_device(ch)->dev; mic_dma_alloc_desc_ring()
344 ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL); mic_dma_alloc_desc_ring()
346 if (!ch->desc_ring) mic_dma_alloc_desc_ring()
349 ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring, mic_dma_alloc_desc_ring()
351 if (dma_mapping_error(dev, ch->desc_ring_micpa)) mic_dma_alloc_desc_ring()
354 ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array)); mic_dma_alloc_desc_ring()
355 if (!ch->tx_array) mic_dma_alloc_desc_ring()
359 dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size, mic_dma_alloc_desc_ring()
362 kfree(ch->desc_ring); mic_dma_alloc_desc_ring()
366 static void mic_dma_free_desc_ring(struct mic_dma_chan *ch) mic_dma_free_desc_ring() argument
368 u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring); mic_dma_free_desc_ring()
370 vfree(ch->tx_array); mic_dma_free_desc_ring()
372 dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa, mic_dma_free_desc_ring()
374 kfree(ch->desc_ring); mic_dma_free_desc_ring()
375 ch->desc_ring = NULL; mic_dma_free_desc_ring()
378 static void mic_dma_free_status_dest(struct mic_dma_chan *ch) mic_dma_free_status_dest() argument
380 dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa, mic_dma_free_status_dest()
382 kfree(ch->status_dest); mic_dma_free_status_dest()
385 static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch) mic_dma_alloc_status_dest() argument
387 struct device *dev = &to_mbus_device(ch)->dev; mic_dma_alloc_status_dest()
389 ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL); mic_dma_alloc_status_dest()
390 if (!ch->status_dest) mic_dma_alloc_status_dest()
392 ch->status_dest_micpa = dma_map_single(dev, ch->status_dest, mic_dma_alloc_status_dest()
394 if (dma_mapping_error(dev, ch->status_dest_micpa)) { mic_dma_alloc_status_dest()
395 kfree(ch->status_dest); mic_dma_alloc_status_dest()
396 ch->status_dest = NULL; mic_dma_alloc_status_dest()
402 static int mic_dma_check_chan(struct mic_dma_chan *ch) mic_dma_check_chan() argument
404 if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) || mic_dma_check_chan()
405 mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) { mic_dma_check_chan()
406 mic_dma_disable_chan(ch); mic_dma_check_chan()
407 mic_dma_chan_mask_intr(ch); mic_dma_check_chan()
408 dev_err(mic_dma_ch_to_device(ch), mic_dma_check_chan()
410 __func__, __LINE__, ch->ch_num); mic_dma_check_chan()
416 static int mic_dma_chan_setup(struct mic_dma_chan *ch) mic_dma_chan_setup() argument
418 if (MIC_DMA_CHAN_MIC == ch->owner) mic_dma_chan_setup()
419 mic_dma_chan_set_owner(ch); mic_dma_chan_setup()
420 mic_dma_disable_chan(ch); mic_dma_chan_setup()
421 mic_dma_chan_mask_intr(ch); mic_dma_chan_setup()
422 mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0); mic_dma_chan_setup()
423 mic_dma_chan_set_desc_ring(ch); mic_dma_chan_setup()
424 ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR); mic_dma_chan_setup()
425 ch->head = ch->last_tail; mic_dma_chan_setup()
426 ch->issued = 0; mic_dma_chan_setup()
427 mic_dma_chan_unmask_intr(ch); mic_dma_chan_setup()
428 mic_dma_enable_chan(ch); mic_dma_chan_setup()
429 return mic_dma_check_chan(ch); mic_dma_chan_setup()
432 static void mic_dma_chan_destroy(struct mic_dma_chan *ch) mic_dma_chan_destroy() argument
434 mic_dma_disable_chan(ch); mic_dma_chan_destroy()
435 mic_dma_chan_mask_intr(ch); mic_dma_chan_destroy()
443 static int mic_dma_setup_irq(struct mic_dma_chan *ch) mic_dma_setup_irq() argument
445 ch->cookie = mic_dma_setup_irq()
446 to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch), mic_dma_setup_irq()
448 "mic dma_channel", ch, ch->ch_num); mic_dma_setup_irq()
449 if (IS_ERR(ch->cookie)) mic_dma_setup_irq()
450 return IS_ERR(ch->cookie); mic_dma_setup_irq()
454 static inline void mic_dma_free_irq(struct mic_dma_chan *ch) mic_dma_free_irq() argument
456 to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch); mic_dma_free_irq()
459 static int mic_dma_chan_init(struct mic_dma_chan *ch) mic_dma_chan_init() argument
461 int ret = mic_dma_alloc_desc_ring(ch); mic_dma_chan_init()
465 ret = mic_dma_alloc_status_dest(ch); mic_dma_chan_init()
468 ret = mic_dma_chan_setup(ch); mic_dma_chan_init()
473 mic_dma_free_status_dest(ch); mic_dma_chan_init()
475 mic_dma_free_desc_ring(ch); mic_dma_chan_init()
480 static int mic_dma_drain_chan(struct mic_dma_chan *ch) mic_dma_drain_chan() argument
486 tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE); mic_dma_drain_chan()
496 err = dma_sync_wait(&ch->api_ch, cookie); mic_dma_drain_chan()
498 dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n", mic_dma_drain_chan()
499 __func__, __LINE__, ch->ch_num); mic_dma_drain_chan()
503 mic_dma_cleanup(ch); mic_dma_drain_chan()
507 static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch) mic_dma_chan_uninit() argument
509 mic_dma_chan_destroy(ch); mic_dma_chan_uninit()
510 mic_dma_cleanup(ch); mic_dma_chan_uninit()
511 mic_dma_free_status_dest(ch); mic_dma_chan_uninit()
512 mic_dma_free_desc_ring(ch); mic_dma_chan_uninit()
519 struct mic_dma_chan *ch; mic_dma_init() local
524 ch = &mic_dma_dev->mic_ch[i]; mic_dma_init()
525 data = (unsigned long)ch; mic_dma_init()
526 ch->ch_num = i; mic_dma_init()
527 ch->owner = owner; mic_dma_init()
528 spin_lock_init(&ch->cleanup_lock); mic_dma_init()
529 spin_lock_init(&ch->prep_lock); mic_dma_init()
530 spin_lock_init(&ch->issue_lock); mic_dma_init()
531 ret = mic_dma_setup_irq(ch); mic_dma_init()
538 mic_dma_free_irq(ch); mic_dma_init()
545 struct mic_dma_chan *ch; mic_dma_uninit() local
548 ch = &mic_dma_dev->mic_ch[i]; mic_dma_uninit()
549 mic_dma_free_irq(ch); mic_dma_uninit()
553 static int mic_dma_alloc_chan_resources(struct dma_chan *ch) mic_dma_alloc_chan_resources() argument
555 int ret = mic_dma_chan_init(to_mic_dma_chan(ch)); mic_dma_alloc_chan_resources()
561 static void mic_dma_free_chan_resources(struct dma_chan *ch) mic_dma_free_chan_resources() argument
563 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); mic_dma_free_chan_resources()
658 struct mic_dma_chan *ch; mic_dma_reg_seq_show() local
668 ch = &mic_dma_dev->mic_ch[i]; mic_dma_reg_seq_show()
669 chan_num = ch->ch_num; mic_dma_reg_seq_show()
672 mic_dma_read_reg(ch, MIC_DMA_REG_DCAR), mic_dma_reg_seq_show()
673 mic_dma_read_reg(ch, MIC_DMA_REG_DTPR), mic_dma_reg_seq_show()
674 mic_dma_read_reg(ch, MIC_DMA_REG_DHPR), mic_dma_reg_seq_show()
675 mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI)); mic_dma_reg_seq_show()
677 mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO), mic_dma_reg_seq_show()
678 mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR), mic_dma_reg_seq_show()
679 mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK), mic_dma_reg_seq_show()
680 mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT)); mic_dma_reg_seq_show()
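In the mic_x100_dma.c hits above, mic_dma_avail_desc_ring_space() derives free descriptor slots from the distance between the software head and the last completed tail, running cleanup when the ring looks full. The sketch below shows that style of accounting in isolation; RING_SIZE and the one-slot reserve are assumptions for the example rather than values taken from the driver.

/*
 * Sketch of descriptor-ring accounting: used = (head - tail) mod size,
 * and one slot is kept unused so that head == tail means "empty".
 */
#include <stdio.h>

#define RING_SIZE 128u                    /* hypothetical descriptor count */

static unsigned int ring_count(unsigned int head, unsigned int tail)
{
	return (head - tail) % RING_SIZE;     /* entries currently in use      */
}

static unsigned int ring_space(unsigned int head, unsigned int tail)
{
	return RING_SIZE - 1 - ring_count(head, tail);
}

int main(void)
{
	printf("%u used, %u free\n", ring_count(10, 2),  ring_space(10, 2));
	printf("%u used, %u free\n", ring_count(3, 120), ring_space(3, 120)); /* wrapped */
	return 0;
}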
H A Dmoxart-dma.c187 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_terminate_all() local
192 dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch); moxart_terminate_all()
194 spin_lock_irqsave(&ch->vc.lock, flags); moxart_terminate_all()
196 if (ch->desc) { moxart_terminate_all()
197 moxart_dma_desc_free(&ch->desc->vd); moxart_terminate_all()
198 ch->desc = NULL; moxart_terminate_all()
201 ctrl = readl(ch->base + REG_OFF_CTRL); moxart_terminate_all()
203 writel(ctrl, ch->base + REG_OFF_CTRL); moxart_terminate_all()
205 vchan_get_all_descriptors(&ch->vc, &head); moxart_terminate_all()
206 spin_unlock_irqrestore(&ch->vc.lock, flags); moxart_terminate_all()
207 vchan_dma_desc_free_list(&ch->vc, &head); moxart_terminate_all()
215 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_slave_config() local
218 ch->cfg = *cfg; moxart_slave_config()
220 ctrl = readl(ch->base + REG_OFF_CTRL); moxart_slave_config()
225 switch (ch->cfg.src_addr_width) { moxart_slave_config()
228 if (ch->cfg.direction != DMA_MEM_TO_DEV) moxart_slave_config()
235 if (ch->cfg.direction != DMA_MEM_TO_DEV) moxart_slave_config()
242 if (ch->cfg.direction != DMA_MEM_TO_DEV) moxart_slave_config()
251 if (ch->cfg.direction == DMA_MEM_TO_DEV) { moxart_slave_config()
254 ctrl |= (ch->line_reqno << 16 & moxart_slave_config()
259 ctrl |= (ch->line_reqno << 24 & moxart_slave_config()
263 writel(ctrl, ch->base + REG_OFF_CTRL); moxart_slave_config()
273 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_prep_slave_sg() local
288 dev_addr = ch->cfg.src_addr; moxart_prep_slave_sg()
289 dev_width = ch->cfg.src_addr_width; moxart_prep_slave_sg()
291 dev_addr = ch->cfg.dst_addr; moxart_prep_slave_sg()
292 dev_width = ch->cfg.dst_addr_width; moxart_prep_slave_sg()
326 ch->error = 0; moxart_prep_slave_sg()
328 return vchan_tx_prep(&ch->vc, &d->vd, tx_flags); moxart_prep_slave_sg()
336 struct moxart_chan *ch; moxart_of_xlate() local
342 ch = to_moxart_dma_chan(chan); moxart_of_xlate()
343 ch->line_reqno = dma_spec->args[0]; moxart_of_xlate()
350 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_alloc_chan_resources() local
353 __func__, ch->ch_num); moxart_alloc_chan_resources()
354 ch->allocated = 1; moxart_alloc_chan_resources()
361 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_free_chan_resources() local
363 vchan_free_chan_resources(&ch->vc); moxart_free_chan_resources()
366 __func__, ch->ch_num); moxart_free_chan_resources()
367 ch->allocated = 0; moxart_free_chan_resources()
370 static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr, moxart_dma_set_params() argument
373 writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE); moxart_dma_set_params()
374 writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST); moxart_dma_set_params()
377 static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len) moxart_set_transfer_params() argument
379 struct moxart_desc *d = ch->desc; moxart_set_transfer_params()
388 writel(d->dma_cycles, ch->base + REG_OFF_CYCLES); moxart_set_transfer_params()
390 dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n", moxart_set_transfer_params()
394 static void moxart_start_dma(struct moxart_chan *ch) moxart_start_dma() argument
398 ctrl = readl(ch->base + REG_OFF_CTRL); moxart_start_dma()
400 writel(ctrl, ch->base + REG_OFF_CTRL); moxart_start_dma()
403 static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx) moxart_dma_start_sg() argument
405 struct moxart_desc *d = ch->desc; moxart_dma_start_sg()
406 struct moxart_sg *sg = ch->desc->sg + idx; moxart_dma_start_sg()
408 if (ch->desc->dma_dir == DMA_MEM_TO_DEV) moxart_dma_start_sg()
409 moxart_dma_set_params(ch, sg->addr, d->dev_addr); moxart_dma_start_sg()
410 else if (ch->desc->dma_dir == DMA_DEV_TO_MEM) moxart_dma_start_sg()
411 moxart_dma_set_params(ch, d->dev_addr, sg->addr); moxart_dma_start_sg()
413 moxart_set_transfer_params(ch, sg->len); moxart_dma_start_sg()
415 moxart_start_dma(ch); moxart_dma_start_sg()
420 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_dma_start_desc() local
423 vd = vchan_next_desc(&ch->vc); moxart_dma_start_desc()
426 ch->desc = NULL; moxart_dma_start_desc()
432 ch->desc = to_moxart_dma_desc(&vd->tx); moxart_dma_start_desc()
433 ch->sgidx = 0; moxart_dma_start_desc()
435 moxart_dma_start_sg(ch, 0); moxart_dma_start_desc()
440 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_issue_pending() local
443 spin_lock_irqsave(&ch->vc.lock, flags); moxart_issue_pending()
444 if (vchan_issue_pending(&ch->vc) && !ch->desc) moxart_issue_pending()
446 spin_unlock_irqrestore(&ch->vc.lock, flags); moxart_issue_pending()
461 static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch) moxart_dma_desc_size_in_flight() argument
466 size = moxart_dma_desc_size(ch->desc, ch->sgidx); moxart_dma_desc_size_in_flight()
467 cycles = readl(ch->base + REG_OFF_CYCLES); moxart_dma_desc_size_in_flight()
468 completed_cycles = (ch->desc->dma_cycles - cycles); moxart_dma_desc_size_in_flight()
469 size -= completed_cycles << es_bytes[ch->desc->es]; moxart_dma_desc_size_in_flight()
471 dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size); moxart_dma_desc_size_in_flight()
480 struct moxart_chan *ch = to_moxart_dma_chan(chan); moxart_tx_status() local
491 spin_lock_irqsave(&ch->vc.lock, flags); moxart_tx_status()
492 vd = vchan_find_desc(&ch->vc, cookie); moxart_tx_status()
496 } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) { moxart_tx_status()
497 txstate->residue = moxart_dma_desc_size_in_flight(ch); moxart_tx_status()
499 spin_unlock_irqrestore(&ch->vc.lock, flags); moxart_tx_status()
501 if (ch->error) moxart_tx_status()
524 struct moxart_chan *ch = &mc->slave_chans[0]; moxart_dma_interrupt() local
529 dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__); moxart_dma_interrupt()
531 for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { moxart_dma_interrupt()
532 if (!ch->allocated) moxart_dma_interrupt()
535 ctrl = readl(ch->base + REG_OFF_CTRL); moxart_dma_interrupt()
537 dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n", moxart_dma_interrupt()
538 __func__, ch, ch->base, ctrl); moxart_dma_interrupt()
542 if (ch->desc) { moxart_dma_interrupt()
543 spin_lock_irqsave(&ch->vc.lock, flags); moxart_dma_interrupt()
544 if (++ch->sgidx < ch->desc->sglen) { moxart_dma_interrupt()
545 moxart_dma_start_sg(ch, ch->sgidx); moxart_dma_interrupt()
547 vchan_cookie_complete(&ch->desc->vd); moxart_dma_interrupt()
548 moxart_dma_start_desc(&ch->vc.chan); moxart_dma_interrupt()
550 spin_unlock_irqrestore(&ch->vc.lock, flags); moxart_dma_interrupt()
556 ch->error = 1; moxart_dma_interrupt()
559 writel(ctrl, ch->base + REG_OFF_CTRL); moxart_dma_interrupt()
573 struct moxart_chan *ch; moxart_probe() local
599 ch = &mdc->slave_chans[0]; moxart_probe()
600 for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { moxart_probe()
601 ch->ch_num = i; moxart_probe()
602 ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE; moxart_probe()
603 ch->allocated = 0; moxart_probe()
605 ch->vc.desc_free = moxart_dma_desc_free; moxart_probe()
606 vchan_init(&ch->vc, &mdc->dma_slave); moxart_probe()
608 dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n", moxart_probe()
609 __func__, i, ch->ch_num, ch->base); moxart_probe()
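The moxart-dma.c hits above show an interrupt-driven scatter-gather walk: moxart_dma_interrupt() bumps ch->sgidx on each completion and either programs the next segment or finishes the descriptor. Below is a minimal user-space sketch of that control flow only; the types and helper names are simplified stand-ins, and no real hardware programming is modelled.

/* Illustrative sketch of the per-interrupt SG walk, not the driver itself. */
#include <stdio.h>
#include <stddef.h>

struct sg_entry { unsigned long addr; size_t len; };

struct desc {
	struct sg_entry sg[3];
	unsigned int sglen;
};

struct chan {
	struct desc *desc;
	unsigned int sgidx;
};

static void start_sg(struct chan *ch, unsigned int idx)
{
	/* Stand-in for programming source/dest/length registers. */
	printf("program hw: addr=%#lx len=%zu\n",
	       ch->desc->sg[idx].addr, ch->desc->sg[idx].len);
}

/* Called once per "transfer complete" interrupt. */
static void on_complete_irq(struct chan *ch)
{
	if (++ch->sgidx < ch->desc->sglen)
		start_sg(ch, ch->sgidx);      /* chain the next segment */
	else
		printf("descriptor complete\n");
}

int main(void)
{
	struct desc d = { { {0x1000, 64}, {0x2000, 64}, {0x3000, 32} }, 3 };
	struct chan ch = { &d, 0 };

	start_sg(&ch, 0);
	for (int i = 0; i < 3; i++)
		on_complete_irq(&ch);
	return 0;
}

The pre-increment mirrors the driver's ++ch->sgidx test, so the final completion event falls through to the descriptor-complete branch.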
H A Dmic_x100_dma.h151 static inline struct mic_dma_chan *to_mic_dma_chan(struct dma_chan *ch) to_mic_dma_chan() argument
153 return container_of(ch, struct mic_dma_chan, api_ch); to_mic_dma_chan()
156 static inline struct mic_dma_device *to_mic_dma_dev(struct mic_dma_chan *ch) to_mic_dma_dev() argument
160 (ch - ch->ch_num), struct mic_dma_device, mic_ch); to_mic_dma_dev()
163 static inline struct mbus_device *to_mbus_device(struct mic_dma_chan *ch) to_mbus_device() argument
165 return to_mic_dma_dev(ch)->mbdev; to_mbus_device()
168 static inline struct mbus_hw_ops *to_mbus_hw_ops(struct mic_dma_chan *ch) to_mbus_hw_ops() argument
170 return to_mbus_device(ch)->hw_ops; to_mbus_hw_ops()
173 static inline struct device *mic_dma_ch_to_device(struct mic_dma_chan *ch) mic_dma_ch_to_device() argument
175 return to_mic_dma_dev(ch)->dma_dev.dev; mic_dma_ch_to_device()
178 static inline void __iomem *mic_dma_chan_to_mmio(struct mic_dma_chan *ch) mic_dma_chan_to_mmio() argument
180 return to_mic_dma_dev(ch)->mmio; mic_dma_chan_to_mmio()
183 static inline u32 mic_dma_read_reg(struct mic_dma_chan *ch, u32 reg) mic_dma_read_reg() argument
185 return ioread32(mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE + mic_dma_read_reg()
186 ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg); mic_dma_read_reg()
189 static inline void mic_dma_write_reg(struct mic_dma_chan *ch, u32 reg, u32 val) mic_dma_write_reg() argument
191 iowrite32(val, mic_dma_chan_to_mmio(ch) + MIC_DMA_SBOX_CH_BASE + mic_dma_write_reg()
192 ch->ch_num * MIC_DMA_SBOX_CHAN_OFF + reg); mic_dma_write_reg()
195 static inline u32 mic_dma_mmio_read(struct mic_dma_chan *ch, u32 offset) mic_dma_mmio_read() argument
197 return ioread32(mic_dma_chan_to_mmio(ch) + offset); mic_dma_mmio_read()
200 static inline void mic_dma_mmio_write(struct mic_dma_chan *ch, u32 val, mic_dma_mmio_write() argument
203 iowrite32(val, mic_dma_chan_to_mmio(ch) + offset); mic_dma_mmio_write()
206 static inline u32 mic_dma_read_cmp_cnt(struct mic_dma_chan *ch) mic_dma_read_cmp_cnt() argument
208 return mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & mic_dma_read_cmp_cnt()
212 static inline void mic_dma_chan_set_owner(struct mic_dma_chan *ch) mic_dma_chan_set_owner() argument
214 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_chan_set_owner()
215 u32 chan_num = ch->ch_num; mic_dma_chan_set_owner()
217 dcr = (dcr & ~(0x1 << (chan_num * 2))) | (ch->owner << (chan_num * 2)); mic_dma_chan_set_owner()
218 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_chan_set_owner()
221 static inline void mic_dma_enable_chan(struct mic_dma_chan *ch) mic_dma_enable_chan() argument
223 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_enable_chan()
225 dcr |= 2 << (ch->ch_num << 1); mic_dma_enable_chan()
226 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_enable_chan()
229 static inline void mic_dma_disable_chan(struct mic_dma_chan *ch) mic_dma_disable_chan() argument
231 u32 dcr = mic_dma_mmio_read(ch, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_disable_chan()
233 dcr &= ~(2 << (ch->ch_num << 1)); mic_dma_disable_chan()
234 mic_dma_mmio_write(ch, dcr, MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR); mic_dma_disable_chan()
237 static void mic_dma_chan_set_desc_ring(struct mic_dma_chan *ch) mic_dma_chan_set_desc_ring() argument
240 dma_addr_t desc_ring_micpa = ch->desc_ring_micpa; mic_dma_chan_set_desc_ring()
243 if (MIC_DMA_CHAN_MIC == ch->owner) { mic_dma_chan_set_desc_ring()
251 mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_LO, (u32) desc_ring_micpa); mic_dma_chan_set_desc_ring()
252 mic_dma_write_reg(ch, MIC_DMA_REG_DRAR_HI, drar_hi); mic_dma_chan_set_desc_ring()
255 static inline void mic_dma_chan_mask_intr(struct mic_dma_chan *ch) mic_dma_chan_mask_intr() argument
257 u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR); mic_dma_chan_mask_intr()
259 if (MIC_DMA_CHAN_MIC == ch->owner) mic_dma_chan_mask_intr()
263 mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar); mic_dma_chan_mask_intr()
266 static inline void mic_dma_chan_unmask_intr(struct mic_dma_chan *ch) mic_dma_chan_unmask_intr() argument
268 u32 dcar = mic_dma_read_reg(ch, MIC_DMA_REG_DCAR); mic_dma_chan_unmask_intr()
270 if (MIC_DMA_CHAN_MIC == ch->owner) mic_dma_chan_unmask_intr()
274 mic_dma_write_reg(ch, MIC_DMA_REG_DCAR, dcar); mic_dma_chan_unmask_intr()
277 static void mic_dma_ack_interrupt(struct mic_dma_chan *ch) mic_dma_ack_interrupt() argument
279 if (MIC_DMA_CHAN_MIC == ch->owner) { mic_dma_ack_interrupt()
281 mic_dma_chan_mask_intr(ch); mic_dma_ack_interrupt()
282 mic_dma_chan_unmask_intr(ch); mic_dma_ack_interrupt()
284 to_mbus_hw_ops(ch)->ack_interrupt(to_mbus_device(ch), ch->ch_num); mic_dma_ack_interrupt()
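The mic_x100_dma.h helpers above compute per-channel register addresses (MIC_DMA_SBOX_CH_BASE + ch_num * MIC_DMA_SBOX_CHAN_OFF + reg) and keep a two-bit owner/enable field per channel in the DCR word. The following is a standalone sketch of just that DCR bit arithmetic, assuming the same two-bits-per-channel layout; the constants and helper names are invented for illustration and the real SBOX MMIO access is not modelled.

/* Sketch of the 2-bits-per-channel DCR field update on a plain variable. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_OWNER_HOST 0u   /* assumed: low bit of the pair encodes the owner */
#define SKETCH_ENABLE_BIT 2u   /* assumed: high bit of the pair enables the channel */

static uint32_t sketch_set_owner(uint32_t dcr, int ch_num, uint32_t owner)
{
	dcr &= ~(0x1u << (ch_num * 2));   /* clear the owner bit of this channel's pair */
	dcr |= owner << (ch_num * 2);     /* set the new owner */
	return dcr;
}

static uint32_t sketch_enable(uint32_t dcr, int ch_num)
{
	return dcr | (SKETCH_ENABLE_BIT << (ch_num << 1));
}

int main(void)
{
	uint32_t dcr = 0;

	dcr = sketch_set_owner(dcr, 3, SKETCH_OWNER_HOST);
	dcr = sketch_enable(dcr, 3);
	printf("dcr = 0x%08x\n", (unsigned int)dcr);   /* prints 0x00000080 */
	return 0;
}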
H A Dfsl-edma.c231 u32 ch = fsl_chan->vchan.chan.chan_id; fsl_edma_enable_request() local
233 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI); fsl_edma_enable_request()
234 edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ); fsl_edma_enable_request()
240 u32 ch = fsl_chan->vchan.chan.chan_id; fsl_edma_disable_request() local
242 edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ); fsl_edma_disable_request()
243 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI); fsl_edma_disable_request()
249 u32 ch = fsl_chan->vchan.chan.chan_id; fsl_edma_chan_mux() local
255 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; fsl_edma_chan_mux()
362 u32 ch = fsl_chan->vchan.chan.chan_id; fsl_edma_desc_residue() local
377 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch)); fsl_edma_desc_residue()
379 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch)); fsl_edma_desc_residue()
434 u32 ch = fsl_chan->vchan.chan.chan_id; fsl_edma_set_tcd_regs() local
441 edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch)); fsl_edma_set_tcd_regs()
442 edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch)); fsl_edma_set_tcd_regs()
443 edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch)); fsl_edma_set_tcd_regs()
445 edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch)); fsl_edma_set_tcd_regs()
446 edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch)); fsl_edma_set_tcd_regs()
448 edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch)); fsl_edma_set_tcd_regs()
449 edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch)); fsl_edma_set_tcd_regs()
451 edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch)); fsl_edma_set_tcd_regs()
452 edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch)); fsl_edma_set_tcd_regs()
453 edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch)); fsl_edma_set_tcd_regs()
455 edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch)); fsl_edma_set_tcd_regs()
457 edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch)); fsl_edma_set_tcd_regs()
656 unsigned int intr, ch; fsl_edma_tx_handler() local
666 for (ch = 0; ch < fsl_edma->n_chans; ch++) { fsl_edma_tx_handler()
667 if (intr & (0x1 << ch)) { fsl_edma_tx_handler()
668 edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), fsl_edma_tx_handler()
671 fsl_chan = &fsl_edma->chans[ch]; fsl_edma_tx_handler()
695 unsigned int err, ch; fsl_edma_err_handler() local
701 for (ch = 0; ch < fsl_edma->n_chans; ch++) { fsl_edma_err_handler()
702 if (err & (0x1 << ch)) { fsl_edma_err_handler()
703 fsl_edma_disable_request(&fsl_edma->chans[ch]); fsl_edma_err_handler()
704 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), fsl_edma_err_handler()
706 fsl_edma->chans[ch].status = DMA_ERROR; fsl_edma_err_handler()
H A Dsirf-dma.c163 int ch; sirfsoc_dma_irq() local
166 while ((ch = fls(is) - 1) >= 0) { sirfsoc_dma_irq()
167 is &= ~(1 << ch); sirfsoc_dma_irq()
168 writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); sirfsoc_dma_irq()
169 schan = &sdma->channels[ch]; sirfsoc_dma_irq()
817 int ch; sirfsoc_dma_pm_suspend() local
834 for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { sirfsoc_dma_pm_suspend()
835 schan = &sdma->channels[ch]; sirfsoc_dma_pm_suspend()
841 save->ctrl[ch] = readl_relaxed(sdma->base + sirfsoc_dma_pm_suspend()
842 ch * 0x10 + SIRFSOC_DMA_CH_CTRL); sirfsoc_dma_pm_suspend()
858 int ch; sirfsoc_dma_pm_resume() local
867 for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { sirfsoc_dma_pm_resume()
868 schan = &sdma->channels[ch]; sirfsoc_dma_pm_resume()
875 sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4); sirfsoc_dma_pm_resume()
877 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN); sirfsoc_dma_pm_resume()
879 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN); sirfsoc_dma_pm_resume()
880 writel_relaxed(save->ctrl[ch], sirfsoc_dma_pm_resume()
881 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); sirfsoc_dma_pm_resume()
883 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); sirfsoc_dma_pm_resume()
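sirfsoc_dma_irq() above drains the interrupt status word with an fls()-based loop: take the highest set bit as the channel number, clear it, service that channel, and repeat until the word is empty. The same idiom in standalone form, with a plain __builtin_clz-based fls() standing in for the kernel helper:

#include <stdio.h>

static int fls(unsigned int x)   /* 1-based index of the highest set bit, 0 if none */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int is = 0x25;      /* pretend channels 0, 2 and 5 raised interrupts */
	int ch;

	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1u << ch);   /* acknowledge this channel */
		printf("service channel %d\n", ch);
	}
	return 0;
}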
H A Damba-pl08x.c351 static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) pl08x_phy_channel_busy() argument
355 val = readl(ch->reg_config); pl08x_phy_channel_busy()
435 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) pl08x_pause_phy_chan() argument
441 val = readl(ch->reg_config); pl08x_pause_phy_chan()
443 writel(val, ch->reg_config); pl08x_pause_phy_chan()
447 if (!pl08x_phy_channel_busy(ch)) pl08x_pause_phy_chan()
451 if (pl08x_phy_channel_busy(ch)) pl08x_pause_phy_chan()
452 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); pl08x_pause_phy_chan()
455 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) pl08x_resume_phy_chan() argument
460 val = readl(ch->reg_config); pl08x_resume_phy_chan()
462 writel(val, ch->reg_config); pl08x_resume_phy_chan()
472 struct pl08x_phy_chan *ch) pl08x_terminate_phy_chan()
474 u32 val = readl(ch->reg_config); pl08x_terminate_phy_chan()
479 writel(val, ch->reg_config); pl08x_terminate_phy_chan()
481 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); pl08x_terminate_phy_chan()
482 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); pl08x_terminate_phy_chan()
530 struct pl08x_phy_chan *ch; pl08x_getbytes_chan() local
537 ch = plchan->phychan; pl08x_getbytes_chan()
540 if (!ch || !txd) pl08x_getbytes_chan()
547 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; pl08x_getbytes_chan()
552 readl(ch->base + PL080_CH_CONTROL), pl08x_getbytes_chan()
553 readl(ch->base + PL080S_CH_CONTROL2)); pl08x_getbytes_chan()
555 bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); pl08x_getbytes_chan()
604 struct pl08x_phy_chan *ch = NULL; pl08x_get_phy_channel() local
609 ch = &pl08x->phy_chans[i]; pl08x_get_phy_channel()
611 spin_lock_irqsave(&ch->lock, flags); pl08x_get_phy_channel()
613 if (!ch->locked && !ch->serving) { pl08x_get_phy_channel()
614 ch->serving = virt_chan; pl08x_get_phy_channel()
615 spin_unlock_irqrestore(&ch->lock, flags); pl08x_get_phy_channel()
619 spin_unlock_irqrestore(&ch->lock, flags); pl08x_get_phy_channel()
627 return ch; pl08x_get_phy_channel()
632 struct pl08x_phy_chan *ch) pl08x_put_phy_channel()
634 ch->serving = NULL; pl08x_put_phy_channel()
645 struct pl08x_phy_chan *ch; pl08x_phy_alloc_and_start() local
647 ch = pl08x_get_phy_channel(pl08x, plchan); pl08x_phy_alloc_and_start()
648 if (!ch) { pl08x_phy_alloc_and_start()
655 ch->id, plchan->name); pl08x_phy_alloc_and_start()
657 plchan->phychan = ch; pl08x_phy_alloc_and_start()
662 static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch, pl08x_phy_reassign_start() argument
668 ch->id, plchan->name); pl08x_phy_reassign_start()
675 ch->serving = plchan; pl08x_phy_reassign_start()
676 plchan->phychan = ch; pl08x_phy_reassign_start()
1965 struct pl08x_phy_chan *ch; pl08x_debugfs_show() local
1975 ch = &pl08x->phy_chans[i]; pl08x_debugfs_show()
1977 spin_lock_irqsave(&ch->lock, flags); pl08x_debugfs_show()
1978 virt_chan = ch->serving; pl08x_debugfs_show()
1981 ch->id, pl08x_debugfs_show()
1983 ch->locked ? " LOCKED" : ""); pl08x_debugfs_show()
1985 spin_unlock_irqrestore(&ch->lock, flags); pl08x_debugfs_show()
2159 struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; pl08x_probe() local
2161 ch->id = i; pl08x_probe()
2162 ch->base = pl08x->base + PL080_Cx_BASE(i); pl08x_probe()
2163 ch->reg_config = ch->base + vd->config_offset; pl08x_probe()
2164 spin_lock_init(&ch->lock); pl08x_probe()
2174 val = readl(ch->reg_config); pl08x_probe()
2177 ch->locked = true; pl08x_probe()
2182 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); pl08x_probe()
471 pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) pl08x_terminate_phy_chan() argument
631 pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) pl08x_put_phy_channel() argument
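pl08x_get_phy_channel() above claims a free physical channel by walking the pool and taking the first entry that is neither locked out nor already serving a virtual channel, each check done under the per-channel lock. Below is a rough user-space equivalent of that claim pattern, using a pthread mutex in place of the kernel spinlock; the structure and field names are simplified stand-ins, not the driver's own.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

#define NUM_PHY_CHANS 8

struct phy_chan {
	int id;
	int locked;                  /* permanently reserved, never usable */
	void *serving;               /* virtual channel currently using this one */
	pthread_mutex_t lock;
};

static struct phy_chan pool[NUM_PHY_CHANS];

static struct phy_chan *get_phy_channel(void *virt_chan)
{
	for (int i = 0; i < NUM_PHY_CHANS; i++) {
		struct phy_chan *ch = &pool[i];

		pthread_mutex_lock(&ch->lock);
		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;           /* claim it */
			pthread_mutex_unlock(&ch->lock);
			return ch;
		}
		pthread_mutex_unlock(&ch->lock);
	}
	return NULL;                                       /* no free physical channel */
}

int main(void)
{
	int dummy_vc;

	for (int i = 0; i < NUM_PHY_CHANS; i++) {
		pool[i].id = i;
		pthread_mutex_init(&pool[i].lock, NULL);
	}
	pool[0].locked = 1;                                /* pretend channel 0 is reserved */

	struct phy_chan *ch = get_phy_channel(&dummy_vc);
	printf("claimed channel %d\n", ch ? ch->id : -1);  /* claims channel 1 */
	return 0;
}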
/linux-4.1.27/drivers/s390/net/
H A Dctcm_fsms.c187 * ch : The channel to which the error belongs.
190 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) ctcm_ccw_check_rc() argument
194 CTCM_FUNTAIL, ch->id, msg, rc); ctcm_ccw_check_rc()
198 ch->id); ctcm_ccw_check_rc()
199 fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); ctcm_ccw_check_rc()
203 ch->id); ctcm_ccw_check_rc()
204 fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); ctcm_ccw_check_rc()
209 fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); ctcm_ccw_check_rc()
247 struct channel *ch = arg; chx_txdone() local
248 struct net_device *dev = ch->netdev; chx_txdone()
256 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); chx_txdone()
258 duration = done_stamp - ch->prof.send_stamp; chx_txdone()
259 if (duration > ch->prof.tx_time) chx_txdone()
260 ch->prof.tx_time = duration; chx_txdone()
262 if (ch->irb->scsw.cmd.count != 0) chx_txdone()
265 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); chx_txdone()
266 fsm_deltimer(&ch->timer); chx_txdone()
267 while ((skb = skb_dequeue(&ch->io_queue))) { chx_txdone()
277 spin_lock(&ch->collect_lock); chx_txdone()
278 clear_normalized_cda(&ch->ccw[4]); chx_txdone()
279 if (ch->collect_len > 0) { chx_txdone()
282 if (ctcm_checkalloc_buffer(ch)) { chx_txdone()
283 spin_unlock(&ch->collect_lock); chx_txdone()
286 ch->trans_skb->data = ch->trans_skb_data; chx_txdone()
287 skb_reset_tail_pointer(ch->trans_skb); chx_txdone()
288 ch->trans_skb->len = 0; chx_txdone()
289 if (ch->prof.maxmulti < (ch->collect_len + 2)) chx_txdone()
290 ch->prof.maxmulti = ch->collect_len + 2; chx_txdone()
291 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) chx_txdone()
292 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); chx_txdone()
293 *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; chx_txdone()
295 while ((skb = skb_dequeue(&ch->collect_queue))) { chx_txdone()
297 skb_put(ch->trans_skb, skb->len), skb->len); chx_txdone()
304 ch->collect_len = 0; chx_txdone()
305 spin_unlock(&ch->collect_lock); chx_txdone()
306 ch->ccw[1].count = ch->trans_skb->len; chx_txdone()
307 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); chx_txdone()
308 ch->prof.send_stamp = jiffies; chx_txdone()
309 rc = ccw_device_start(ch->cdev, &ch->ccw[0], chx_txdone()
310 (unsigned long)ch, 0xff, 0); chx_txdone()
311 ch->prof.doios_multi++; chx_txdone()
315 fsm_deltimer(&ch->timer); chx_txdone()
316 ctcm_ccw_check_rc(ch, rc, "chained TX"); chx_txdone()
319 spin_unlock(&ch->collect_lock); chx_txdone()
336 struct channel *ch = arg; ctcm_chx_txidle() local
337 struct net_device *dev = ch->netdev; ctcm_chx_txidle()
340 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); ctcm_chx_txidle()
342 fsm_deltimer(&ch->timer); ctcm_chx_txidle()
344 fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); ctcm_chx_txidle()
357 struct channel *ch = arg; chx_rx() local
358 struct net_device *dev = ch->netdev; chx_rx()
360 int len = ch->max_bufsize - ch->irb->scsw.cmd.count; chx_rx()
361 struct sk_buff *skb = ch->trans_skb; chx_rx()
366 fsm_deltimer(&ch->timer); chx_rx()
375 if (len > ch->max_bufsize) { chx_rx()
378 CTCM_FUNTAIL, dev->name, len, ch->max_bufsize); chx_rx()
387 switch (ch->protocol) { chx_rx()
410 ctcm_unpack_skb(ch, skb); chx_rx()
413 skb->data = ch->trans_skb_data; chx_rx()
416 if (ctcm_checkalloc_buffer(ch)) chx_rx()
418 ch->ccw[1].count = ch->max_bufsize; chx_rx()
419 rc = ccw_device_start(ch->cdev, &ch->ccw[0], chx_rx()
420 (unsigned long)ch, 0xff, 0); chx_rx()
422 ctcm_ccw_check_rc(ch, rc, "normal RX"); chx_rx()
435 struct channel *ch = arg; chx_firstio() local
440 CTCM_FUNTAIL, ch->id, fsmstate); chx_firstio()
442 ch->sense_rc = 0; /* reset unit check report control */ chx_firstio()
446 CTCM_FUNTAIL, ch->id); chx_firstio()
447 fsm_deltimer(&ch->timer); chx_firstio()
448 if (ctcm_checkalloc_buffer(ch)) chx_firstio()
451 (ch->protocol == CTCM_PROTO_OS390)) { chx_firstio()
453 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { chx_firstio()
454 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; chx_firstio()
455 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, chx_firstio()
456 CTC_EVENT_TIMER, ch); chx_firstio()
459 struct net_device *dev = ch->netdev; chx_firstio()
471 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) || chx_firstio()
472 (ch->protocol != CTCM_PROTO_S390)) chx_firstio()
473 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); chx_firstio()
475 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; chx_firstio()
476 ch->ccw[1].count = 2; /* Transfer only length */ chx_firstio()
478 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) chx_firstio()
480 rc = ccw_device_start(ch->cdev, &ch->ccw[0], chx_firstio()
481 (unsigned long)ch, 0xff, 0); chx_firstio()
483 fsm_deltimer(&ch->timer); chx_firstio()
485 ctcm_ccw_check_rc(ch, rc, "init IO"); chx_firstio()
494 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) && chx_firstio()
495 (ch->protocol == CTCM_PROTO_S390)) { chx_firstio()
496 struct net_device *dev = ch->netdev; chx_firstio()
513 struct channel *ch = arg; chx_rxidle() local
514 struct net_device *dev = ch->netdev; chx_rxidle()
519 fsm_deltimer(&ch->timer); chx_rxidle()
520 buflen = *((__u16 *)ch->trans_skb->data); chx_rxidle()
525 if (ctcm_checkalloc_buffer(ch)) chx_rxidle()
527 ch->ccw[1].count = ch->max_bufsize; chx_rxidle()
529 rc = ccw_device_start(ch->cdev, &ch->ccw[0], chx_rxidle()
530 (unsigned long)ch, 0xff, 0); chx_rxidle()
533 ctcm_ccw_check_rc(ch, rc, "initial RX"); chx_rxidle()
553 struct channel *ch = arg; ctcm_chx_setmode() local
558 fsm_deltimer(&ch->timer); ctcm_chx_setmode()
559 if (IS_MPC(ch)) { ctcm_chx_setmode()
561 CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n", ctcm_chx_setmode()
562 __func__, smp_processor_id(), ch, ch->id); ctcm_chx_setmode()
564 fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch); ctcm_chx_setmode()
566 CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2); ctcm_chx_setmode()
569 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_setmode()
573 rc = ccw_device_start(ch->cdev, &ch->ccw[6], ctcm_chx_setmode()
574 (unsigned long)ch, 0xff, 0); ctcm_chx_setmode()
576 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_setmode()
578 fsm_deltimer(&ch->timer); ctcm_chx_setmode()
580 ctcm_ccw_check_rc(ch, rc, "set Mode"); ctcm_chx_setmode()
582 ch->retry = 0; ctcm_chx_setmode()
594 struct channel *ch = arg; ctcm_chx_start() local
599 CTCM_FUNTAIL, ch->id, ctcm_chx_start()
600 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX"); ctcm_chx_start()
602 if (ch->trans_skb != NULL) { ctcm_chx_start()
603 clear_normalized_cda(&ch->ccw[1]); ctcm_chx_start()
604 dev_kfree_skb(ch->trans_skb); ctcm_chx_start()
605 ch->trans_skb = NULL; ctcm_chx_start()
607 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ctcm_chx_start()
608 ch->ccw[1].cmd_code = CCW_CMD_READ; ctcm_chx_start()
609 ch->ccw[1].flags = CCW_FLAG_SLI; ctcm_chx_start()
610 ch->ccw[1].count = 0; ctcm_chx_start()
612 ch->ccw[1].cmd_code = CCW_CMD_WRITE; ctcm_chx_start()
613 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ctcm_chx_start()
614 ch->ccw[1].count = 0; ctcm_chx_start()
616 if (ctcm_checkalloc_buffer(ch)) { ctcm_chx_start()
620 CTCM_FUNTAIL, ch->id, ctcm_chx_start()
621 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? ctcm_chx_start()
624 ch->ccw[0].cmd_code = CCW_CMD_PREPARE; ctcm_chx_start()
625 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; ctcm_chx_start()
626 ch->ccw[0].count = 0; ctcm_chx_start()
627 ch->ccw[0].cda = 0; ctcm_chx_start()
628 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ ctcm_chx_start()
629 ch->ccw[2].flags = CCW_FLAG_SLI; ctcm_chx_start()
630 ch->ccw[2].count = 0; ctcm_chx_start()
631 ch->ccw[2].cda = 0; ctcm_chx_start()
632 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); ctcm_chx_start()
633 ch->ccw[4].cda = 0; ctcm_chx_start()
634 ch->ccw[4].flags &= ~CCW_FLAG_IDA; ctcm_chx_start()
637 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); ctcm_chx_start()
638 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_start()
639 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); ctcm_chx_start()
640 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_start()
643 fsm_deltimer(&ch->timer); ctcm_chx_start()
644 ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); ctcm_chx_start()
657 struct channel *ch = arg; ctcm_chx_haltio() local
662 fsm_deltimer(&ch->timer); ctcm_chx_haltio()
663 if (IS_MPC(ch)) ctcm_chx_haltio()
664 fsm_deltimer(&ch->sweep_timer); ctcm_chx_haltio()
666 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcm_chx_haltio()
669 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_haltio()
674 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); ctcm_chx_haltio()
677 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_haltio()
681 fsm_deltimer(&ch->timer); ctcm_chx_haltio()
684 ctcm_ccw_check_rc(ch, rc, (char *)__func__); ctcm_chx_haltio()
695 * ch The channel to operate on.
698 struct channel *ch) ctcm_chx_cleanup()
700 struct net_device *dev = ch->netdev; ctcm_chx_cleanup()
705 CTCM_FUNTAIL, dev->name, ch->id, state); ctcm_chx_cleanup()
707 fsm_deltimer(&ch->timer); ctcm_chx_cleanup()
708 if (IS_MPC(ch)) ctcm_chx_cleanup()
709 fsm_deltimer(&ch->sweep_timer); ctcm_chx_cleanup()
712 if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) { ctcm_chx_cleanup()
713 clear_normalized_cda(&ch->ccw[1]); ctcm_chx_cleanup()
714 dev_kfree_skb_any(ch->trans_skb); ctcm_chx_cleanup()
715 ch->trans_skb = NULL; ctcm_chx_cleanup()
718 ch->th_seg = 0x00; ctcm_chx_cleanup()
719 ch->th_seq_num = 0x00; ctcm_chx_cleanup()
720 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ctcm_chx_cleanup()
721 skb_queue_purge(&ch->io_queue); ctcm_chx_cleanup()
724 ctcm_purge_skb_queue(&ch->io_queue); ctcm_chx_cleanup()
725 if (IS_MPC(ch)) ctcm_chx_cleanup()
726 ctcm_purge_skb_queue(&ch->sweep_queue); ctcm_chx_cleanup()
727 spin_lock(&ch->collect_lock); ctcm_chx_cleanup()
728 ctcm_purge_skb_queue(&ch->collect_queue); ctcm_chx_cleanup()
729 ch->collect_len = 0; ctcm_chx_cleanup()
730 spin_unlock(&ch->collect_lock); ctcm_chx_cleanup()
784 struct channel *ch = arg; ctcm_chx_setuperr() local
785 struct net_device *dev = ch->netdev; ctcm_chx_setuperr()
797 fsm_deltimer(&ch->timer); ctcm_chx_setuperr()
798 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcm_chx_setuperr()
799 if (!IS_MPC(ch) && ctcm_chx_setuperr()
800 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { ctcm_chx_setuperr()
801 int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); ctcm_chx_setuperr()
803 ctcm_ccw_check_rc(ch, rc, ctcm_chx_setuperr()
812 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX", ctcm_chx_setuperr()
815 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ctcm_chx_setuperr()
833 struct channel *ch = arg; ctcm_chx_restart() local
834 struct net_device *dev = ch->netdev; ctcm_chx_restart()
841 CTCM_FUNTAIL, ch->id, event, dev->name); ctcm_chx_restart()
843 fsm_deltimer(&ch->timer); ctcm_chx_restart()
845 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcm_chx_restart()
849 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_restart()
853 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); ctcm_chx_restart()
855 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_restart()
858 fsm_deltimer(&ch->timer); ctcm_chx_restart()
861 ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart"); ctcm_chx_restart()
875 struct channel *ch = arg; ctcm_chx_rxiniterr() local
876 struct net_device *dev = ch->netdev; ctcm_chx_rxiniterr()
882 fsm_deltimer(&ch->timer); ctcm_chx_rxiniterr()
883 if (ch->retry++ < 3) ctcm_chx_rxiniterr()
891 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, ctcm_chx_rxiniterr()
910 struct channel *ch = arg; ctcm_chx_rxinitfail() local
911 struct net_device *dev = ch->netdev; ctcm_chx_rxinitfail()
916 CTCM_FUNTAIL, dev->name, ch->id); ctcm_chx_rxinitfail()
930 struct channel *ch = arg; ctcm_chx_rxdisc() local
932 struct net_device *dev = ch->netdev; ctcm_chx_rxdisc()
938 fsm_deltimer(&ch->timer); ctcm_chx_rxdisc()
949 ccw_device_halt(ch->cdev, (unsigned long)ch); ctcm_chx_rxdisc()
962 struct channel *ch = arg; ctcm_chx_txiniterr() local
963 struct net_device *dev = ch->netdev; ctcm_chx_txiniterr()
967 fsm_deltimer(&ch->timer); ctcm_chx_txiniterr()
968 if (ch->retry++ < 3) ctcm_chx_txiniterr()
976 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, ctcm_chx_txiniterr()
994 struct channel *ch = arg; ctcm_chx_txretry() local
995 struct net_device *dev = ch->netdev; ctcm_chx_txretry()
999 CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", ctcm_chx_txretry()
1000 __func__, smp_processor_id(), ch, ch->id); ctcm_chx_txretry()
1002 fsm_deltimer(&ch->timer); ctcm_chx_txretry()
1003 if (ch->retry++ > 3) { ctcm_chx_txretry()
1007 CTCM_FUNTAIL, ch->id); ctcm_chx_txretry()
1018 CTCM_FUNTAIL, ch->id, ch->retry); ctcm_chx_txretry()
1019 skb = skb_peek(&ch->io_queue); ctcm_chx_txretry()
1023 clear_normalized_cda(&ch->ccw[4]); ctcm_chx_txretry()
1024 ch->ccw[4].count = skb->len; ctcm_chx_txretry()
1025 if (set_normalized_cda(&ch->ccw[4], skb->data)) { ctcm_chx_txretry()
1028 CTCM_FUNTAIL, ch->id); ctcm_chx_txretry()
1033 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); ctcm_chx_txretry()
1035 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_chx_txretry()
1040 ctcmpc_dumpit((char *)&ch->ccw[3], ctcm_chx_txretry()
1043 rc = ccw_device_start(ch->cdev, &ch->ccw[3], ctcm_chx_txretry()
1044 (unsigned long)ch, 0xff, 0); ctcm_chx_txretry()
1046 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), ctcm_chx_txretry()
1049 fsm_deltimer(&ch->timer); ctcm_chx_txretry()
1050 ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry"); ctcm_chx_txretry()
1051 ctcm_purge_skb_queue(&ch->io_queue); ctcm_chx_txretry()
1067 struct channel *ch = arg; ctcm_chx_iofatal() local
1068 struct net_device *dev = ch->netdev; ctcm_chx_iofatal()
1070 int rd = CHANNEL_DIRECTION(ch->flags); ctcm_chx_iofatal()
1072 fsm_deltimer(&ch->timer); ctcm_chx_iofatal()
1075 CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX"); ctcm_chx_iofatal()
1077 if (IS_MPC(ch)) { ctcm_chx_iofatal()
1217 struct channel *ch = arg; ctcmpc_chx_txdone() local
1218 struct net_device *dev = ch->netdev; ctcmpc_chx_txdone()
1235 duration = done_stamp - ch->prof.send_stamp; ctcmpc_chx_txdone()
1236 if (duration > ch->prof.tx_time) ctcmpc_chx_txdone()
1237 ch->prof.tx_time = duration; ctcmpc_chx_txdone()
1239 if (ch->irb->scsw.cmd.count != 0) ctcmpc_chx_txdone()
1242 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); ctcmpc_chx_txdone()
1243 fsm_deltimer(&ch->timer); ctcmpc_chx_txdone()
1244 while ((skb = skb_dequeue(&ch->io_queue))) { ctcmpc_chx_txdone()
1254 spin_lock(&ch->collect_lock); ctcmpc_chx_txdone()
1255 clear_normalized_cda(&ch->ccw[4]); ctcmpc_chx_txdone()
1256 if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) { ctcmpc_chx_txdone()
1257 spin_unlock(&ch->collect_lock); ctcmpc_chx_txdone()
1262 if (ctcm_checkalloc_buffer(ch)) { ctcmpc_chx_txdone()
1263 spin_unlock(&ch->collect_lock); ctcmpc_chx_txdone()
1266 ch->trans_skb->data = ch->trans_skb_data; ctcmpc_chx_txdone()
1267 skb_reset_tail_pointer(ch->trans_skb); ctcmpc_chx_txdone()
1268 ch->trans_skb->len = 0; ctcmpc_chx_txdone()
1269 if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH)) ctcmpc_chx_txdone()
1270 ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH; ctcmpc_chx_txdone()
1271 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) ctcmpc_chx_txdone()
1272 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); ctcmpc_chx_txdone()
1281 while ((skb = skb_dequeue(&ch->collect_queue))) { ctcmpc_chx_txdone()
1282 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len); ctcmpc_chx_txdone()
1284 (skb_tail_pointer(ch->trans_skb) - skb->len); ctcmpc_chx_txdone()
1292 __func__, ch->trans_skb->len); ctcmpc_chx_txdone()
1297 ch->collect_len -= skb->len; ctcmpc_chx_txdone()
1303 peekskb = skb_peek(&ch->collect_queue); ctcmpc_chx_txdone()
1313 spin_unlock(&ch->collect_lock); ctcmpc_chx_txdone()
1318 ch->th_seq_num++; ctcmpc_chx_txdone()
1319 header->th_seq_num = ch->th_seq_num; ctcmpc_chx_txdone()
1322 __func__, ch->th_seq_num); ctcmpc_chx_txdone()
1324 memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header, ctcmpc_chx_txdone()
1330 __func__, ch->trans_skb->len); ctcmpc_chx_txdone()
1333 CTCM_D3_DUMP((char *)ch->trans_skb->data, ctcmpc_chx_txdone()
1334 min_t(int, ch->trans_skb->len, 50)); ctcmpc_chx_txdone()
1336 spin_unlock(&ch->collect_lock); ctcmpc_chx_txdone()
1337 clear_normalized_cda(&ch->ccw[1]); ctcmpc_chx_txdone()
1340 (void *)(unsigned long)ch->ccw[1].cda, ctcmpc_chx_txdone()
1341 ch->trans_skb->data); ctcmpc_chx_txdone()
1342 ch->ccw[1].count = ch->max_bufsize; ctcmpc_chx_txdone()
1344 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { ctcmpc_chx_txdone()
1345 dev_kfree_skb_any(ch->trans_skb); ctcmpc_chx_txdone()
1346 ch->trans_skb = NULL; ctcmpc_chx_txdone()
1349 CTCM_FUNTAIL, ch->id); ctcmpc_chx_txdone()
1355 (void *)(unsigned long)ch->ccw[1].cda, ctcmpc_chx_txdone()
1356 ch->trans_skb->data); ctcmpc_chx_txdone()
1358 ch->ccw[1].count = ch->trans_skb->len; ctcmpc_chx_txdone()
1359 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcmpc_chx_txdone()
1360 ch->prof.send_stamp = jiffies; ctcmpc_chx_txdone()
1362 ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); ctcmpc_chx_txdone()
1363 rc = ccw_device_start(ch->cdev, &ch->ccw[0], ctcmpc_chx_txdone()
1364 (unsigned long)ch, 0xff, 0); ctcmpc_chx_txdone()
1365 ch->prof.doios_multi++; ctcmpc_chx_txdone()
1369 fsm_deltimer(&ch->timer); ctcmpc_chx_txdone()
1370 ctcm_ccw_check_rc(ch, rc, "chained TX"); ctcmpc_chx_txdone()
1387 struct channel *ch = arg; ctcmpc_chx_rx() local
1388 struct net_device *dev = ch->netdev; ctcmpc_chx_rx()
1391 struct sk_buff *skb = ch->trans_skb; ctcmpc_chx_rx()
1394 int len = ch->max_bufsize - ch->irb->scsw.cmd.count; ctcmpc_chx_rx()
1398 ch->id, ch->max_bufsize, len); ctcmpc_chx_rx()
1399 fsm_deltimer(&ch->timer); ctcmpc_chx_rx()
1418 new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC); ctcmpc_chx_rx()
1436 skb_queue_tail(&ch->io_queue, new_skb); ctcmpc_chx_rx()
1437 tasklet_schedule(&ch->ch_tasklet); ctcmpc_chx_rx()
1441 skb_queue_tail(&ch->io_queue, new_skb); ctcmpc_chx_rx()
1442 tasklet_hi_schedule(&ch->ch_tasklet); ctcmpc_chx_rx()
1452 if (ctcm_checkalloc_buffer(ch)) ctcmpc_chx_rx()
1454 ch->trans_skb->data = ch->trans_skb_data; ctcmpc_chx_rx()
1455 skb_reset_tail_pointer(ch->trans_skb); ctcmpc_chx_rx()
1456 ch->trans_skb->len = 0; ctcmpc_chx_rx()
1457 ch->ccw[1].count = ch->max_bufsize; ctcmpc_chx_rx()
1459 ctcmpc_dumpit((char *)&ch->ccw[0], ctcmpc_chx_rx()
1464 get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_chx_rx()
1465 rc = ccw_device_start(ch->cdev, &ch->ccw[0], ctcmpc_chx_rx()
1466 (unsigned long)ch, 0xff, 0); ctcmpc_chx_rx()
1469 get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_chx_rx()
1471 ctcm_ccw_check_rc(ch, rc, "normal RX"); ctcmpc_chx_rx()
1476 CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n", ctcmpc_chx_rx()
1477 __func__, dev->name, ch, ch->id); ctcmpc_chx_rx()
1490 struct channel *ch = arg; ctcmpc_chx_firstio() local
1491 struct net_device *dev = ch->netdev; ctcmpc_chx_firstio()
1495 CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", ctcmpc_chx_firstio()
1496 __func__, ch->id, ch); ctcmpc_chx_firstio()
1500 CTCM_FUNTAIL, ch->id, fsm_getstate(fi), ctcmpc_chx_firstio()
1501 fsm_getstate(gptr->fsm), ch->protocol); ctcmpc_chx_firstio()
1506 fsm_deltimer(&ch->timer); ctcmpc_chx_firstio()
1507 if (ctcm_checkalloc_buffer(ch)) ctcmpc_chx_firstio()
1513 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { ctcmpc_chx_firstio()
1524 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ctcmpc_chx_firstio()
1528 CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n", ctcmpc_chx_firstio()
1529 __func__, ch->id, ch); ctcmpc_chx_firstio()
1544 struct channel *ch = arg; ctcmpc_chx_rxidle() local
1545 struct net_device *dev = ch->netdev; ctcmpc_chx_rxidle()
1551 fsm_deltimer(&ch->timer); ctcmpc_chx_rxidle()
1553 __func__, ch->id, dev->name, smp_processor_id(), ctcmpc_chx_rxidle()
1562 if (ctcm_checkalloc_buffer(ch)) ctcmpc_chx_rxidle()
1564 ch->trans_skb->data = ch->trans_skb_data; ctcmpc_chx_rxidle()
1565 skb_reset_tail_pointer(ch->trans_skb); ctcmpc_chx_rxidle()
1566 ch->trans_skb->len = 0; ctcmpc_chx_rxidle()
1567 ch->ccw[1].count = ch->max_bufsize; ctcmpc_chx_rxidle()
1568 CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); ctcmpc_chx_rxidle()
1571 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_chx_rxidle()
1572 rc = ccw_device_start(ch->cdev, &ch->ccw[0], ctcmpc_chx_rxidle()
1573 (unsigned long)ch, 0xff, 0); ctcmpc_chx_rxidle()
1576 get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_chx_rxidle()
1579 ctcm_ccw_check_rc(ch, rc, "initial RX"); ctcmpc_chx_rxidle()
1599 struct channel *ch = arg; ctcmpc_chx_attn() local
1600 struct net_device *dev = ch->netdev; ctcmpc_chx_attn()
1604 CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", ctcmpc_chx_attn()
1605 __func__, dev->name, ch->id, ch, smp_processor_id(), ctcmpc_chx_attn()
1606 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); ctcmpc_chx_attn()
1611 if (!ch->in_mpcgroup) ctcmpc_chx_attn()
1613 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) { ctcmpc_chx_attn()
1618 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); ctcmpc_chx_attn()
1620 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) ctcmpc_chx_attn()
1622 fsm_newstate(ch->fsm, CH_XID7_PENDING1); ctcmpc_chx_attn()
1627 /* attn rcvd before xid0 processed on ch ctcmpc_chx_attn()
1629 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) ctcmpc_chx_attn()
1630 fsm_newstate(ch->fsm, CH_XID7_PENDING1); ctcmpc_chx_attn()
1636 switch (fsm_getstate(ch->fsm)) { ctcmpc_chx_attn()
1638 fsm_newstate(ch->fsm, CH_XID7_PENDING1); ctcmpc_chx_attn()
1641 fsm_newstate(ch->fsm, CH_XID7_PENDING3); ctcmpc_chx_attn()
1658 struct channel *ch = arg; ctcmpc_chx_attnbusy() local
1659 struct net_device *dev = ch->netdev; ctcmpc_chx_attnbusy()
1664 __func__, dev->name, ch->id, ctcmpc_chx_attnbusy()
1665 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); ctcmpc_chx_attnbusy()
1667 fsm_deltimer(&ch->timer); ctcmpc_chx_attnbusy()
1692 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) { ctcmpc_chx_attnbusy()
1693 fsm_newstate(ch->fsm, CH_XID0_PENDING) ; ctcmpc_chx_attnbusy()
1725 if (ch->in_mpcgroup) ctcmpc_chx_attnbusy()
1726 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); ctcmpc_chx_attnbusy()
1730 CTCM_FUNTAIL, dev->name, ch->id); ctcmpc_chx_attnbusy()
1743 struct channel *ch = arg; ctcmpc_chx_resend() local
1744 struct net_device *dev = ch->netdev; ctcmpc_chx_resend()
1748 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); ctcmpc_chx_resend()
1770 CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", ctcmpc_chx_send_sweep()
2081 struct channel *ch = priv->channel[direction]; dev_action_start() local
2082 fsm_event(ch->fsm, CTC_EVENT_START, ch); dev_action_start()
2103 struct channel *ch = priv->channel[direction]; dev_action_stop() local
2104 fsm_event(ch->fsm, CTC_EVENT_STOP, ch); dev_action_stop()
2105 ch->th_seq_num = 0x00; dev_action_stop()
2107 __func__, ch->th_seq_num); dev_action_stop()
697 ctcm_chx_cleanup(fsm_instance *fi, int state, struct channel *ch) ctcm_chx_cleanup() argument
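In the non-MPC transmit-done path above (chx_txdone()), queued packets are coalesced into one transfer buffer behind a 16-bit block-length field set to collect_len + 2, i.e. the payload plus the length field itself. The following is a buffer-layout-only sketch of that framing; it models no CCW or channel I/O, and the helper name is invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy queued packets back-to-back behind a 16-bit block-length field. */
static size_t build_block(uint8_t *out, const uint8_t *pkts[],
			  const size_t lens[], int n)
{
	uint16_t total = 2;          /* block length includes the 2-byte field */
	size_t off = 2;

	for (int i = 0; i < n; i++) {
		memcpy(out + off, pkts[i], lens[i]);
		off += lens[i];
		total += lens[i];
	}
	memcpy(out, &total, 2);
	return off;
}

int main(void)
{
	const uint8_t p1[] = { 1, 2, 3 }, p2[] = { 4, 5 };
	const uint8_t *pkts[] = { p1, p2 };
	const size_t lens[] = { sizeof(p1), sizeof(p2) };
	uint8_t buf[64];
	uint16_t hdr;

	size_t len = build_block(buf, pkts, lens, 2);
	memcpy(&hdr, buf, 2);
	printf("block of %zu bytes, length field = %u\n", len, (unsigned)hdr);
	return 0;
}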
H A Dctcm_main.c71 * ch The channel where this skb has been received.
74 void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) ctcm_unpack_skb() argument
76 struct net_device *dev = ch->netdev; ctcm_unpack_skb()
90 if ((ch->protocol == CTCM_PROTO_S390) && ctcm_unpack_skb()
92 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { ctcm_unpack_skb()
93 ch->logflags |= LOG_FLAG_ILLEGALPKT; ctcm_unpack_skb()
112 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { ctcm_unpack_skb()
118 ch->logflags |= LOG_FLAG_ILLEGALSIZE; ctcm_unpack_skb()
129 if (!(ch->logflags & LOG_FLAG_OVERRUN)) { ctcm_unpack_skb()
134 ch->logflags |= LOG_FLAG_OVERRUN; ctcm_unpack_skb()
146 if (!(ch->logflags & LOG_FLAG_NOMEM)) { ctcm_unpack_skb()
150 ch->logflags |= LOG_FLAG_NOMEM; ctcm_unpack_skb()
165 ch->logflags = 0; ctcm_unpack_skb()
174 ch->logflags |= LOG_FLAG_OVERRUN; ctcm_unpack_skb()
185 * ch Pointer to channel struct to be released.
187 static void channel_free(struct channel *ch) channel_free() argument
189 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id); channel_free()
190 ch->flags &= ~CHANNEL_FLAGS_INUSE; channel_free()
191 fsm_newstate(ch->fsm, CTC_STATE_IDLE); channel_free()
197 * ch Pointer to channel struct to be released.
199 static void channel_remove(struct channel *ch) channel_remove() argument
205 if (ch == NULL) channel_remove()
208 strncpy(chid, ch->id, CTCM_ID_SIZE); channel_remove()
210 channel_free(ch); channel_remove()
212 if (*c == ch) { channel_remove()
213 *c = ch->next; channel_remove()
214 fsm_deltimer(&ch->timer); channel_remove()
215 if (IS_MPC(ch)) channel_remove()
216 fsm_deltimer(&ch->sweep_timer); channel_remove()
218 kfree_fsm(ch->fsm); channel_remove()
219 clear_normalized_cda(&ch->ccw[4]); channel_remove()
220 if (ch->trans_skb != NULL) { channel_remove()
221 clear_normalized_cda(&ch->ccw[1]); channel_remove()
222 dev_kfree_skb_any(ch->trans_skb); channel_remove()
224 if (IS_MPC(ch)) { channel_remove()
225 tasklet_kill(&ch->ch_tasklet); channel_remove()
226 tasklet_kill(&ch->ch_disc_tasklet); channel_remove()
227 kfree(ch->discontact_th); channel_remove()
229 kfree(ch->ccw); channel_remove()
230 kfree(ch->irb); channel_remove()
231 kfree(ch); channel_remove()
254 struct channel *ch = channels; channel_get() local
256 while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type))) channel_get()
257 ch = ch->next; channel_get()
258 if (!ch) { channel_get()
263 if (ch->flags & CHANNEL_FLAGS_INUSE) channel_get()
264 ch = NULL; channel_get()
266 ch->flags |= CHANNEL_FLAGS_INUSE; channel_get()
267 ch->flags &= ~CHANNEL_FLAGS_RWMASK; channel_get()
268 ch->flags |= (direction == CTCM_WRITE) channel_get()
270 fsm_newstate(ch->fsm, CTC_STATE_STOPPED); channel_get()
273 return ch; channel_get()
305 * ch The channel to which the sense code belongs.
308 static inline void ccw_unit_check(struct channel *ch, __u8 sense) ccw_unit_check() argument
312 CTCM_FUNTAIL, ch->id, sense); ccw_unit_check()
316 if (ch->sense_rc != 0x01) { ccw_unit_check()
319 "disconnected\n", ch->id); ccw_unit_check()
320 ch->sense_rc = 0x01; ccw_unit_check()
322 fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); ccw_unit_check()
324 if (ch->sense_rc != SNS0_INTERVENTION_REQ) { ccw_unit_check()
327 "not available\n", ch->id); ccw_unit_check()
328 ch->sense_rc = SNS0_INTERVENTION_REQ; ccw_unit_check()
330 fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); ccw_unit_check()
334 if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { ccw_unit_check()
337 CTCM_FUNTAIL, ch->id, sense); ccw_unit_check()
338 ch->sense_rc = SNS0_BUS_OUT_CHECK; ccw_unit_check()
340 fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch); ccw_unit_check()
342 if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) { ccw_unit_check()
345 CTCM_FUNTAIL, ch->id, sense); ccw_unit_check()
346 ch->sense_rc = SNS0_EQUIPMENT_CHECK; ccw_unit_check()
348 fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch); ccw_unit_check()
351 if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { ccw_unit_check()
354 CTCM_FUNTAIL, ch->id, sense); ccw_unit_check()
355 ch->sense_rc = SNS0_BUS_OUT_CHECK; ccw_unit_check()
358 fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch); ccw_unit_check()
360 fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch); ccw_unit_check()
362 if (ch->sense_rc != SNS0_CMD_REJECT) { ccw_unit_check()
365 CTCM_FUNTAIL, ch->id); ccw_unit_check()
366 ch->sense_rc = SNS0_CMD_REJECT; ccw_unit_check()
371 CTCM_FUNTAIL, ch->id); ccw_unit_check()
372 fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch); ccw_unit_check()
376 CTCM_FUNTAIL, ch->id, sense); ccw_unit_check()
377 fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch); ccw_unit_check()
381 int ctcm_ch_alloc_buffer(struct channel *ch) ctcm_ch_alloc_buffer() argument
383 clear_normalized_cda(&ch->ccw[1]); ctcm_ch_alloc_buffer()
384 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA); ctcm_ch_alloc_buffer()
385 if (ch->trans_skb == NULL) { ctcm_ch_alloc_buffer()
388 CTCM_FUNTAIL, ch->id, ctcm_ch_alloc_buffer()
389 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? ctcm_ch_alloc_buffer()
394 ch->ccw[1].count = ch->max_bufsize; ctcm_ch_alloc_buffer()
395 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { ctcm_ch_alloc_buffer()
396 dev_kfree_skb(ch->trans_skb); ctcm_ch_alloc_buffer()
397 ch->trans_skb = NULL; ctcm_ch_alloc_buffer()
400 CTCM_FUNTAIL, ch->id, ctcm_ch_alloc_buffer()
401 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? ctcm_ch_alloc_buffer()
406 ch->ccw[1].count = 0; ctcm_ch_alloc_buffer()
407 ch->trans_skb_data = ch->trans_skb->data; ctcm_ch_alloc_buffer()
408 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED; ctcm_ch_alloc_buffer()
457 * ch Channel to be used for sending.
464 static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) ctcm_transmit_skb() argument
478 spin_lock_irqsave(&ch->collect_lock, saveflags); ctcm_transmit_skb()
479 if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) { ctcm_transmit_skb()
482 if (ch->collect_len + l > ch->max_bufsize - 2) { ctcm_transmit_skb()
483 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcm_transmit_skb()
492 skb_queue_tail(&ch->collect_queue, skb); ctcm_transmit_skb()
493 ch->collect_len += l; ctcm_transmit_skb()
495 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcm_transmit_skb()
498 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcm_transmit_skb()
504 ch->prof.txlen += skb->len; ctcm_transmit_skb()
522 ctcm_clear_busy(ch->netdev); ctcm_transmit_skb()
533 ch->ccw[4].count = block_len; ctcm_transmit_skb()
534 if (set_normalized_cda(&ch->ccw[4], skb->data)) { ctcm_transmit_skb()
540 if (ctcm_checkalloc_buffer(ch)) { ctcm_transmit_skb()
547 ctcm_clear_busy(ch->netdev); ctcm_transmit_skb()
551 skb_reset_tail_pointer(ch->trans_skb); ctcm_transmit_skb()
552 ch->trans_skb->len = 0; ctcm_transmit_skb()
553 ch->ccw[1].count = skb->len; ctcm_transmit_skb()
555 skb_put(ch->trans_skb, skb->len), skb->len); ctcm_transmit_skb()
560 skb_queue_tail(&ch->io_queue, skb); ctcm_transmit_skb()
564 ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], ctcm_transmit_skb()
566 ch->retry = 0; ctcm_transmit_skb()
567 fsm_newstate(ch->fsm, CTC_STATE_TX); ctcm_transmit_skb()
568 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcm_transmit_skb()
569 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcm_transmit_skb()
570 ch->prof.send_stamp = jiffies; ctcm_transmit_skb()
571 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], ctcm_transmit_skb()
572 (unsigned long)ch, 0xff, 0); ctcm_transmit_skb()
573 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcm_transmit_skb()
575 ch->prof.doios_single++; ctcm_transmit_skb()
577 fsm_deltimer(&ch->timer); ctcm_transmit_skb()
578 ctcm_ccw_check_rc(ch, rc, "single skb TX"); ctcm_transmit_skb()
580 skb_dequeue_tail(&ch->io_queue); ctcm_transmit_skb()
587 struct net_device *dev = ch->netdev; ctcm_transmit_skb()
593 ctcm_clear_busy(ch->netdev); ctcm_transmit_skb()
604 struct channel *ch; ctcmpc_send_sweep_req() local
609 ch = priv->channel[CTCM_WRITE]; ctcmpc_send_sweep_req()
639 header->sw.th_last_seq = ch->th_seq_num; ctcmpc_send_sweep_req()
646 skb_queue_tail(&ch->sweep_queue, sweep_skb); ctcmpc_send_sweep_req()
648 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); ctcmpc_send_sweep_req()
663 static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) ctcmpc_transmit_skb() argument
666 struct net_device *dev = ch->netdev; ctcmpc_transmit_skb()
676 CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", ctcmpc_transmit_skb()
677 __func__, dev->name, smp_processor_id(), ch, ctcmpc_transmit_skb()
678 ch->id, fsm_getstate_str(ch->fsm)); ctcmpc_transmit_skb()
680 if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) { ctcmpc_transmit_skb()
681 spin_lock_irqsave(&ch->collect_lock, saveflags); ctcmpc_transmit_skb()
686 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcmpc_transmit_skb()
707 skb_queue_tail(&ch->collect_queue, skb); ctcmpc_transmit_skb()
708 ch->collect_len += skb->len; ctcmpc_transmit_skb()
711 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcmpc_transmit_skb()
757 if (ch->collect_len > 0) { ctcmpc_transmit_skb()
758 spin_lock_irqsave(&ch->collect_lock, saveflags); ctcmpc_transmit_skb()
759 skb_queue_tail(&ch->collect_queue, skb); ctcmpc_transmit_skb()
760 ch->collect_len += skb->len; ctcmpc_transmit_skb()
761 skb = skb_dequeue(&ch->collect_queue); ctcmpc_transmit_skb()
762 ch->collect_len -= skb->len; ctcmpc_transmit_skb()
763 spin_unlock_irqrestore(&ch->collect_lock, saveflags); ctcmpc_transmit_skb()
769 ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; ctcmpc_transmit_skb()
779 ch->th_seq_num++; ctcmpc_transmit_skb()
780 header->th_seq_num = ch->th_seq_num; ctcmpc_transmit_skb()
783 __func__, dev->name, ch->th_seq_num); ctcmpc_transmit_skb()
795 ch->ccw[4].count = skb->len; ctcmpc_transmit_skb()
796 if (set_normalized_cda(&ch->ccw[4], skb->data)) { ctcmpc_transmit_skb()
801 if (ctcm_checkalloc_buffer(ch)) { ctcmpc_transmit_skb()
809 skb_reset_tail_pointer(ch->trans_skb); ctcmpc_transmit_skb()
810 ch->trans_skb->len = 0; ctcmpc_transmit_skb()
811 ch->ccw[1].count = skb->len; ctcmpc_transmit_skb()
812 memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len); ctcmpc_transmit_skb()
818 __func__, dev->name, ch->trans_skb->len); ctcmpc_transmit_skb()
819 CTCM_D3_DUMP((char *)ch->trans_skb->data, ctcmpc_transmit_skb()
820 min_t(int, 32, ch->trans_skb->len)); ctcmpc_transmit_skb()
822 skb_queue_tail(&ch->io_queue, skb); ctcmpc_transmit_skb()
825 ch->retry = 0; ctcmpc_transmit_skb()
826 fsm_newstate(ch->fsm, CTC_STATE_TX); ctcmpc_transmit_skb()
827 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); ctcmpc_transmit_skb()
830 ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], ctcmpc_transmit_skb()
833 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_transmit_skb()
834 ch->prof.send_stamp = jiffies; ctcmpc_transmit_skb()
835 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], ctcmpc_transmit_skb()
836 (unsigned long)ch, 0xff, 0); ctcmpc_transmit_skb()
837 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); ctcmpc_transmit_skb()
839 ch->prof.doios_single++; ctcmpc_transmit_skb()
841 fsm_deltimer(&ch->timer); ctcmpc_transmit_skb()
842 ctcm_ccw_check_rc(ch, rc, "single skb TX"); ctcmpc_transmit_skb()
844 skb_dequeue_tail(&ch->io_queue); ctcmpc_transmit_skb()
849 if (ch->th_seq_num > 0xf0000000) /* Chosen at random. */ ctcmpc_transmit_skb()
850 ctcmpc_send_sweep_req(ch); ctcmpc_transmit_skb()
856 CTCM_FUNTAIL, ch->id); ctcmpc_transmit_skb()
1201 struct channel *ch; ctcm_irq_handler() local
1233 ch = priv->channel[CTCM_READ]; ctcm_irq_handler()
1235 ch = priv->channel[CTCM_WRITE]; ctcm_irq_handler()
1245 dev = ch->netdev; ctcm_irq_handler()
1248 "%s Internal error: net_device is NULL, ch = 0x%p\n", ctcm_irq_handler()
1249 __func__, ch); ctcm_irq_handler()
1255 memcpy(ch->irb, irb, sizeof(struct irb)); ctcm_irq_handler()
1259 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); ctcm_irq_handler()
1261 "%s(%s): sub-ch check %s: cs=%02x ds=%02x", ctcm_irq_handler()
1262 CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); ctcm_irq_handler()
1270 if ((irb->ecw[0] & ch->sense_rc) == 0) ctcm_irq_handler()
1274 CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); ctcm_irq_handler()
1275 ccw_unit_check(ch, irb->ecw[0]); ctcm_irq_handler()
1280 fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); ctcm_irq_handler()
1282 fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); ctcm_irq_handler()
1286 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); ctcm_irq_handler()
1293 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); ctcm_irq_handler()
1295 fsm_event(ch->fsm, CTC_EVENT_IRQ, ch); ctcm_irq_handler()
1354 struct channel *ch; add_channel() local
1362 ch = kzalloc(sizeof(struct channel), GFP_KERNEL); add_channel()
1363 if (ch == NULL) add_channel()
1366 ch->protocol = priv->protocol; add_channel()
1368 ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type()); add_channel()
1369 if (ch->discontact_th == NULL) add_channel()
1372 ch->discontact_th->th_blk_flag = TH_DISCONTACT; add_channel()
1373 tasklet_init(&ch->ch_disc_tasklet, add_channel()
1374 mpc_action_send_discontact, (unsigned long)ch); add_channel()
1376 tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch); add_channel()
1377 ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35); add_channel()
1382 ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); add_channel()
1383 if (ch->ccw == NULL) add_channel()
1386 ch->cdev = cdev; add_channel()
1387 snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev)); add_channel()
1388 ch->type = type; add_channel()
1406 * ch->ccw[0..5] are initialized in ch_action_start because add_channel()
1410 * ch->ccw[8-14] need to be used for the XID exchange either add_channel()
1433 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED; add_channel()
1434 ch->ccw[6].flags = CCW_FLAG_SLI; add_channel()
1436 ch->ccw[7].cmd_code = CCW_CMD_NOOP; add_channel()
1437 ch->ccw[7].flags = CCW_FLAG_SLI; add_channel()
1440 ch->ccw[15].cmd_code = CCW_CMD_WRITE; add_channel()
1441 ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC; add_channel()
1442 ch->ccw[15].count = TH_HEADER_LENGTH; add_channel()
1443 ch->ccw[15].cda = virt_to_phys(ch->discontact_th); add_channel()
1445 ch->ccw[16].cmd_code = CCW_CMD_NOOP; add_channel()
1446 ch->ccw[16].flags = CCW_FLAG_SLI; add_channel()
1448 ch->fsm = init_fsm(ch->id, ctc_ch_state_names, add_channel()
1453 ch->fsm = init_fsm(ch->id, ctc_ch_state_names, add_channel()
1458 if (ch->fsm == NULL) add_channel()
1461 fsm_newstate(ch->fsm, CTC_STATE_IDLE); add_channel()
1463 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); add_channel()
1464 if (ch->irb == NULL) add_channel()
1467 while (*c && ctcm_less_than((*c)->id, ch->id)) add_channel()
1470 if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { add_channel()
1478 spin_lock_init(&ch->collect_lock); add_channel()
1480 fsm_settimer(ch->fsm, &ch->timer); add_channel()
1481 skb_queue_head_init(&ch->io_queue); add_channel()
1482 skb_queue_head_init(&ch->collect_queue); add_channel()
1485 fsm_settimer(ch->fsm, &ch->sweep_timer); add_channel()
1486 skb_queue_head_init(&ch->sweep_queue); add_channel()
1488 ch->next = *c; add_channel()
1489 *c = ch; add_channel()
1496 kfree(ch->ccw); add_channel()
1497 kfree(ch->discontact_th); add_channel()
1498 kfree_fsm(ch->fsm); add_channel()
1499 kfree(ch->irb); add_channel()
1500 kfree(ch); add_channel()
1552 snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev)); ctcm_new_device()
1553 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev)); ctcm_new_device()
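ctcm_unpack_skb() above rate-limits its error reporting with ch->logflags: each error class is logged the first time it occurs and then suppressed until a correctly received packet resets the flags to zero. A small standalone sketch of that guard follows; the flag values and the fake receive path are invented for illustration.

#include <stdio.h>

#define LOG_FLAG_ILLEGALPKT  0x01
#define LOG_FLAG_OVERRUN     0x02

static unsigned int logflags;

static void rx_packet(int bad_magic, int overrun)
{
	if (bad_magic) {
		if (!(logflags & LOG_FLAG_ILLEGALPKT)) {
			printf("illegal packet (reported once)\n");
			logflags |= LOG_FLAG_ILLEGALPKT;
		}
		return;
	}
	if (overrun) {
		if (!(logflags & LOG_FLAG_OVERRUN)) {
			printf("buffer overrun (reported once)\n");
			logflags |= LOG_FLAG_OVERRUN;
		}
		return;
	}
	logflags = 0;                /* good packet: re-arm all error reports */
	printf("packet ok\n");
}

int main(void)
{
	rx_packet(1, 0);             /* logged */
	rx_packet(1, 0);             /* suppressed */
	rx_packet(0, 0);             /* ok, clears flags */
	rx_packet(1, 0);             /* logged again */
	return 0;
}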
H A Dctcm_mpc.c106 static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
270 ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag); ctcmpc_dump_skb()
607 struct channel *rch = mpcginfo->ch; mpc_rcvd_sweep_resp()
611 struct channel *ch = priv->channel[CTCM_WRITE]; mpc_rcvd_sweep_resp() local
613 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); mpc_rcvd_sweep_resp()
620 fsm_deltimer(&ch->sweep_timer); mpc_rcvd_sweep_resp()
623 ch->th_seq_num = 0x00; mpc_rcvd_sweep_resp()
644 struct channel *ch = priv->channel[CTCM_WRITE]; ctcmpc_send_sweep_resp() local
646 CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); ctcmpc_send_sweep_resp()
668 header->sw.th_last_seq = ch->th_seq_num; ctcmpc_send_sweep_resp()
675 skb_queue_tail(&ch->sweep_queue, sweep_skb); ctcmpc_send_sweep_resp()
677 fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); ctcmpc_send_sweep_resp()
694 struct channel *rch = mpcginfo->ch; mpc_rcvd_sweep_req()
698 struct channel *ch = priv->channel[CTCM_WRITE]; mpc_rcvd_sweep_req() local
702 " %s(): ch=0x%p id=%s\n", __func__, ch, ch->id); mpc_rcvd_sweep_req()
714 ctcmpc_send_sweep_resp(ch); mpc_rcvd_sweep_req()
876 struct channel *ch = NULL; mpc_group_ready() local
892 ch = priv->channel[CTCM_READ]; mpc_group_ready()
893 ch->pdu_seq = 0; mpc_group_ready()
895 __func__, ch->pdu_seq); mpc_group_ready()
897 ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); mpc_group_ready()
899 ch = priv->channel[CTCM_WRITE]; mpc_group_ready()
900 if (ch->collect_len > 0) { mpc_group_ready()
901 spin_lock(&ch->collect_lock); mpc_group_ready()
902 ctcm_purge_skb_queue(&ch->collect_queue); mpc_group_ready()
903 ch->collect_len = 0; mpc_group_ready()
904 spin_unlock(&ch->collect_lock); mpc_group_ready()
906 ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch); mpc_group_ready()
927 void mpc_channel_action(struct channel *ch, int direction, int action) mpc_channel_action() argument
929 struct net_device *dev = ch->netdev; mpc_channel_action()
940 CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id); mpc_channel_action()
949 if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { mpc_channel_action()
953 ch->in_mpcgroup = 1; mpc_channel_action()
955 if (ch->xid_skb != NULL) mpc_channel_action()
956 dev_kfree_skb_any(ch->xid_skb); mpc_channel_action()
958 ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, mpc_channel_action()
960 if (ch->xid_skb == NULL) { mpc_channel_action()
962 "%s(%s): Couldn't alloc ch xid_skb\n", mpc_channel_action()
967 ch->xid_skb_data = ch->xid_skb->data; mpc_channel_action()
968 ch->xid_th = (struct th_header *)ch->xid_skb->data; mpc_channel_action()
969 skb_put(ch->xid_skb, TH_HEADER_LENGTH); mpc_channel_action()
970 ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb); mpc_channel_action()
971 skb_put(ch->xid_skb, XID2_LENGTH); mpc_channel_action()
972 ch->xid_id = skb_tail_pointer(ch->xid_skb); mpc_channel_action()
973 ch->xid_skb->data = ch->xid_skb_data; mpc_channel_action()
974 skb_reset_tail_pointer(ch->xid_skb); mpc_channel_action()
975 ch->xid_skb->len = 0; mpc_channel_action()
977 memcpy(skb_put(ch->xid_skb, grp->xid_skb->len), mpc_channel_action()
981 ch->xid->xid2_dlc_type = mpc_channel_action()
982 ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) mpc_channel_action()
985 if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) mpc_channel_action()
986 ch->xid->xid2_buf_len = 0x00; mpc_channel_action()
988 ch->xid_skb->data = ch->xid_skb_data; mpc_channel_action()
989 skb_reset_tail_pointer(ch->xid_skb); mpc_channel_action()
990 ch->xid_skb->len = 0; mpc_channel_action()
992 fsm_newstate(ch->fsm, CH_XID0_PENDING); mpc_channel_action()
1003 (ch->in_mpcgroup == 1)) { mpc_channel_action()
1004 ch->in_mpcgroup = 0; mpc_channel_action()
1008 if (ch->xid_skb != NULL) mpc_channel_action()
1009 dev_kfree_skb_any(ch->xid_skb); mpc_channel_action()
1010 ch->xid_skb = NULL; mpc_channel_action()
1029 CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); mpc_channel_action()
1037 * ch The channel where this skb has been received.
1040 static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) ctcmpc_unpack_skb() argument
1042 struct net_device *dev = ch->netdev; ctcmpc_unpack_skb()
1055 CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n", ctcmpc_unpack_skb()
1056 __func__, dev->name, smp_processor_id(), ch->id); ctcmpc_unpack_skb()
1077 (header->th_seq_num != ch->th_seq_num + 1) && ctcmpc_unpack_skb()
1078 (ch->th_seq_num != 0))) { ctcmpc_unpack_skb()
1087 skb_queue_tail(&ch->io_queue, pskb); ctcmpc_unpack_skb()
1090 ch->th_seq_num + 1, header->th_seq_num); ctcmpc_unpack_skb()
1095 ch->th_seq_num = header->th_seq_num; ctcmpc_unpack_skb()
1098 __func__, ch->th_seq_num); ctcmpc_unpack_skb()
1119 if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) { ctcmpc_unpack_skb()
1158 *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq; ctcmpc_unpack_skb()
1159 ch->pdu_seq++; ctcmpc_unpack_skb()
1163 __func__, ch->pdu_seq); ctcmpc_unpack_skb()
1183 mpcginfo->ch = ch; ctcmpc_unpack_skb()
1223 CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", ctcmpc_unpack_skb()
1224 __func__, dev->name, ch, ch->id); ctcmpc_unpack_skb()
1230 * ch The channel to work on.
1237 struct channel *ch = (struct channel *)thischan; ctcmpc_bh() local
1239 struct net_device *dev = ch->netdev; ctcmpc_bh()
1244 dev->name, smp_processor_id(), __func__, ch->id); ctcmpc_bh()
1247 (skb = skb_dequeue(&ch->io_queue))) { ctcmpc_bh()
1248 ctcmpc_unpack_skb(ch, skb); ctcmpc_bh()
1257 if (skb == skb_peek(&ch->io_queue)) ctcmpc_bh()
1260 CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", ctcmpc_bh()
1261 __func__, dev->name, ch, ch->id); ctcmpc_bh()
1492 struct channel *ch = mpcginfo->ch; mpc_action_discontact() local
1497 if (ch) { mpc_action_discontact()
1498 dev = ch->netdev; mpc_action_discontact()
1504 CTCM_FUNTAIL, dev->name, ch->id); mpc_action_discontact()
1523 struct channel *ch = (struct channel *)thischan; mpc_action_send_discontact() local
1526 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); mpc_action_send_discontact()
1527 rc = ccw_device_start(ch->cdev, &ch->ccw[15], mpc_action_send_discontact()
1528 (unsigned long)ch, 0xff, 0); mpc_action_send_discontact()
1529 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); mpc_action_send_discontact()
1532 ctcm_ccw_check_rc(ch, rc, (char *)__func__); mpc_action_send_discontact()
1546 struct channel *ch = mpcginfo->ch; mpc_validate_xid() local
1547 struct net_device *dev = ch->netdev; mpc_validate_xid()
1563 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1570 if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE : mpc_validate_xid()
1576 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1612 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1617 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1626 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1633 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1640 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1647 CTCM_FUNTAIL, ch->id); mpc_validate_xid()
1668 struct channel *ch = arg; mpc_action_side_xid() local
1674 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", mpc_action_side_xid()
1675 __func__, smp_processor_id(), ch, ch->id); mpc_action_side_xid()
1677 if (ctcm_checkalloc_buffer(ch)) mpc_action_side_xid()
1683 ch->trans_skb->data = ch->trans_skb_data; mpc_action_side_xid()
1684 skb_reset_tail_pointer(ch->trans_skb); mpc_action_side_xid()
1685 ch->trans_skb->len = 0; mpc_action_side_xid()
1690 memset(ch->trans_skb->data, 0, 16); mpc_action_side_xid()
1691 ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; mpc_action_side_xid()
1693 skb_put(ch->trans_skb, TH_HEADER_LENGTH); mpc_action_side_xid()
1694 ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb); mpc_action_side_xid()
1696 skb_put(ch->trans_skb, XID2_LENGTH); mpc_action_side_xid()
1697 ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb); mpc_action_side_xid()
1699 ch->trans_skb->data = ch->trans_skb_data; mpc_action_side_xid()
1700 skb_reset_tail_pointer(ch->trans_skb); mpc_action_side_xid()
1701 ch->trans_skb->len = 0; mpc_action_side_xid()
1705 memset(ch->trans_skb->data, 0, 16); mpc_action_side_xid()
1706 ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; mpc_action_side_xid()
1707 ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH); mpc_action_side_xid()
1708 ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH; mpc_action_side_xid()
1711 ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1712 ch->ccw[8].count = 0; mpc_action_side_xid()
1713 ch->ccw[8].cda = 0x00; mpc_action_side_xid()
1715 if (!(ch->xid_th && ch->xid && ch->xid_id)) mpc_action_side_xid()
1718 CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id); mpc_action_side_xid()
1722 if (ch->xid_th == NULL) mpc_action_side_xid()
1724 ch->ccw[9].cmd_code = CCW_CMD_WRITE; mpc_action_side_xid()
1725 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1726 ch->ccw[9].count = TH_HEADER_LENGTH; mpc_action_side_xid()
1727 ch->ccw[9].cda = virt_to_phys(ch->xid_th); mpc_action_side_xid()
1729 if (ch->xid == NULL) mpc_action_side_xid()
1731 ch->ccw[10].cmd_code = CCW_CMD_WRITE; mpc_action_side_xid()
1732 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1733 ch->ccw[10].count = XID2_LENGTH; mpc_action_side_xid()
1734 ch->ccw[10].cda = virt_to_phys(ch->xid); mpc_action_side_xid()
1736 ch->ccw[11].cmd_code = CCW_CMD_READ; mpc_action_side_xid()
1737 ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1738 ch->ccw[11].count = TH_HEADER_LENGTH; mpc_action_side_xid()
1739 ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th); mpc_action_side_xid()
1741 ch->ccw[12].cmd_code = CCW_CMD_READ; mpc_action_side_xid()
1742 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1743 ch->ccw[12].count = XID2_LENGTH; mpc_action_side_xid()
1744 ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid); mpc_action_side_xid()
1746 ch->ccw[13].cmd_code = CCW_CMD_READ; mpc_action_side_xid()
1747 ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id); mpc_action_side_xid()
1750 ch->ccw[9].cmd_code = CCW_CMD_READ; mpc_action_side_xid()
1751 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1752 ch->ccw[9].count = TH_HEADER_LENGTH; mpc_action_side_xid()
1753 ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th); mpc_action_side_xid()
1755 ch->ccw[10].cmd_code = CCW_CMD_READ; mpc_action_side_xid()
1756 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1757 ch->ccw[10].count = XID2_LENGTH; mpc_action_side_xid()
1758 ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); mpc_action_side_xid()
1760 if (ch->xid_th == NULL) mpc_action_side_xid()
1762 ch->ccw[11].cmd_code = CCW_CMD_WRITE; mpc_action_side_xid()
1763 ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1764 ch->ccw[11].count = TH_HEADER_LENGTH; mpc_action_side_xid()
1765 ch->ccw[11].cda = virt_to_phys(ch->xid_th); mpc_action_side_xid()
1767 if (ch->xid == NULL) mpc_action_side_xid()
1769 ch->ccw[12].cmd_code = CCW_CMD_WRITE; mpc_action_side_xid()
1770 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1771 ch->ccw[12].count = XID2_LENGTH; mpc_action_side_xid()
1772 ch->ccw[12].cda = virt_to_phys(ch->xid); mpc_action_side_xid()
1774 if (ch->xid_id == NULL) mpc_action_side_xid()
1776 ch->ccw[13].cmd_code = CCW_CMD_WRITE; mpc_action_side_xid()
1777 ch->ccw[13].cda = virt_to_phys(ch->xid_id); mpc_action_side_xid()
1780 ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC; mpc_action_side_xid()
1781 ch->ccw[13].count = 4; mpc_action_side_xid()
1783 ch->ccw[14].cmd_code = CCW_CMD_NOOP; mpc_action_side_xid()
1784 ch->ccw[14].flags = CCW_FLAG_SLI; mpc_action_side_xid()
1785 ch->ccw[14].count = 0; mpc_action_side_xid()
1786 ch->ccw[14].cda = 0; mpc_action_side_xid()
1788 CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7); mpc_action_side_xid()
1789 CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH); mpc_action_side_xid()
1790 CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH); mpc_action_side_xid()
1791 CTCM_D3_DUMP((char *)ch->xid_id, 4); mpc_action_side_xid()
1797 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); mpc_action_side_xid()
1801 fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch); mpc_action_side_xid()
1802 rc = ccw_device_start(ch->cdev, &ch->ccw[8], mpc_action_side_xid()
1803 (unsigned long)ch, 0xff, 0); mpc_action_side_xid()
1806 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); mpc_action_side_xid()
1809 ctcm_ccw_check_rc(ch, rc, mpc_action_side_xid()
1814 CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n", mpc_action_side_xid()
1815 __func__, ch, ch->id); mpc_action_side_xid()
1844 struct channel *ch = arg; mpc_action_doxid0() local
1845 struct net_device *dev = ch->netdev; mpc_action_doxid0()
1849 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", mpc_action_doxid0()
1850 __func__, smp_processor_id(), ch, ch->id); mpc_action_doxid0()
1852 if (ch->xid == NULL) { mpc_action_doxid0()
1854 "%s(%s): ch->xid == NULL", mpc_action_doxid0()
1859 fsm_newstate(ch->fsm, CH_XID0_INPROGRESS); mpc_action_doxid0()
1861 ch->xid->xid2_option = XID2_0; mpc_action_doxid0()
1866 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; mpc_action_doxid0()
1870 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; mpc_action_doxid0()
1874 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); mpc_action_doxid0()
1897 struct channel *ch = priv->channel[direction]; mpc_action_doxid7() local
1898 struct xid2 *thisxid = ch->xid; mpc_action_doxid7()
1899 ch->xid_skb->data = ch->xid_skb_data; mpc_action_doxid7()
1900 skb_reset_tail_pointer(ch->xid_skb); mpc_action_doxid7()
1901 ch->xid_skb->len = 0; mpc_action_doxid7()
1908 if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) { mpc_action_doxid7()
1909 fsm_newstate(ch->fsm, CH_XID7_PENDING2); mpc_action_doxid7()
1910 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; mpc_action_doxid7()
1911 memcpy(skb_put(ch->xid_skb, mpc_action_doxid7()
1916 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) { mpc_action_doxid7()
1917 fsm_newstate(ch->fsm, CH_XID7_PENDING2); mpc_action_doxid7()
1918 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; mpc_action_doxid7()
1919 memcpy(skb_put(ch->xid_skb, mpc_action_doxid7()
1927 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) { mpc_action_doxid7()
1928 fsm_newstate(ch->fsm, CH_XID7_PENDING4); mpc_action_doxid7()
1929 memcpy(skb_put(ch->xid_skb, mpc_action_doxid7()
1932 ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; mpc_action_doxid7()
1935 } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) { mpc_action_doxid7()
1936 fsm_newstate(ch->fsm, CH_XID7_PENDING4); mpc_action_doxid7()
1937 ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; mpc_action_doxid7()
1938 memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH), mpc_action_doxid7()
1945 fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); mpc_action_doxid7()
1959 struct channel *ch = mpcginfo->ch; mpc_action_rcvd_xid0() local
1960 struct net_device *dev = ch->netdev; mpc_action_rcvd_xid0()
1964 CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", mpc_action_rcvd_xid0()
1965 __func__, ch->id, grp->outstanding_xid2, mpc_action_rcvd_xid0()
1968 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING) mpc_action_rcvd_xid0()
1969 fsm_newstate(ch->fsm, CH_XID7_PENDING); mpc_action_rcvd_xid0()
2004 __func__, ch->id, grp->outstanding_xid2, mpc_action_rcvd_xid0()
2007 __func__, ch->id, mpc_action_rcvd_xid0()
2008 fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm)); mpc_action_rcvd_xid0()
2021 struct channel *ch = mpcginfo->ch; mpc_action_rcvd_xid7() local
2022 struct net_device *dev = ch->netdev; mpc_action_rcvd_xid7()
2026 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", mpc_action_rcvd_xid7()
2027 __func__, smp_processor_id(), ch, ch->id); mpc_action_rcvd_xid7()
2032 ch->xid_skb->data = ch->xid_skb_data; mpc_action_rcvd_xid7()
2033 skb_reset_tail_pointer(ch->xid_skb); mpc_action_rcvd_xid7()
2034 ch->xid_skb->len = 0; mpc_action_rcvd_xid7()
H A Dctcm_main.h258 void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
277 int ctcm_ch_alloc_buffer(struct channel *ch);
279 static inline int ctcm_checkalloc_buffer(struct channel *ch) ctcm_checkalloc_buffer() argument
281 if (ch->trans_skb == NULL) ctcm_checkalloc_buffer()
282 return ctcm_ch_alloc_buffer(ch); ctcm_checkalloc_buffer()
283 if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) { ctcm_checkalloc_buffer()
284 dev_kfree_skb(ch->trans_skb); ctcm_checkalloc_buffer()
285 return ctcm_ch_alloc_buffer(ch); ctcm_checkalloc_buffer()
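ctcm_checkalloc_buffer() above allocates the transfer buffer only on first use and re-allocates it when the buffer size was changed in the meantime. A small sketch of that lazy-reallocation idea, with a hypothetical struct and malloc()/free() standing in for the skb handling:

    #include <stdlib.h>

    /* Hypothetical stand-in for the channel's buffer state. */
    struct chan {
        void  *trans_buf;        /* like ch->trans_skb */
        size_t bufsize;
        int    bufsize_changed;  /* like CHANNEL_FLAGS_BUFSIZE_CHANGED */
    };

    static int alloc_buffer(struct chan *ch)
    {
        ch->trans_buf = malloc(ch->bufsize);
        ch->bufsize_changed = 0;
        return ch->trans_buf ? 0 : -1;
    }

    /* Allocate on first use; drop the old buffer and allocate a fresh one
     * when the size changed -- the shape of ctcm_checkalloc_buffer() above. */
    static int checkalloc_buffer(struct chan *ch)
    {
        if (!ch->trans_buf)
            return alloc_buffer(ch);
        if (ch->bufsize_changed) {
            free(ch->trans_buf);
            return alloc_buffer(ch);
        }
        return 0;
    }

    int main(void)
    {
        struct chan ch = { .bufsize = 4096 };
        int rc = checkalloc_buffer(&ch);     /* first call allocates */

        ch.bufsize = 8192;
        ch.bufsize_changed = 1;
        rc |= checkalloc_buffer(&ch);        /* re-allocates at the new size */
        free(ch.trans_buf);
        return rc;
    }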
/linux-4.1.27/arch/unicore32/include/mach/
H A Dregs-dmac.h36 * Source Addr DMAC_SRCADDR(ch).
38 #define DMAC_SRCADDR(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x00)
40 * Destination Addr DMAC_DESTADDR(ch).
42 #define DMAC_DESTADDR(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x04)
44 * Control Reg DMAC_CONTROL(ch).
46 #define DMAC_CONTROL(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x0C)
48 * Configuration Reg DMAC_CONFIG(ch).
50 #define DMAC_CONFIG(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x10)
54 * select channel (ch)
56 #define DMAC_CHANNEL(ch) FIELD(1, 1, (ch))
H A Ddma.h38 static inline void puv3_stop_dma(int ch) puv3_stop_dma() argument
40 writel(readl(DMAC_CONFIG(ch)) & ~DMAC_CONFIG_EN, DMAC_CONFIG(ch)); puv3_stop_dma()
43 static inline void puv3_resume_dma(int ch) puv3_resume_dma() argument
45 writel(readl(DMAC_CONFIG(ch)) | DMAC_CONFIG_EN, DMAC_CONFIG(ch)); puv3_resume_dma()
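puv3_stop_dma() and puv3_resume_dma() above are plain read-modify-write accesses that clear or set the DMAC_CONFIG_EN bit in the per-channel configuration register. A userspace sketch of that enable-bit handling, with an ordinary array standing in for the memory-mapped DMAC_CONFIG registers (the bit position is a hypothetical placeholder, since the header lines above do not show it):

    #include <stdint.h>
    #include <stdio.h>

    #define NCHAN      6
    #define CONFIG_EN  (1u << 0)   /* hypothetical position of DMAC_CONFIG_EN */

    /* Stand-in for the memory-mapped per-channel configuration registers. */
    static uint32_t dmac_config[NCHAN];

    static void stop_dma(int ch)
    {
        dmac_config[ch] &= ~CONFIG_EN;   /* readl()/writel() pair in the driver */
    }

    static void resume_dma(int ch)
    {
        dmac_config[ch] |= CONFIG_EN;
    }

    int main(void)
    {
        resume_dma(2);
        printf("ch2 config %#x\n", dmac_config[2]);  /* EN set */
        stop_dma(2);
        printf("ch2 config %#x\n", dmac_config[2]);  /* EN cleared */
        return 0;
    }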
H A Docd.h31 #define putc(ch) ocd_putc(ch)
33 #define putc(ch)
/linux-4.1.27/drivers/isdn/mISDN/
H A Dhwchannel.c55 if (likely(bch->ch.peer)) { bchannel_bh()
56 err = bch->ch.recv(bch->ch.peer, skb); bchannel_bh()
66 mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf) mISDN_initdchannel() argument
68 test_and_set_bit(FLG_HDLC, &ch->Flags); mISDN_initdchannel()
69 ch->maxlen = maxlen; mISDN_initdchannel()
70 ch->hw = NULL; mISDN_initdchannel()
71 ch->rx_skb = NULL; mISDN_initdchannel()
72 ch->tx_skb = NULL; mISDN_initdchannel()
73 ch->tx_idx = 0; mISDN_initdchannel()
74 ch->phfunc = phf; mISDN_initdchannel()
75 skb_queue_head_init(&ch->squeue); mISDN_initdchannel()
76 skb_queue_head_init(&ch->rqueue); mISDN_initdchannel()
77 INIT_LIST_HEAD(&ch->dev.bchannels); mISDN_initdchannel()
78 INIT_WORK(&ch->workq, dchannel_bh); mISDN_initdchannel()
84 mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen, mISDN_initbchannel() argument
87 ch->Flags = 0; mISDN_initbchannel()
88 ch->minlen = minlen; mISDN_initbchannel()
89 ch->next_minlen = minlen; mISDN_initbchannel()
90 ch->init_minlen = minlen; mISDN_initbchannel()
91 ch->maxlen = maxlen; mISDN_initbchannel()
92 ch->next_maxlen = maxlen; mISDN_initbchannel()
93 ch->init_maxlen = maxlen; mISDN_initbchannel()
94 ch->hw = NULL; mISDN_initbchannel()
95 ch->rx_skb = NULL; mISDN_initbchannel()
96 ch->tx_skb = NULL; mISDN_initbchannel()
97 ch->tx_idx = 0; mISDN_initbchannel()
98 skb_queue_head_init(&ch->rqueue); mISDN_initbchannel()
99 ch->rcount = 0; mISDN_initbchannel()
100 ch->next_skb = NULL; mISDN_initbchannel()
101 INIT_WORK(&ch->workq, bchannel_bh); mISDN_initbchannel()
107 mISDN_freedchannel(struct dchannel *ch) mISDN_freedchannel() argument
109 if (ch->tx_skb) { mISDN_freedchannel()
110 dev_kfree_skb(ch->tx_skb); mISDN_freedchannel()
111 ch->tx_skb = NULL; mISDN_freedchannel()
113 if (ch->rx_skb) { mISDN_freedchannel()
114 dev_kfree_skb(ch->rx_skb); mISDN_freedchannel()
115 ch->rx_skb = NULL; mISDN_freedchannel()
117 skb_queue_purge(&ch->squeue); mISDN_freedchannel()
118 skb_queue_purge(&ch->rqueue); mISDN_freedchannel()
119 flush_work(&ch->workq); mISDN_freedchannel()
125 mISDN_clear_bchannel(struct bchannel *ch) mISDN_clear_bchannel() argument
127 if (ch->tx_skb) { mISDN_clear_bchannel()
128 dev_kfree_skb(ch->tx_skb); mISDN_clear_bchannel()
129 ch->tx_skb = NULL; mISDN_clear_bchannel()
131 ch->tx_idx = 0; mISDN_clear_bchannel()
132 if (ch->rx_skb) { mISDN_clear_bchannel()
133 dev_kfree_skb(ch->rx_skb); mISDN_clear_bchannel()
134 ch->rx_skb = NULL; mISDN_clear_bchannel()
136 if (ch->next_skb) { mISDN_clear_bchannel()
137 dev_kfree_skb(ch->next_skb); mISDN_clear_bchannel()
138 ch->next_skb = NULL; mISDN_clear_bchannel()
140 test_and_clear_bit(FLG_TX_BUSY, &ch->Flags); mISDN_clear_bchannel()
141 test_and_clear_bit(FLG_TX_NEXT, &ch->Flags); mISDN_clear_bchannel()
142 test_and_clear_bit(FLG_ACTIVE, &ch->Flags); mISDN_clear_bchannel()
143 test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags); mISDN_clear_bchannel()
144 test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags); mISDN_clear_bchannel()
145 test_and_clear_bit(FLG_RX_OFF, &ch->Flags); mISDN_clear_bchannel()
146 ch->dropcnt = 0; mISDN_clear_bchannel()
147 ch->minlen = ch->init_minlen; mISDN_clear_bchannel()
148 ch->next_minlen = ch->init_minlen; mISDN_clear_bchannel()
149 ch->maxlen = ch->init_maxlen; mISDN_clear_bchannel()
150 ch->next_maxlen = ch->init_maxlen; mISDN_clear_bchannel()
151 skb_queue_purge(&ch->rqueue); mISDN_clear_bchannel()
152 ch->rcount = 0; mISDN_clear_bchannel()
157 mISDN_freebchannel(struct bchannel *ch) mISDN_freebchannel() argument
159 cancel_work_sync(&ch->workq); mISDN_freebchannel()
160 mISDN_clear_bchannel(ch); mISDN_freebchannel()
393 queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb) queue_ch_frame() argument
398 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC); queue_ch_frame()
400 if (ch->peer) { queue_ch_frame()
404 if (!ch->recv(ch->peer, skb)) queue_ch_frame()
413 dchannel_senddata(struct dchannel *ch, struct sk_buff *skb) dchannel_senddata() argument
420 if (skb->len > ch->maxlen) { dchannel_senddata()
422 __func__, skb->len, ch->maxlen); dchannel_senddata()
426 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) { dchannel_senddata()
427 skb_queue_tail(&ch->squeue, skb); dchannel_senddata()
431 ch->tx_skb = skb; dchannel_senddata()
432 ch->tx_idx = 0; dchannel_senddata()
439 bchannel_senddata(struct bchannel *ch, struct sk_buff *skb) bchannel_senddata() argument
447 if (skb->len > ch->maxlen) { bchannel_senddata()
449 __func__, skb->len, ch->maxlen); bchannel_senddata()
454 if (ch->next_skb) { bchannel_senddata()
457 __func__, skb->len, ch->next_skb->len); bchannel_senddata()
460 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) { bchannel_senddata()
461 test_and_set_bit(FLG_TX_NEXT, &ch->Flags); bchannel_senddata()
462 ch->next_skb = skb; bchannel_senddata()
466 ch->tx_skb = skb; bchannel_senddata()
467 ch->tx_idx = 0; bchannel_senddata()
468 confirm_Bsend(ch); bchannel_senddata()
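dchannel_senddata() and bchannel_senddata() above follow the same scheme: reject frames larger than maxlen, and if a transmit is already in flight (the TX_BUSY flag wins the test-and-set) queue or stash the buffer, otherwise make it the current tx buffer so the caller can start the hardware. A minimal sketch of that scheme, using a hypothetical struct and a plain array of strings in place of the skb queue:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAXLEN 64
    #define QDEPTH 8

    /* Hypothetical stand-in for the channel state used by *_senddata(). */
    struct fake_chan {
        bool tx_busy;                 /* like FLG_TX_BUSY */
        char tx_buf[MAXLEN];          /* like ch->tx_skb */
        char queue[QDEPTH][MAXLEN];   /* like ch->squeue / ch->next_skb */
        int  qcount;
    };

    /* Returns 1 if the caller should start the hardware with tx_buf,
     * 0 if the frame was only queued, -1 on error. */
    static int senddata(struct fake_chan *ch, const char *frame)
    {
        if (strlen(frame) + 1 > MAXLEN)
            return -1;                       /* the maxlen check */
        if (ch->tx_busy) {                   /* transmit already in flight */
            if (ch->qcount == QDEPTH)
                return -1;
            strcpy(ch->queue[ch->qcount++], frame);
            return 0;
        }
        ch->tx_busy = true;                  /* claim the transmitter */
        strcpy(ch->tx_buf, frame);
        return 1;
    }

    int main(void)
    {
        struct fake_chan ch = { 0 };

        printf("first:  %d\n", senddata(&ch, "frame-1"));   /* 1: start tx */
        printf("second: %d\n", senddata(&ch, "frame-2"));   /* 0: queued */
        printf("queued: %s\n", ch.queue[0]);
        return 0;
    }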
H A Dstack.c42 mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb) mISDN_queue_message() argument
44 _queue_message(ch->st, skb); mISDN_queue_message()
51 struct mISDNchannel *ch; get_channel4id() local
54 list_for_each_entry(ch, &st->layer2, list) { get_channel4id()
55 if (id == ch->nr) get_channel4id()
58 ch = NULL; get_channel4id()
61 return ch; get_channel4id()
93 struct mISDNchannel *ch; send_layer2() local
100 list_for_each_entry(ch, &st->layer2, list) { send_layer2()
101 if (list_is_last(&ch->list, &st->layer2)) { send_layer2()
108 ret = ch->send(ch, cskb); send_layer2()
112 "%s ch%d prim(%x) addr(%x)" send_layer2()
114 __func__, ch->nr, send_layer2()
115 hh->prim, ch->addr, ret); send_layer2()
119 printk(KERN_WARNING "%s ch%d addr %x no mem\n", send_layer2()
120 __func__, ch->nr, ch->addr); send_layer2()
125 list_for_each_entry(ch, &st->layer2, list) { send_layer2()
126 if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) { send_layer2()
127 ret = ch->send(ch, skb); send_layer2()
151 struct mISDNchannel *ch; send_msg_to_layer() local
170 ch = get_channel4id(st, hh->id); send_msg_to_layer()
171 if (ch) send_msg_to_layer()
172 return ch->send(ch, skb); send_msg_to_layer()
180 ch = get_channel4id(st, hh->id); send_msg_to_layer()
181 if (ch) send_msg_to_layer()
182 return ch->send(ch, skb); send_msg_to_layer()
333 l1_receive(struct mISDNchannel *ch, struct sk_buff *skb) l1_receive() argument
335 if (!ch->st) l1_receive()
338 _queue_message(ch->st, skb); l1_receive()
343 set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei) set_channel_address() argument
345 ch->addr = sapi | (tei << 8); set_channel_address()
349 __add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) __add_layer2() argument
351 list_add_tail(&ch->list, &st->layer2); __add_layer2()
355 add_layer2(struct mISDNchannel *ch, struct mISDNstack *st) add_layer2() argument
358 __add_layer2(ch, st); add_layer2()
363 st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) st_own_ctrl() argument
365 if (!ch->st || !ch->st->layer1) st_own_ctrl()
367 return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg); st_own_ctrl()
425 connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch, connect_layer1() argument
428 struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch); connect_layer1()
442 ch->recv = mISDN_queue_message; connect_layer1()
443 ch->peer = &dev->D.st->own; connect_layer1()
444 ch->st = dev->D.st; connect_layer1()
463 connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch, connect_Bstack() argument
475 ch->st = dev->D.st; connect_Bstack()
483 ch->recv = rq.ch->send; connect_Bstack()
484 ch->peer = rq.ch; connect_Bstack()
485 rq.ch->recv = ch->send; connect_Bstack()
486 rq.ch->peer = ch; connect_Bstack()
487 rq.ch->st = dev->D.st; connect_Bstack()
494 rq2.ch = ch; connect_Bstack()
498 ch->recv = rq2.ch->send; connect_Bstack()
499 ch->peer = rq2.ch; connect_Bstack()
500 rq2.ch->st = dev->D.st; connect_Bstack()
505 rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL); connect_Bstack()
508 rq2.ch->recv = rq.ch->send; connect_Bstack()
509 rq2.ch->peer = rq.ch; connect_Bstack()
510 rq.ch->recv = rq2.ch->send; connect_Bstack()
511 rq.ch->peer = rq2.ch; connect_Bstack()
512 rq.ch->st = dev->D.st; connect_Bstack()
514 ch->protocol = protocol; connect_Bstack()
515 ch->nr = rq.ch->nr; connect_Bstack()
520 create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, create_l2entity() argument
540 ch->recv = mISDN_queue_message; create_l2entity()
541 ch->peer = &dev->D.st->own; create_l2entity()
542 ch->st = dev->D.st; create_l2entity()
550 rq.ch = ch; create_l2entity()
554 if ((protocol == ISDN_P_LAPD_NT) && !rq.ch) create_l2entity()
556 add_layer2(rq.ch, dev->D.st); create_l2entity()
557 rq.ch->recv = mISDN_queue_message; create_l2entity()
558 rq.ch->peer = &dev->D.st->own; create_l2entity()
559 rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */ create_l2entity()
569 delete_channel(struct mISDNchannel *ch) delete_channel() argument
571 struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch); delete_channel()
574 if (!ch->st) { delete_channel()
580 dev_name(&ch->st->dev->dev), ch->protocol); delete_channel()
581 if (ch->protocol >= ISDN_P_B_START) { delete_channel()
582 if (ch->peer) { delete_channel()
583 ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL); delete_channel()
584 ch->peer = NULL; delete_channel()
588 switch (ch->protocol) { delete_channel()
593 write_lock_bh(&ch->st->l1sock.lock); delete_channel()
595 write_unlock_bh(&ch->st->l1sock.lock); delete_channel()
596 ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL); delete_channel()
599 pch = get_channel4id(ch->st, ch->nr); delete_channel()
601 mutex_lock(&ch->st->lmutex); delete_channel()
603 mutex_unlock(&ch->st->lmutex); delete_channel()
605 pch = ch->st->dev->teimgr; delete_channel()
612 pch = ch->st->dev->teimgr; delete_channel()
H A Dtei.c93 printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf); da_debug()
171 _queue_data(&mgr->ch, PH_DEACTIVATE_REQ, MISDN_ID_ANY, 0, NULL, da_timer()
258 if (l2->ch.nr > 63) { get_free_id()
264 __set_bit(l2->ch.nr, ids); get_free_id()
282 if (l2->ch.nr == 0) get_free_tei()
284 if ((l2->ch.addr & 0xff) != 0) get_free_tei()
286 i = l2->ch.addr >> 8; get_free_tei()
313 hh->id = (mgr->ch.nr << 16) | mgr->ch.addr; teiup_create()
352 if (mgr->ch.recv(mgr->ch.peer, skb)) { do_send()
371 if (!mgr->ch.recv(mgr->ch.peer, skb)) do_ack()
387 _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0, mgr_send_down()
400 _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0, dl_unit_data()
664 list_del(&l2->ch.list); tei_l2remove()
665 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); tei_l2remove()
803 if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) | create_new_tei()
835 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); create_new_tei()
839 l2->ch.nr = id; create_new_tei()
840 __add_layer2(&l2->ch, mgr->ch.st); create_new_tei()
841 l2->ch.recv = mgr->ch.recv; create_new_tei()
842 l2->ch.peer = mgr->ch.peer; create_new_tei()
843 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); create_new_tei()
845 rq.adr.dev = mgr->ch.st->dev->id; create_new_tei()
846 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq); create_new_tei()
849 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); create_new_tei()
1002 __func__, dev_name(&mgr->ch.st->dev->dev), create_teimgr()
1034 if (mgr->ch.st->dev->Dprotocols create_teimgr()
1038 mgr->up = crq->ch; create_teimgr()
1043 crq->ch = NULL; create_teimgr()
1048 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); create_teimgr()
1054 l2 = create_l2(crq->ch, crq->protocol, opt, create_teimgr()
1092 l2->ch.nr = id; create_teimgr()
1094 crq->ch = &l2->ch; create_teimgr()
1096 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, create_teimgr()
1100 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); create_teimgr()
1105 mgr_send(struct mISDNchannel *ch, struct sk_buff *skb) mgr_send() argument
1111 mgr = container_of(ch, struct manager, ch); mgr_send()
1159 mutex_lock(&mgr->ch.st->lmutex); free_teimanager()
1160 list_del(&l2->ch.list); free_teimanager()
1161 mutex_unlock(&mgr->ch.st->lmutex); free_teimanager()
1162 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); free_teimanager()
1175 mgr->ch.st->dev->D.ctrl(&mgr->ch.st->dev->D, CLOSE_CHANNEL, NULL); free_teimanager()
1243 ret = l2->ch.send(&l2->ch, skb); check_data()
1248 delete_teimanager(struct mISDNchannel *ch) delete_teimanager() argument
1253 mgr = container_of(ch, struct manager, ch); delete_teimanager()
1256 mutex_lock(&mgr->ch.st->lmutex); delete_teimanager()
1257 list_del(&l2->ch.list); delete_teimanager()
1258 mutex_unlock(&mgr->ch.st->lmutex); delete_teimanager()
1259 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL); delete_teimanager()
1261 list_del(&mgr->ch.list); delete_teimanager()
1268 mgr_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) mgr_ctrl() argument
1273 mgr = container_of(ch, struct manager, ch); mgr_ctrl()
1294 mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) mgr_bcast() argument
1296 struct manager *mgr = container_of(ch, struct manager, bcast); mgr_bcast()
1306 (l2->ch.addr & MISDN_ID_SAPI_MASK)) { mgr_bcast()
1321 hhc->id = l2->ch.nr; mgr_bcast()
1322 ret = ch->st->own.recv(&ch->st->own, cskb); mgr_bcast()
1326 "%s ch%d prim(%x) addr(%x)" mgr_bcast()
1328 __func__, l2->ch.nr, mgr_bcast()
1329 hh->prim, l2->ch.addr, ret); mgr_bcast()
1333 printk(KERN_WARNING "%s ch%d addr %x no mem\n", mgr_bcast()
1334 __func__, ch->nr, ch->addr); mgr_bcast()
1349 mgr_bcast_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) mgr_bcast_ctrl() argument
1368 mgr->ch.send = mgr_send; create_teimanager()
1369 mgr->ch.ctrl = mgr_ctrl; create_teimanager()
1370 mgr->ch.st = dev->D.st; create_teimanager()
1371 set_channel_address(&mgr->ch, TEI_SAPI, GROUP_TEI); create_teimanager()
1372 add_layer2(&mgr->ch, dev->D.st); create_teimanager()
1384 dev->teimgr = &mgr->ch; create_teimanager()
H A Dsocket.c70 mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb) mISDN_send() argument
75 msk = container_of(ch, struct mISDN_sock, ch); mISDN_send()
88 mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) mISDN_ctrl() argument
92 msk = container_of(ch, struct mISDN_sock, ch); mISDN_ctrl()
94 printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg); mISDN_ctrl()
124 printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", mISDN_sock_recvmsg()
125 __func__, (int)len, flags, _pms(sk)->ch.nr, mISDN_sock_recvmsg()
148 maddr->channel = _pms(sk)->ch.nr; mISDN_sock_recvmsg()
149 maddr->sapi = _pms(sk)->ch.addr & 0xFF; mISDN_sock_recvmsg()
150 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; mISDN_sock_recvmsg()
183 printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n", mISDN_sock_sendmsg()
184 __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr, mISDN_sock_sendmsg()
220 mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr; mISDN_sock_sendmsg()
228 if (!_pms(sk)->ch.peer) mISDN_sock_sendmsg()
230 err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb); mISDN_sock_sendmsg()
260 delete_channel(&_pms(sk)->ch); data_sock_release()
272 delete_channel(&_pms(sk)->ch); data_sock_release()
520 _pms(sk)->ch.send = mISDN_send; data_sock_bind()
521 _pms(sk)->ch.ctrl = mISDN_ctrl; data_sock_bind()
529 err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind()
536 err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind()
545 err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch, data_sock_bind()
554 _pms(sk)->ch.protocol = sk->sk_protocol; data_sock_bind()
576 maddr->channel = _pms(sk)->ch.nr; data_sock_getname()
577 maddr->sapi = _pms(sk)->ch.addr & 0xff; data_sock_getname()
578 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff; data_sock_getname()
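set_channel_address() in stack.c above packs SAPI and TEI into one channel address with sapi | (tei << 8), and the socket code recovers them with addr & 0xFF and (addr >> 8) & 0xFF when filling the sockaddr. A self-contained illustration of that packing:

    #include <stdio.h>

    /* Pack a SAPI/TEI pair into one address word the way
     * set_channel_address() does, then unpack it the way the
     * socket code above fills the address fields. */
    static unsigned int pack_addr(unsigned int sapi, unsigned int tei)
    {
        return sapi | (tei << 8);
    }

    int main(void)
    {
        unsigned int addr = pack_addr(63, 127);

        printf("sapi=%u tei=%u\n", addr & 0xFF, (addr >> 8) & 0xFF);
        return 0;
    }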
H A Dl1oip_core.c453 queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb); l1oip_socket_recv()
886 handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb) handle_dmsg() argument
888 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); handle_dmsg()
919 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); handle_dmsg()
927 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); handle_dmsg()
929 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); handle_dmsg()
938 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); handle_dmsg()
940 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); handle_dmsg()
1014 rq->ch = &dch->dev.D; open_dchannel()
1024 int ch; open_bchannel() local
1030 ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */ open_bchannel()
1031 bch = hc->chan[ch].bch; open_bchannel()
1033 printk(KERN_ERR "%s:internal error ch %d has no bch\n", open_bchannel()
1034 __func__, ch); open_bchannel()
1039 bch->ch.protocol = rq->protocol; open_bchannel()
1040 rq->ch = &bch->ch; open_bchannel()
1047 l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) l1oip_dctrl() argument
1049 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D); l1oip_dctrl()
1102 handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb) handle_bmsg() argument
1104 struct bchannel *bch = container_of(ch, struct bchannel, ch); handle_bmsg()
1131 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); handle_bmsg()
1142 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); handle_bmsg()
1158 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb); handle_bmsg()
1167 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb); handle_bmsg()
1176 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb); handle_bmsg()
1213 l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) l1oip_bctrl() argument
1215 struct bchannel *bch = container_of(ch, struct bchannel, ch); l1oip_bctrl()
1225 ch->protocol = ISDN_P_NONE; l1oip_bctrl()
1226 ch->peer = NULL; l1oip_bctrl()
1247 int ch; release_card() local
1262 for (ch = 0; ch < 128; ch++) { release_card()
1263 if (hc->chan[ch].dch) { release_card()
1264 mISDN_freedchannel(hc->chan[ch].dch); release_card()
1265 kfree(hc->chan[ch].dch); release_card()
1267 if (hc->chan[ch].bch) { release_card()
1268 mISDN_freebchannel(hc->chan[ch].bch); release_card()
1269 kfree(hc->chan[ch].bch); release_card()
1271 if (hc->chan[ch].disorder_skb) release_card()
1272 dev_kfree_skb(hc->chan[ch].disorder_skb); release_card()
1305 int i, ch; init_card() local
1404 for (ch = 0; ch < dch->dev.nrbchan; ch++) { init_card()
1405 if (ch == 15) init_card()
1413 bch->nr = i + ch; init_card()
1414 bch->slot = i + ch; init_card()
1418 bch->ch.send = handle_bmsg; init_card()
1419 bch->ch.ctrl = l1oip_bctrl; init_card()
1420 bch->ch.nr = i + ch; init_card()
1421 list_add(&bch->ch.list, &dch->dev.bchannels); init_card()
1422 hc->chan[i + ch].bch = bch; init_card()
H A Ddsp_core.c218 if (!dsp->ch.peer) { dsp_rx_off_member()
226 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { dsp_rx_off_member()
263 if (!dsp->ch.peer) { dsp_fill_empty()
272 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { dsp_fill_empty()
628 get_features(struct mISDNchannel *ch) get_features() argument
630 struct dsp *dsp = container_of(ch, struct dsp, ch); get_features()
633 if (!ch->peer) { get_features()
641 if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) { get_features()
655 if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) { get_features()
666 dsp_function(struct mISDNchannel *ch, struct sk_buff *skb) dsp_function() argument
668 struct dsp *dsp = container_of(ch, struct dsp, ch); dsp_function()
913 get_features(ch); dsp_function()
919 if (ch->peer) dsp_function()
920 return ch->recv(ch->peer, skb); dsp_function()
939 if (ch->peer) dsp_function()
940 return ch->recv(ch->peer, skb); dsp_function()
954 dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) dsp_ctrl() argument
956 struct dsp *dsp = container_of(ch, struct dsp, ch); dsp_ctrl()
967 if (dsp->ch.peer) dsp_ctrl()
968 dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL); dsp_ctrl()
1037 if (dsp->ch.peer) { dsp_send_bh()
1039 if (dsp->ch.recv(dsp->ch.peer, skb)) { dsp_send_bh()
1069 ndsp->ch.send = dsp_function; dspcreate()
1070 ndsp->ch.ctrl = dsp_ctrl; dspcreate()
1071 ndsp->up = crq->ch; dspcreate()
1072 crq->ch = &ndsp->ch; dspcreate()
H A Ddsp_hwec.c100 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { dsp_hwec_enable()
120 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { dsp_hwec_disable()
H A Dlayer2.c114 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf); l2m_debug()
154 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; l2up()
158 mISDNDevName4ch(&l2->ch), err); l2up()
177 hh->id = (l2->ch.nr << 16) | l2->ch.addr; l2up_create()
183 mISDNDevName4ch(&l2->ch), err); l2up_create()
192 ret = l2->ch.recv(l2->ch.peer, skb); l2down_skb()
195 mISDNDevName4ch(&l2->ch), ret); l2down_skb()
296 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, l2_timeout()
297 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203"); l2_timeout()
302 hh->id = l2->ch.nr; l2_timeout()
305 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, l2_timeout()
306 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203"); l2_timeout()
307 if (l2->ch.st) l2_timeout()
308 l2->ch.st->own.recv(&l2->ch.st->own, skb); l2_timeout()
316 mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c); l2mgr()
639 mISDNDevName4ch(&l2->ch), __func__); send_uframe()
1125 mISDNDevName4ch(&l2->ch), __func__); enquiry_cr()
1185 mISDNDevName4ch(&l2->ch), p1); invoke_retransmission()
1360 set_channel_address(&l2->ch, l2->sapi, l2->tei); l2_got_tei()
1497 mISDNDevName4ch(&l2->ch), p1); l2_pull_iqueue()
1518 mISDNDevName4ch(&l2->ch), i, p1); l2_pull_iqueue()
1524 mISDNDevName4ch(&l2->ch), __func__); l2_pull_iqueue()
1901 mISDNDevName4ch(&l2->ch)); ph_data_indication()
1910 mISDNDevName4ch(&l2->ch), psapi, ph_data_indication()
1919 mISDNDevName4ch(&l2->ch), ptei, l2->tei); ph_data_indication()
1961 mISDNDevName4ch(&l2->ch), c); ph_data_indication()
1968 l2_send(struct mISDNchannel *ch, struct sk_buff *skb) l2_send() argument
1970 struct layer2 *l2 = container_of(ch, struct layer2, ch); l2_send()
1976 __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id, l2_send()
1984 mISDNDevName4ch(&l2->ch), hh->prim, hh->id); l2_send()
2066 mISDNDevName4ch(&l2->ch), cmd, __func__); tei_l2()
2080 mISDNDevName4ch(&l2->ch)); tei_l2()
2098 if (l2->ch.st) release_l2()
2099 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, release_l2()
2106 l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) l2_ctrl() argument
2108 struct layer2 *l2 = container_of(ch, struct layer2, ch); l2_ctrl()
2113 mISDNDevName4ch(ch), __func__, cmd); l2_ctrl()
2118 set_channel_address(&l2->ch, l2->sapi, l2->tei); l2_ctrl()
2125 if (l2->ch.peer) l2_ctrl()
2126 l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL); l2_ctrl()
2134 create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, create_l2() argument
2147 l2->up = ch; create_l2()
2148 l2->ch.st = ch->st; create_l2()
2149 l2->ch.send = l2_send; create_l2()
2150 l2->ch.ctrl = l2_ctrl; create_l2()
2175 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); create_l2()
2200 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq); create_l2()
2247 l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0); x75create()
2250 crq->ch = &l2->ch; x75create()
/linux-4.1.27/drivers/gpu/ipu-v3/
H A Dipu-cpmem.c98 ipu_get_cpmem(struct ipuv3_channel *ch) ipu_get_cpmem() argument
100 struct ipu_cpmem *cpmem = ch->ipu->cpmem_priv; ipu_get_cpmem()
102 return cpmem->base + ch->num; ipu_get_cpmem()
105 static void ipu_ch_param_write_field(struct ipuv3_channel *ch, u32 wbs, u32 v) ipu_ch_param_write_field() argument
107 struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch); ipu_ch_param_write_field()
131 static u32 ipu_ch_param_read_field(struct ipuv3_channel *ch, u32 wbs) ipu_ch_param_read_field() argument
133 struct ipu_ch_param __iomem *base = ipu_get_cpmem(ch); ipu_ch_param_read_field()
209 void ipu_cpmem_zero(struct ipuv3_channel *ch) ipu_cpmem_zero() argument
211 struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch); ipu_cpmem_zero()
220 void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres) ipu_cpmem_set_resolution() argument
222 ipu_ch_param_write_field(ch, IPU_FIELD_FW, xres - 1); ipu_cpmem_set_resolution()
223 ipu_ch_param_write_field(ch, IPU_FIELD_FH, yres - 1); ipu_cpmem_set_resolution()
227 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride) ipu_cpmem_set_stride() argument
229 ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1); ipu_cpmem_set_stride()
233 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch) ipu_cpmem_set_high_priority() argument
235 struct ipu_soc *ipu = ch->ipu; ipu_cpmem_set_high_priority()
239 ipu_ch_param_write_field(ch, IPU_FIELD_ID, 1); ipu_cpmem_set_high_priority()
241 val = ipu_idmac_read(ipu, IDMAC_CHA_PRI(ch->num)); ipu_cpmem_set_high_priority()
242 val |= 1 << (ch->num % 32); ipu_cpmem_set_high_priority()
243 ipu_idmac_write(ipu, val, IDMAC_CHA_PRI(ch->num)); ipu_cpmem_set_high_priority()
247 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf) ipu_cpmem_set_buffer() argument
250 ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3); ipu_cpmem_set_buffer()
252 ipu_ch_param_write_field(ch, IPU_FIELD_EBA0, buf >> 3); ipu_cpmem_set_buffer()
256 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride) ipu_cpmem_interlaced_scan() argument
258 ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1); ipu_cpmem_interlaced_scan()
259 ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8); ipu_cpmem_interlaced_scan()
260 ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1); ipu_cpmem_interlaced_scan()
264 void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id) ipu_cpmem_set_axi_id() argument
267 ipu_ch_param_write_field(ch, IPU_FIELD_ID, id); ipu_cpmem_set_axi_id()
271 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize) ipu_cpmem_set_burstsize() argument
273 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, burstsize - 1); ipu_cpmem_set_burstsize()
277 void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch) ipu_cpmem_set_block_mode() argument
279 ipu_ch_param_write_field(ch, IPU_FIELD_BM, 1); ipu_cpmem_set_block_mode()
283 void ipu_cpmem_set_rotation(struct ipuv3_channel *ch, ipu_cpmem_set_rotation() argument
288 ipu_ch_param_write_field(ch, IPU_FIELD_ROT_HF_VF, temp_rot); ipu_cpmem_set_rotation()
292 int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, ipu_cpmem_set_format_rgb() argument
302 ipu_ch_param_write_field(ch, IPU_FIELD_WID0, rgb->red.length - 1); ipu_cpmem_set_format_rgb()
303 ipu_ch_param_write_field(ch, IPU_FIELD_OFS0, ro); ipu_cpmem_set_format_rgb()
304 ipu_ch_param_write_field(ch, IPU_FIELD_WID1, rgb->green.length - 1); ipu_cpmem_set_format_rgb()
305 ipu_ch_param_write_field(ch, IPU_FIELD_OFS1, go); ipu_cpmem_set_format_rgb()
306 ipu_ch_param_write_field(ch, IPU_FIELD_WID2, rgb->blue.length - 1); ipu_cpmem_set_format_rgb()
307 ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo); ipu_cpmem_set_format_rgb()
310 ipu_ch_param_write_field(ch, IPU_FIELD_WID3, ipu_cpmem_set_format_rgb()
312 ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, to); ipu_cpmem_set_format_rgb()
314 ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7); ipu_cpmem_set_format_rgb()
315 ipu_ch_param_write_field(ch, IPU_FIELD_OFS3, ipu_cpmem_set_format_rgb()
339 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp); ipu_cpmem_set_format_rgb()
340 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb); ipu_cpmem_set_format_rgb()
341 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 7); /* rgb mode */ ipu_cpmem_set_format_rgb()
347 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width) ipu_cpmem_set_format_passthrough() argument
372 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, bpp); ipu_cpmem_set_format_passthrough()
373 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, npb); ipu_cpmem_set_format_passthrough()
374 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 6); /* raw mode */ ipu_cpmem_set_format_passthrough()
380 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format) ipu_cpmem_set_yuv_interleaved() argument
384 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ ipu_cpmem_set_yuv_interleaved()
385 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA);/* pix fmt */ ipu_cpmem_set_yuv_interleaved()
386 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ ipu_cpmem_set_yuv_interleaved()
389 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); /* bits/pixel */ ipu_cpmem_set_yuv_interleaved()
390 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8);/* pix fmt */ ipu_cpmem_set_yuv_interleaved()
391 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31);/* burst size */ ipu_cpmem_set_yuv_interleaved()
397 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, ipu_cpmem_set_yuv_planar_full() argument
404 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1); ipu_cpmem_set_yuv_planar_full()
405 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); ipu_cpmem_set_yuv_planar_full()
406 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8); ipu_cpmem_set_yuv_planar_full()
409 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1); ipu_cpmem_set_yuv_planar_full()
410 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8); ipu_cpmem_set_yuv_planar_full()
411 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8); ipu_cpmem_set_yuv_planar_full()
415 ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1); ipu_cpmem_set_yuv_planar_full()
416 ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); ipu_cpmem_set_yuv_planar_full()
417 ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8); ipu_cpmem_set_yuv_planar_full()
423 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, ipu_cpmem_set_yuv_planar() argument
435 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, ipu_cpmem_set_yuv_planar()
442 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, ipu_cpmem_set_yuv_planar()
448 ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, ipu_cpmem_set_yuv_planar()
519 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc) ipu_cpmem_set_fmt() argument
525 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 2); ipu_cpmem_set_fmt()
527 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
532 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 1); ipu_cpmem_set_fmt()
534 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
538 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 4); ipu_cpmem_set_fmt()
540 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
544 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 3); ipu_cpmem_set_fmt()
546 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
550 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); ipu_cpmem_set_fmt()
552 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0xA); ipu_cpmem_set_fmt()
554 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
558 ipu_ch_param_write_field(ch, IPU_FIELD_BPP, 3); ipu_cpmem_set_fmt()
560 ipu_ch_param_write_field(ch, IPU_FIELD_PFS, 0x8); ipu_cpmem_set_fmt()
562 ipu_ch_param_write_field(ch, IPU_FIELD_NPB, 31); ipu_cpmem_set_fmt()
566 ipu_cpmem_set_format_rgb(ch, &def_bgr_32); ipu_cpmem_set_fmt()
570 ipu_cpmem_set_format_rgb(ch, &def_rgb_32); ipu_cpmem_set_fmt()
573 ipu_cpmem_set_format_rgb(ch, &def_bgr_24); ipu_cpmem_set_fmt()
576 ipu_cpmem_set_format_rgb(ch, &def_rgb_24); ipu_cpmem_set_fmt()
579 ipu_cpmem_set_format_rgb(ch, &def_rgb_16); ipu_cpmem_set_fmt()
582 ipu_cpmem_set_format_rgb(ch, &def_bgr_16); ipu_cpmem_set_fmt()
592 int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) ipu_cpmem_set_image() argument
601 ipu_cpmem_set_resolution(ch, image->rect.width, image->rect.height); ipu_cpmem_set_image()
602 ipu_cpmem_set_stride(ch, pix->bytesperline); ipu_cpmem_set_image()
604 ipu_cpmem_set_fmt(ch, v4l2_pix_fmt_to_drm_fourcc(pix->pixelformat)); ipu_cpmem_set_image()
615 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, ipu_cpmem_set_image()
626 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, ipu_cpmem_set_image()
636 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, ipu_cpmem_set_image()
646 ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, ipu_cpmem_set_image()
670 ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset); ipu_cpmem_set_image()
671 ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset); ipu_cpmem_set_image()
677 void ipu_cpmem_dump(struct ipuv3_channel *ch) ipu_cpmem_dump() argument
679 struct ipu_ch_param __iomem *p = ipu_get_cpmem(ch); ipu_cpmem_dump()
680 struct ipu_soc *ipu = ch->ipu; ipu_cpmem_dump()
681 int chno = ch->num; ipu_cpmem_dump()
683 dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", chno, ipu_cpmem_dump()
689 dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", chno, ipu_cpmem_dump()
696 ipu_ch_param_read_field(ch, IPU_FIELD_PFS)); ipu_cpmem_dump()
698 ipu_ch_param_read_field(ch, IPU_FIELD_BPP)); ipu_cpmem_dump()
700 ipu_ch_param_read_field(ch, IPU_FIELD_NPB)); ipu_cpmem_dump()
703 ipu_ch_param_read_field(ch, IPU_FIELD_FW)); ipu_cpmem_dump()
705 ipu_ch_param_read_field(ch, IPU_FIELD_FH)); ipu_cpmem_dump()
707 ipu_ch_param_read_field(ch, IPU_FIELD_EBA0) << 3); ipu_cpmem_dump()
709 ipu_ch_param_read_field(ch, IPU_FIELD_EBA1) << 3); ipu_cpmem_dump()
711 ipu_ch_param_read_field(ch, IPU_FIELD_SL)); ipu_cpmem_dump()
713 ipu_ch_param_read_field(ch, IPU_FIELD_SO)); ipu_cpmem_dump()
715 ipu_ch_param_read_field(ch, IPU_FIELD_SLUV)); ipu_cpmem_dump()
717 ipu_ch_param_read_field(ch, IPU_FIELD_UBO) << 3); ipu_cpmem_dump()
719 ipu_ch_param_read_field(ch, IPU_FIELD_VBO) << 3); ipu_cpmem_dump()
722 ipu_ch_param_read_field(ch, IPU_FIELD_WID0)); ipu_cpmem_dump()
724 ipu_ch_param_read_field(ch, IPU_FIELD_WID1)); ipu_cpmem_dump()
726 ipu_ch_param_read_field(ch, IPU_FIELD_WID2)); ipu_cpmem_dump()
728 ipu_ch_param_read_field(ch, IPU_FIELD_WID3)); ipu_cpmem_dump()
730 ipu_ch_param_read_field(ch, IPU_FIELD_OFS0)); ipu_cpmem_dump()
732 ipu_ch_param_read_field(ch, IPU_FIELD_OFS1)); ipu_cpmem_dump()
734 ipu_ch_param_read_field(ch, IPU_FIELD_OFS2)); ipu_cpmem_dump()
736 ipu_ch_param_read_field(ch, IPU_FIELD_OFS3)); ipu_cpmem_dump()
H A Dipu-prv.h61 #define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
62 #define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
63 #define IPU_CHA_CUR_BUF(ch) IPU_CM_REG(0x023C + 4 * ((ch) / 32))
69 #define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0268 + 4 * ((ch) / 32))
70 #define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0270 + 4 * ((ch) / 32))
71 #define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG(0x0288 + 4 * ((ch) / 32))
72 #define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG(0x0278 + 4 * ((ch) / 32))
73 #define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG(0x0280 + 4 * ((ch) / 32))
84 #define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
87 #define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
88 #define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001c + 4 * ((ch) / 32))
94 #define IDMAC_BAND_EN(ch) IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
95 #define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
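The register macros above spread the IDMAC channels over banks of 32: 4 * ((ch) / 32) selects the 32-bit register that holds a channel, and, as the ipu_cpmem_set_high_priority() snippet further up shows, 1 << (ch % 32) selects its bit inside that register. A small sketch of that offset/bit split, using the IDMAC_CHA_PRI base offset shown above:

    #include <stdio.h>
    #include <stdint.h>

    #define IDMAC_CHA_PRI_BASE 0x0014   /* from the IDMAC_CHA_PRI(ch) macro above */

    /* For a given channel, compute the register offset and the bit used
     * inside that register: channels 0..31 share one 32-bit register,
     * channels 32..63 the next one, and so on. */
    static void cha_reg(int ch, uint32_t *offset, uint32_t *bit)
    {
        *offset = IDMAC_CHA_PRI_BASE + 4 * (ch / 32);
        *bit    = 1u << (ch % 32);
    }

    int main(void)
    {
        uint32_t off, bit;

        cha_reg(5, &off, &bit);
        printf("ch5:  reg %#x bit %#010x\n", off, bit);   /* 0x14, bit 5 */
        cha_reg(40, &off, &bit);
        printf("ch40: reg %#x bit %#010x\n", off, bit);   /* 0x18, bit 8 */
        return 0;
    }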
/linux-4.1.27/arch/mn10300/kernel/
H A Dmn10300-debug.c22 char ch; debug_to_serial_mnser() local
25 ch = *p++; debug_to_serial_mnser()
29 SC0TXB = ch; debug_to_serial_mnser()
31 if (ch == 0x0a) { debug_to_serial_mnser()
38 SC1TXB = ch; debug_to_serial_mnser()
40 if (ch == 0x0a) { debug_to_serial_mnser()
47 SC2TXB = ch; debug_to_serial_mnser()
49 if (ch == 0x0a) { debug_to_serial_mnser()
H A Dgdb-stub.c179 static int hex(unsigned char ch);
187 * Convert ch from a hex digit to an int
189 static int hex(unsigned char ch) hex() argument
191 if (ch >= 'a' && ch <= 'f') hex()
192 return ch - 'a' + 10; hex()
193 if (ch >= '0' && ch <= '9') hex()
194 return ch - '0'; hex()
195 if (ch >= 'A' && ch <= 'F') hex()
196 return ch - 'A' + 10; hex()
237 unsigned char ch; getpacket() local
246 gdbstub_io_rx_char(&ch, 0); getpacket()
247 } while (ch != '$'); getpacket()
258 ret = gdbstub_io_rx_char(&ch, 0); getpacket()
262 if (ch == '#') getpacket()
264 checksum += ch; getpacket()
265 buffer[count] = ch; getpacket()
283 ret = gdbstub_io_rx_char(&ch, 0); getpacket()
286 xmitcsum = hex(ch) << 4; getpacket()
288 ret = gdbstub_io_rx_char(&ch, 0); getpacket()
291 xmitcsum |= hex(ch); getpacket()
343 unsigned char ch; putpacket() local
356 while ((ch = buffer[count]) != 0) { putpacket()
357 gdbstub_io_tx_char(ch); putpacket()
358 checksum += ch; putpacket()
366 } while (gdbstub_io_rx_char(&ch, 0), putpacket()
367 ch == '-' && (gdbstub_io("### GDB Rx NAK\n"), 0), putpacket()
368 ch != '-' && ch != '+' && putpacket()
369 (gdbstub_io("### GDB Rx ??? %02x\n", ch), 0), putpacket()
370 ch != '+' && ch != '$'); putpacket()
372 if (ch == '+') { putpacket()
378 gdbstub_rx_unget = ch; putpacket()
795 u8 ch[4]; mem2hex() local
798 if (gdbstub_read_byte(mem, ch) != 0) mem2hex()
800 buf = hex_byte_pack(buf, ch[0]); mem2hex()
806 if (gdbstub_read_word(mem, ch) != 0) mem2hex()
808 buf = hex_byte_pack(buf, ch[0]); mem2hex()
809 buf = hex_byte_pack(buf, ch[1]); mem2hex()
815 if (gdbstub_read_dword(mem, ch) != 0) mem2hex()
817 buf = hex_byte_pack(buf, ch[0]); mem2hex()
818 buf = hex_byte_pack(buf, ch[1]); mem2hex()
819 buf = hex_byte_pack(buf, ch[2]); mem2hex()
820 buf = hex_byte_pack(buf, ch[3]); mem2hex()
826 if (gdbstub_read_word(mem, ch) != 0) mem2hex()
828 buf = hex_byte_pack(buf, ch[0]); mem2hex()
829 buf = hex_byte_pack(buf, ch[1]); mem2hex()
835 if (gdbstub_read_byte(mem, ch) != 0) mem2hex()
837 buf = hex_byte_pack(buf, ch[0]); mem2hex()
857 } ch; hex2mem() local
860 ch.b[0] = hex(*buf++) << 4; hex2mem()
861 ch.b[0] |= hex(*buf++); hex2mem()
862 if (gdbstub_write_byte(ch.val, mem) != 0) hex2mem()
869 ch.b[0] = hex(*buf++) << 4; hex2mem()
870 ch.b[0] |= hex(*buf++); hex2mem()
871 ch.b[1] = hex(*buf++) << 4; hex2mem()
872 ch.b[1] |= hex(*buf++); hex2mem()
873 if (gdbstub_write_word(ch.val, mem) != 0) hex2mem()
880 ch.b[0] = hex(*buf++) << 4; hex2mem()
881 ch.b[0] |= hex(*buf++); hex2mem()
882 ch.b[1] = hex(*buf++) << 4; hex2mem()
883 ch.b[1] |= hex(*buf++); hex2mem()
884 ch.b[2] = hex(*buf++) << 4; hex2mem()
885 ch.b[2] |= hex(*buf++); hex2mem()
886 ch.b[3] = hex(*buf++) << 4; hex2mem()
887 ch.b[3] |= hex(*buf++); hex2mem()
888 if (gdbstub_write_dword(ch.val, mem) != 0) hex2mem()
895 ch.b[0] = hex(*buf++) << 4; hex2mem()
896 ch.b[0] |= hex(*buf++); hex2mem()
897 ch.b[1] = hex(*buf++) << 4; hex2mem()
898 ch.b[1] |= hex(*buf++); hex2mem()
899 if (gdbstub_write_word(ch.val, mem) != 0) hex2mem()
906 ch.b[0] = hex(*buf++) << 4; hex2mem()
907 ch.b[0] |= hex(*buf++); hex2mem()
908 if (gdbstub_write_byte(ch.val, mem) != 0) hex2mem()
1812 unsigned char ch; gdbstub_exit() local
1825 while ((ch = output_buffer[count]) != 0) { gdbstub_exit()
1826 gdbstub_io_tx_char(ch); gdbstub_exit()
1827 checksum += ch; gdbstub_exit()
1847 unsigned char ch; gdbstub_init() local
1870 do { gdbstub_io_rx_char(&ch, 0); } while (ch != '$'); gdbstub_init()
1871 do { gdbstub_io_rx_char(&ch, 0); } while (ch != '#'); gdbstub_init()
1873 do { ret = gdbstub_io_rx_char(&ch, 0); } while (ret != 0); gdbstub_init()
1875 do { ret = gdbstub_io_rx_char(&ch, 0); } while (ret != 0); gdbstub_init()
1908 char ch; gdbstub_rx_irq() local
1914 ret = gdbstub_io_rx_char(&ch, 1); gdbstub_rx_irq()
1917 gdbstub_rx_unget = ch; gdbstub_rx_irq()
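getpacket()/putpacket() above implement the GDB remote serial protocol framing: a payload between '$' and '#', followed by two hex digits of the modulo-256 sum of the payload bytes, with hex() converting each received digit back to a value. A standalone sketch that frames a payload the same way:

    #include <stdio.h>
    #include <string.h>

    /* Frame a GDB remote-protocol packet as "$<payload>#<checksum>", where the
     * checksum is the low 8 bits of the sum of the payload bytes -- the same
     * accumulation done in putpacket() above. */
    static void put_packet(const char *payload, char *out, size_t outlen)
    {
        unsigned char checksum = 0;
        const char *p;

        for (p = payload; *p; p++)
            checksum += (unsigned char)*p;
        snprintf(out, outlen, "$%s#%02x", payload, checksum);
    }

    int main(void)
    {
        char pkt[64];

        put_packet("OK", pkt, sizeof(pkt));
        printf("%s\n", pkt);        /* "$OK#9a" */
        return 0;
    }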
H A Dgdb-io-serial.c102 u8 ch, st; gdbstub_io_rx_char() local
129 ch = gdbstub_rx_buffer[ix++]; gdbstub_io_rx_char()
141 gdbstub_proto("### GDB Rx %02x (st=%02x) ###\n", ch, st); gdbstub_io_rx_char()
142 *_ch = ch & 0x7f; gdbstub_io_rx_char()
150 void gdbstub_io_tx_char(unsigned char ch) gdbstub_io_tx_char() argument
156 if (ch == 0x0a) { gdbstub_io_tx_char()
161 GDBPORT_SERIAL_TX = ch; gdbstub_io_tx_char()
H A Dgdb-io-ttysm.c184 u8 ch, st; gdbstub_io_rx_char() local
211 ch = gdbstub_rx_buffer[ix++]; gdbstub_io_rx_char()
222 if (st & SC01STR_FEF && ch == 0) { gdbstub_io_rx_char()
255 switch (ch) { gdbstub_io_rx_char()
273 gdbstub_io("### GDB Rx %02x (st=%02x) ###\n", ch, st); gdbstub_io_rx_char()
274 *_ch = ch & 0x7f; gdbstub_io_rx_char()
282 void gdbstub_io_tx_char(unsigned char ch) gdbstub_io_tx_char() argument
287 if (ch == 0x0a) { gdbstub_io_tx_char()
293 *(u8 *) gdbstub_port->_txb = ch; gdbstub_io_tx_char()
/linux-4.1.27/scripts/basic/
H A Dbin2c.c14 int ch, total = 0; main() local
22 while ((ch = getchar()) != EOF) { main()
24 printf("\\x%02x", ch); main()
29 } while (ch != EOF); main()
/linux-4.1.27/fs/hfs/
H A Dtrans.c48 wchar_t ch; hfs_mac2asc() local
52 size = nls_disk->char2uni(src, srclen, &ch); hfs_mac2asc()
54 ch = '?'; hfs_mac2asc()
60 ch = *src++; hfs_mac2asc()
63 if (ch == '/') hfs_mac2asc()
64 ch = ':'; hfs_mac2asc()
65 size = nls_io->uni2char(ch, dst, dstlen); hfs_mac2asc()
76 char ch; hfs_mac2asc() local
79 *dst++ = (ch = *src++) == '/' ? ':' : ch; hfs_mac2asc()
110 wchar_t ch; hfs_asc2mac() local
113 size = nls_io->char2uni(src, srclen, &ch); hfs_asc2mac()
115 ch = '?'; hfs_asc2mac()
120 if (ch == ':') hfs_asc2mac()
121 ch = '/'; hfs_asc2mac()
123 size = nls_disk->uni2char(ch, dst, dstlen); hfs_asc2mac()
133 *dst++ = ch > 0xff ? '?' : ch; hfs_asc2mac()
138 char ch; hfs_asc2mac() local
143 *dst++ = (ch = *src++) == ':' ? '/' : ch; hfs_asc2mac()
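
The hfs_mac2asc() and hfs_asc2mac() hits translate names between the on-disk Mac form and the VFS form, swapping '/' and ':' along the way, since classic Mac OS allows '/' in file names but Linux does not. A minimal sketch of just that swap for the raw, no-NLS path; it is illustrative only, not the fs/hfs code.

#include <stddef.h>
#include <stdio.h>

/* Mac catalog name -> VFS name: '/' becomes ':' */
static void mac2asc_raw(char *dst, const char *src, size_t len)
{
	while (len--) {
		char ch = *src++;

		*dst++ = (ch == '/') ? ':' : ch;
	}
}

/* VFS name -> Mac catalog name: ':' becomes '/' */
static void asc2mac_raw(char *dst, const char *src, size_t len)
{
	while (len--) {
		char ch = *src++;

		*dst++ = (ch == ':') ? '/' : ch;
	}
}

int main(void)
{
	char out[8] = { 0 };

	mac2asc_raw(out, "a/b", 3);
	printf("%s\n", out);	/* a:b */
	asc2mac_raw(out, "a:b", 3);
	printf("%s\n", out);	/* a/b */
	return 0;
}
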
/linux-4.1.27/arch/nios2/boot/compressed/
H A Dconsole.c39 static void jtag_putc(int ch) jtag_putc() argument
43 writeb(ch, uartbase + ALTERA_JTAGUART_DATA_REG); jtag_putc()
46 static void jtag_putc(int ch) jtag_putc() argument
51 writeb(ch, uartbase + ALTERA_JTAGUART_DATA_REG); jtag_putc()
55 static int putchar(int ch) putchar() argument
57 jtag_putc(ch); putchar()
58 return ch; putchar()
77 static void uart_putc(int ch) uart_putc() argument
86 writeb(ch, uartbase + ALTERA_UART_TXDATA_REG); uart_putc()
89 static int putchar(int ch) putchar() argument
91 uart_putc(ch); putchar()
92 if (ch == '\n') putchar()
94 return ch; putchar()
109 static int putchar(int ch) putchar() argument
111 return ch; putchar()
/linux-4.1.27/drivers/staging/dgap/
H A Ddgap.c1360 static uint dgap_get_custom_baud(struct channel_t *ch) dgap_get_custom_baud() argument
1365 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_get_custom_baud()
1368 if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) dgap_get_custom_baud()
1371 if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS)) dgap_get_custom_baud()
1374 vaddr = ch->ch_bd->re_map_membase; dgap_get_custom_baud()
1383 offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28) dgap_get_custom_baud()
1438 static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, dgap_parity_scan() argument
1450 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_parity_scan()
1455 switch (ch->pscan_state) { dgap_parity_scan()
1458 ch->pscan_state = 0; dgap_parity_scan()
1464 ch->pscan_state = 1; dgap_parity_scan()
1479 ch->pscan_state = 0; dgap_parity_scan()
1482 ch->pscan_savechar = c; dgap_parity_scan()
1483 ch->pscan_state = 2; dgap_parity_scan()
1492 if (ch->pscan_savechar == 0x0) { dgap_parity_scan()
1495 ch->ch_err_break++; dgap_parity_scan()
1498 ch->ch_err_parity++; dgap_parity_scan()
1504 ch->pscan_state = 0; dgap_parity_scan()
1514 * ch - Pointer to channel structure.
1518 static void dgap_input(struct channel_t *ch) dgap_input() argument
1537 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_input()
1540 tp = ch->ch_tun.un_tty; dgap_input()
1542 bs = ch->ch_bs; dgap_input()
1546 bd = ch->ch_bd; dgap_input()
1551 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_input()
1558 rmask = ch->ch_rsize - 1; dgap_input()
1569 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_input()
1580 !(ch->ch_tun.un_flags & UN_ISOPEN) || dgap_input()
1582 (ch->ch_tun.un_flags & UN_CLOSING)) { dgap_input()
1586 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_input()
1594 if (ch->ch_flags & CH_RXBLOCK) { dgap_input()
1596 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_input()
1606 ch->ch_err_overrun++; dgap_input()
1649 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_input()
1656 buf = ch->ch_bd->flipbuf; dgap_input()
1666 s = ((head >= tail) ? head : ch->ch_rsize) - tail; dgap_input()
1672 memcpy_fromio(buf, ch->ch_raddr + tail, s); dgap_input()
1684 ch->ch_rxcount += len; dgap_input()
1696 dgap_parity_scan(ch, ch->ch_bd->flipbuf, dgap_input()
1697 ch->ch_bd->flipflagbuf, &len); dgap_input()
1700 tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf, dgap_input()
1701 ch->ch_bd->flipflagbuf, len); dgap_input()
1704 tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len); dgap_input()
1707 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_input()
1718 static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch, dgap_write_wakeup() argument
1733 spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2); dgap_write_wakeup()
1739 spin_lock_irqsave(&ch->ch_lock, *irq_flags2); dgap_write_wakeup()
1749 static void dgap_carrier(struct channel_t *ch) dgap_carrier() argument
1756 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_carrier()
1759 bd = ch->ch_bd; dgap_carrier()
1765 if (ch->ch_digi.digi_flags & DIGI_ALTPIN) { dgap_carrier()
1766 ch->ch_dsr = DM_CD; dgap_carrier()
1767 ch->ch_cd = DM_DSR; dgap_carrier()
1769 ch->ch_dsr = DM_DSR; dgap_carrier()
1770 ch->ch_cd = DM_CD; dgap_carrier()
1773 if (ch->ch_mistat & D_CD(ch)) dgap_carrier()
1776 if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) dgap_carrier()
1779 if (ch->ch_c_cflag & CLOCAL) dgap_carrier()
1785 if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) { dgap_carrier()
1792 if (waitqueue_active(&(ch->ch_flags_wait))) dgap_carrier()
1793 wake_up_interruptible(&ch->ch_flags_wait); dgap_carrier()
1799 if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) { dgap_carrier()
1806 if (waitqueue_active(&(ch->ch_flags_wait))) dgap_carrier()
1807 wake_up_interruptible(&ch->ch_flags_wait); dgap_carrier()
1820 ((ch->ch_flags & CH_CD) != 0) && dgap_carrier()
1835 if (waitqueue_active(&(ch->ch_flags_wait))) dgap_carrier()
1836 wake_up_interruptible(&ch->ch_flags_wait); dgap_carrier()
1838 if (ch->ch_tun.un_open_count > 0) dgap_carrier()
1839 tty_hangup(ch->ch_tun.un_tty); dgap_carrier()
1841 if (ch->ch_pun.un_open_count > 0) dgap_carrier()
1842 tty_hangup(ch->ch_pun.un_tty); dgap_carrier()
1849 ch->ch_flags |= CH_FCAR; dgap_carrier()
1851 ch->ch_flags &= ~CH_FCAR; dgap_carrier()
1854 ch->ch_flags |= CH_CD; dgap_carrier()
1856 ch->ch_flags &= ~CH_CD; dgap_carrier()
1868 struct channel_t *ch; dgap_event() local
1936 ch = bd->channels[port]; dgap_event()
1938 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_event()
1945 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_event()
1947 bs = ch->ch_bs; dgap_event()
1950 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_event()
1964 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_event()
1967 dgap_input(ch); dgap_event()
1970 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_event()
1972 if (ch->ch_flags & CH_RACTIVE) dgap_event()
1973 ch->ch_flags |= CH_RENABLE; dgap_event()
1977 if (ch->ch_flags & CH_RWAIT) { dgap_event()
1978 ch->ch_flags &= ~CH_RWAIT; dgap_event()
1981 (&ch->ch_tun.un_flags_wait); dgap_event()
1989 ch->ch_mistat = modem; dgap_event()
1990 dgap_carrier(ch); dgap_event()
1998 if (ch->ch_tun.un_tty) { dgap_event()
2000 ch->ch_err_break++; dgap_event()
2002 (ch->ch_tun.un_tty->port, 1); dgap_event()
2003 tty_insert_flip_char(ch->ch_tun.un_tty->port, dgap_event()
2005 tty_flip_buffer_push(ch->ch_tun.un_tty->port); dgap_event()
2013 dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW, dgap_event()
2015 dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW, dgap_event()
2017 if (ch->ch_flags & CH_WLOW) { dgap_event()
2018 ch->ch_flags &= ~CH_WLOW; dgap_event()
2019 wake_up_interruptible(&ch->ch_flags_wait); dgap_event()
2027 dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY, dgap_event()
2029 dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY, dgap_event()
2031 if (ch->ch_flags & CH_WEMPTY) { dgap_event()
2032 ch->ch_flags &= ~CH_WEMPTY; dgap_event()
2033 wake_up_interruptible(&ch->ch_flags_wait); dgap_event()
2037 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_event()
2395 * ch - Pointer to channel structure.
2403 static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1, dgap_cmdb() argument
2413 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_cmdb()
2419 if (ch->ch_bd->state == BOARD_FAILED) dgap_cmdb()
2426 vaddr = ch->ch_bd->re_map_membase; dgap_cmdb()
2438 ch->ch_bd->state = BOARD_FAILED; dgap_cmdb()
2446 writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1)); dgap_cmdb()
2471 ch->ch_bd->state = BOARD_FAILED; dgap_cmdb()
2482 * ch - Pointer to channel structure.
2489 static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds) dgap_cmdw() argument
2498 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_cmdw()
2504 if (ch->ch_bd->state == BOARD_FAILED) dgap_cmdw()
2511 vaddr = ch->ch_bd->re_map_membase; dgap_cmdw()
2522 ch->ch_bd->state = BOARD_FAILED; dgap_cmdw()
2530 writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1)); dgap_cmdw()
2554 ch->ch_bd->state = BOARD_FAILED; dgap_cmdw()
2565 * ch - Pointer to channel structure.
2572 static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds) dgap_cmdw_ext() argument
2581 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_cmdw_ext()
2587 if (ch->ch_bd->state == BOARD_FAILED) dgap_cmdw_ext()
2594 vaddr = ch->ch_bd->re_map_membase; dgap_cmdw_ext()
2605 ch->ch_bd->state = BOARD_FAILED; dgap_cmdw_ext()
2616 writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1)); dgap_cmdw_ext()
2649 ch->ch_bd->state = BOARD_FAILED; dgap_cmdw_ext()
2660 * ch - Pointer to channel structure.
2665 static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt) dgap_wmove() argument
2672 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_wmove()
2678 bs = ch->ch_bs; dgap_wmove()
2684 if ((cnt > ch->ch_tsize) || dgap_wmove()
2685 (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) dgap_wmove()
2693 n = ch->ch_tstart + ch->ch_tsize - head; dgap_wmove()
2697 taddr = ch->ch_taddr + head; dgap_wmove()
2699 head = ch->ch_tstart; dgap_wmove()
2706 taddr = ch->ch_taddr + head; dgap_wmove()
2717 static void dgap_firmware_reset_port(struct channel_t *ch) dgap_firmware_reset_port() argument
2719 dgap_cmdb(ch, CHRESET, 0, 0, 0); dgap_firmware_reset_port()
2729 ch->ch_fepiflag = 0; dgap_firmware_reset_port()
2730 ch->ch_fepcflag = 0; dgap_firmware_reset_port()
2731 ch->ch_fepoflag = 0; dgap_firmware_reset_port()
2732 ch->ch_fepstartc = 0; dgap_firmware_reset_port()
2733 ch->ch_fepstopc = 0; dgap_firmware_reset_port()
2734 ch->ch_fepastartc = 0; dgap_firmware_reset_port()
2735 ch->ch_fepastopc = 0; dgap_firmware_reset_port()
2736 ch->ch_mostat = 0; dgap_firmware_reset_port()
2737 ch->ch_hflow = 0; dgap_firmware_reset_port()
2747 static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type) dgap_param() argument
2758 if ((ch->ch_c_cflag & (CBAUD)) == 0) { dgap_param()
2761 head = readw(&(ch->ch_bs->rx_head)); dgap_param()
2762 writew(head, &(ch->ch_bs->rx_tail)); dgap_param()
2765 head = readw(&(ch->ch_bs->tx_head)); dgap_param()
2766 writew(head, &(ch->ch_bs->tx_tail)); dgap_param()
2768 ch->ch_flags |= (CH_BAUD0); dgap_param()
2771 ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch)); dgap_param()
2772 mval = D_DTR(ch) | D_RTS(ch); dgap_param()
2773 ch->ch_baud_info = 0; dgap_param()
2775 } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) { dgap_param()
2780 dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0); dgap_param()
2786 ch->ch_custom_speed = dgap_get_custom_baud(ch); dgap_param()
2787 ch->ch_baud_info = ch->ch_custom_speed; dgap_param()
2790 if (ch->ch_flags & CH_BAUD0) { dgap_param()
2791 ch->ch_flags &= ~(CH_BAUD0); dgap_param()
2792 ch->ch_mval |= (D_RTS(ch)|D_DTR(ch)); dgap_param()
2794 mval = D_DTR(ch) | D_RTS(ch); dgap_param()
2833 if (!(ch->ch_tun.un_flags & UN_ISOPEN) && dgap_param()
2835 baud = C_BAUD(ch->ch_pun.un_tty) & 0xff; dgap_param()
2837 baud = C_BAUD(ch->ch_tun.un_tty) & 0xff; dgap_param()
2839 if (ch->ch_c_cflag & CBAUDEX) dgap_param()
2842 if (ch->ch_digi.digi_flags & DIGI_FAST) dgap_param()
2856 ch->ch_baud_info = baud; dgap_param()
2865 cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | dgap_param()
2872 if ((ch->ch_digi.digi_flags & DIGI_FAST) || dgap_param()
2873 (ch->ch_c_cflag & CBAUDEX)) dgap_param()
2876 if ((ch->ch_c_cflag & CBAUDEX) && dgap_param()
2877 !(ch->ch_digi.digi_flags & DIGI_FAST)) { dgap_param()
2885 tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX; dgap_param()
2925 if (cflag != ch->ch_fepcflag) { dgap_param()
2926 ch->ch_fepcflag = (u16) (cflag & 0xffff); dgap_param()
2932 dgap_cmdw(ch, SCFLAG, (u16) cflag, 0); dgap_param()
2936 if (ch->ch_flags & CH_BAUD0) { dgap_param()
2937 ch->ch_flags &= ~(CH_BAUD0); dgap_param()
2938 ch->ch_mval |= (D_RTS(ch)|D_DTR(ch)); dgap_param()
2940 mval = D_DTR(ch) | D_RTS(ch); dgap_param()
2946 iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | dgap_param()
2949 if ((ch->ch_startc == _POSIX_VDISABLE) || dgap_param()
2950 (ch->ch_stopc == _POSIX_VDISABLE)) { dgap_param()
2952 ch->ch_c_iflag &= ~(IXON | IXOFF); dgap_param()
2960 if (ch->ch_digi.digi_flags & DIGI_422) dgap_param()
2961 dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0); dgap_param()
2963 dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0); dgap_param()
2966 if (ch->ch_digi.digi_flags & DIGI_ALTPIN) dgap_param()
2969 if (iflag != ch->ch_fepiflag) { dgap_param()
2970 ch->ch_fepiflag = iflag; dgap_param()
2973 dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0); dgap_param()
2981 if (ch->ch_c_cflag & CRTSCTS) dgap_param()
2982 hflow |= (D_RTS(ch) | D_CTS(ch)); dgap_param()
2983 if (ch->ch_digi.digi_flags & RTSPACE) dgap_param()
2984 hflow |= D_RTS(ch); dgap_param()
2985 if (ch->ch_digi.digi_flags & DTRPACE) dgap_param()
2986 hflow |= D_DTR(ch); dgap_param()
2987 if (ch->ch_digi.digi_flags & CTSPACE) dgap_param()
2988 hflow |= D_CTS(ch); dgap_param()
2989 if (ch->ch_digi.digi_flags & DSRPACE) dgap_param()
2990 hflow |= D_DSR(ch); dgap_param()
2991 if (ch->ch_digi.digi_flags & DCDPACE) dgap_param()
2992 hflow |= D_CD(ch); dgap_param()
2994 if (hflow != ch->ch_hflow) { dgap_param()
2995 ch->ch_hflow = hflow; dgap_param()
2998 dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0); dgap_param()
3008 if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) dgap_param()
3009 hflow2 |= (D_RTS(ch)); dgap_param()
3010 if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) dgap_param()
3011 hflow2 |= (D_DTR(ch)); dgap_param()
3013 dgap_cmdw_ext(ch, 0xff03, hflow2, 0); dgap_param()
3020 mval ^= ch->ch_mforce & (mval ^ ch->ch_mval); dgap_param()
3022 if (ch->ch_mostat ^ mval) { dgap_param()
3023 ch->ch_mostat = mval; dgap_param()
3026 dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0); dgap_param()
3032 ch->ch_mistat = readb(&(ch->ch_bs->m_stat)); dgap_param()
3033 dgap_carrier(ch); dgap_param()
3038 if (ch->ch_startc != ch->ch_fepstartc || dgap_param()
3039 ch->ch_stopc != ch->ch_fepstopc) { dgap_param()
3040 ch->ch_fepstartc = ch->ch_startc; dgap_param()
3041 ch->ch_fepstopc = ch->ch_stopc; dgap_param()
3044 dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0); dgap_param()
3050 if (ch->ch_astartc != ch->ch_fepastartc || dgap_param()
3051 ch->ch_astopc != ch->ch_fepastopc) { dgap_param()
3052 ch->ch_fepastartc = ch->ch_astartc; dgap_param()
3053 ch->ch_fepastopc = ch->ch_astopc; dgap_param()
3056 dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0); dgap_param()
3068 struct channel_t *ch) dgap_block_til_ready()
3076 if (!tty || tty->magic != TTY_MAGIC || !file || !ch || dgap_block_til_ready()
3077 ch->magic != DGAP_CHANNEL_MAGIC) dgap_block_til_ready()
3084 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_block_til_ready()
3086 ch->ch_wopen++; dgap_block_til_ready()
3097 if (ch->ch_bd->state == BOARD_FAILED) { dgap_block_til_ready()
3115 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & dgap_block_til_ready()
3131 if (ch->ch_flags & CH_CD) dgap_block_til_ready()
3134 if (ch->ch_flags & CH_FCAR) dgap_block_til_ready()
3154 old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags; dgap_block_til_ready()
3156 old_flags = ch->ch_flags; dgap_block_til_ready()
3164 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_block_til_ready()
3172 (old_flags != (ch->ch_tun.un_flags | dgap_block_til_ready()
3173 ch->ch_pun.un_flags))); dgap_block_til_ready()
3175 retval = wait_event_interruptible(ch->ch_flags_wait, dgap_block_til_ready()
3176 (old_flags != ch->ch_flags)); dgap_block_til_ready()
3183 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_block_til_ready()
3186 ch->ch_wopen--; dgap_block_til_ready()
3188 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_block_til_ready()
3201 struct channel_t *ch; dgap_tty_flush_buffer() local
3214 ch = un->un_ch; dgap_tty_flush_buffer()
3215 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_flush_buffer()
3218 bd = ch->ch_bd; dgap_tty_flush_buffer()
3223 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_flush_buffer()
3225 ch->ch_flags &= ~CH_STOP; dgap_tty_flush_buffer()
3226 head = readw(&(ch->ch_bs->tx_head)); dgap_tty_flush_buffer()
3227 dgap_cmdw(ch, FLUSHTX, (u16) head, 0); dgap_tty_flush_buffer()
3228 dgap_cmdw(ch, RESUMETX, 0, 0); dgap_tty_flush_buffer()
3229 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { dgap_tty_flush_buffer()
3230 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); dgap_tty_flush_buffer()
3231 wake_up_interruptible(&ch->ch_tun.un_flags_wait); dgap_tty_flush_buffer()
3233 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { dgap_tty_flush_buffer()
3234 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); dgap_tty_flush_buffer()
3235 wake_up_interruptible(&ch->ch_pun.un_flags_wait); dgap_tty_flush_buffer()
3238 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_flush_buffer()
3253 struct channel_t *ch; dgap_tty_hangup() local
3263 ch = un->un_ch; dgap_tty_hangup()
3264 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_hangup()
3267 bd = ch->ch_bd; dgap_tty_hangup()
3286 struct channel_t *ch; dgap_tty_chars_in_buffer() local
3302 ch = un->un_ch; dgap_tty_chars_in_buffer()
3303 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_chars_in_buffer()
3306 bd = ch->ch_bd; dgap_tty_chars_in_buffer()
3310 bs = ch->ch_bs; dgap_tty_chars_in_buffer()
3315 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_chars_in_buffer()
3317 tmask = (ch->ch_tsize - 1); dgap_tty_chars_in_buffer()
3327 chead = readw(&(ch->ch_cm->cm_head)); dgap_tty_chars_in_buffer()
3328 ctail = readw(&(ch->ch_cm->cm_tail)); dgap_tty_chars_in_buffer()
3330 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_chars_in_buffer()
3347 chars = thead - ttail + ch->ch_tsize; dgap_tty_chars_in_buffer()
3362 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_chars_in_buffer()
3365 spin_unlock_irqrestore(&ch->ch_lock, dgap_tty_chars_in_buffer()
3377 struct channel_t *ch; dgap_wait_for_drain() local
3391 ch = un->un_ch; dgap_wait_for_drain()
3392 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_wait_for_drain()
3395 bs = ch->ch_bs; dgap_wait_for_drain()
3408 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_wait_for_drain()
3411 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_wait_for_drain()
3421 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_wait_for_drain()
3423 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_wait_for_drain()
3436 static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un, dgap_maxcps_room() argument
3446 if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) { dgap_maxcps_room()
3450 (HZ * ch->ch_digi.digi_bufsize) / dgap_maxcps_room()
3451 ch->ch_digi.digi_maxcps; dgap_maxcps_room()
3453 if (ch->ch_cpstime < current_time) { dgap_maxcps_room()
3455 ch->ch_cpstime = current_time; /* reset ch_cpstime */ dgap_maxcps_room()
3456 cps_limit = ch->ch_digi.digi_bufsize; dgap_maxcps_room()
3457 } else if (ch->ch_cpstime < buffer_time) { dgap_maxcps_room()
3459 cps_limit = ((buffer_time - ch->ch_cpstime) * dgap_maxcps_room()
3460 ch->ch_digi.digi_maxcps) / HZ; dgap_maxcps_room()
3474 struct channel_t *ch; dgap_set_firmware_event() local
3479 ch = un->un_ch; dgap_set_firmware_event()
3480 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_set_firmware_event()
3482 bs = ch->ch_bs; dgap_set_firmware_event()
3507 struct channel_t *ch; dgap_tty_write_room() local
3521 ch = un->un_ch; dgap_tty_write_room()
3522 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_write_room()
3525 bs = ch->ch_bs; dgap_tty_write_room()
3529 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_write_room()
3531 tmask = ch->ch_tsize - 1; dgap_tty_write_room()
3537 ret += ch->ch_tsize; dgap_tty_write_room()
3540 ret = dgap_maxcps_room(ch, un, ret); dgap_tty_write_room()
3547 if (!(ch->ch_flags & CH_PRON)) dgap_tty_write_room()
3548 ret -= ch->ch_digi.digi_onlen; dgap_tty_write_room()
3549 ret -= ch->ch_digi.digi_offlen; dgap_tty_write_room()
3551 if (ch->ch_flags & CH_PRON) dgap_tty_write_room()
3552 ret -= ch->ch_digi.digi_offlen; dgap_tty_write_room()
3566 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_write_room()
3580 struct channel_t *ch; dgap_tty_write() local
3595 ch = un->un_ch; dgap_tty_write()
3596 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_write()
3599 bs = ch->ch_bs; dgap_tty_write()
3606 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_write()
3609 tmask = ch->ch_tsize - 1; dgap_tty_write()
3615 bufcount += ch->ch_tsize; dgap_tty_write()
3621 bufcount = dgap_maxcps_room(ch, un, bufcount); dgap_tty_write()
3634 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_write()
3642 if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) { dgap_tty_write()
3643 dgap_wmove(ch, ch->ch_digi.digi_onstr, dgap_tty_write()
3644 (int) ch->ch_digi.digi_onlen); dgap_tty_write()
3646 ch->ch_flags |= CH_PRON; dgap_tty_write()
3653 if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { dgap_tty_write()
3654 dgap_wmove(ch, ch->ch_digi.digi_offstr, dgap_tty_write()
3655 (int) ch->ch_digi.digi_offlen); dgap_tty_write()
3657 ch->ch_flags &= ~CH_PRON; dgap_tty_write()
3667 remain = ch->ch_tstart + ch->ch_tsize - head; dgap_tty_write()
3671 vaddr = ch->ch_taddr + head; dgap_tty_write()
3675 head = ch->ch_tstart; dgap_tty_write()
3684 vaddr = ch->ch_taddr + head; dgap_tty_write()
3693 ch->ch_txcount += count; dgap_tty_write()
3707 if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { dgap_tty_write()
3714 dgap_wmove(ch, ch->ch_digi.digi_offstr, dgap_tty_write()
3715 (int) ch->ch_digi.digi_offlen); dgap_tty_write()
3717 ch->ch_flags &= ~CH_PRON; dgap_tty_write()
3722 if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0) dgap_tty_write()
3723 && (ch->ch_digi.digi_bufsize > 0)) { dgap_tty_write()
3724 ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps; dgap_tty_write()
3727 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_write()
3735 * Put a character into ch->ch_buf
3753 struct channel_t *ch; dgap_tty_tiocmget() local
3766 ch = un->un_ch; dgap_tty_tiocmget()
3767 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_tiocmget()
3770 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_tiocmget()
3772 mstat = readb(&(ch->ch_bs->m_stat)); dgap_tty_tiocmget()
3774 mstat |= ch->ch_mostat; dgap_tty_tiocmget()
3776 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_tiocmget()
3780 if (mstat & D_DTR(ch)) dgap_tty_tiocmget()
3782 if (mstat & D_RTS(ch)) dgap_tty_tiocmget()
3784 if (mstat & D_CTS(ch)) dgap_tty_tiocmget()
3786 if (mstat & D_DSR(ch)) dgap_tty_tiocmget()
3788 if (mstat & D_RI(ch)) dgap_tty_tiocmget()
3790 if (mstat & D_CD(ch)) dgap_tty_tiocmget()
3805 struct channel_t *ch; dgap_tty_tiocmset() local
3817 ch = un->un_ch; dgap_tty_tiocmset()
3818 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_tiocmset()
3821 bd = ch->ch_bd; dgap_tty_tiocmset()
3826 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_tiocmset()
3829 ch->ch_mforce |= D_RTS(ch); dgap_tty_tiocmset()
3830 ch->ch_mval |= D_RTS(ch); dgap_tty_tiocmset()
3834 ch->ch_mforce |= D_DTR(ch); dgap_tty_tiocmset()
3835 ch->ch_mval |= D_DTR(ch); dgap_tty_tiocmset()
3839 ch->ch_mforce |= D_RTS(ch); dgap_tty_tiocmset()
3840 ch->ch_mval &= ~(D_RTS(ch)); dgap_tty_tiocmset()
3844 ch->ch_mforce |= D_DTR(ch); dgap_tty_tiocmset()
3845 ch->ch_mval &= ~(D_DTR(ch)); dgap_tty_tiocmset()
3848 dgap_param(ch, bd, un->un_type); dgap_tty_tiocmset()
3850 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_tiocmset()
3864 struct channel_t *ch; dgap_tty_send_break() local
3876 ch = un->un_ch; dgap_tty_send_break()
3877 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_send_break()
3880 bd = ch->ch_bd; dgap_tty_send_break()
3897 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_send_break()
3899 dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0); dgap_tty_send_break()
3901 dgap_cmdw(ch, SBREAK, (u16) msec, 0); dgap_tty_send_break()
3903 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_send_break()
3927 struct channel_t *ch; dgap_tty_send_xchar() local
3939 ch = un->un_ch; dgap_tty_send_xchar()
3940 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_send_xchar()
3943 bd = ch->ch_bd; dgap_tty_send_xchar()
3948 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_send_xchar()
3959 dgap_cmdw(ch, RPAUSE, 0, 0); dgap_tty_send_xchar()
3961 dgap_cmdw(ch, RRESUME, 0, 0); dgap_tty_send_xchar()
3963 dgap_wmove(ch, &c, 1); dgap_tty_send_xchar()
3965 dgap_wmove(ch, &c, 1); dgap_tty_send_xchar()
3968 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_send_xchar()
3975 static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value) dgap_get_modem_info() argument
3981 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_get_modem_info()
3983 mstat = readb(&(ch->ch_bs->m_stat)); dgap_get_modem_info()
3985 mstat |= ch->ch_mostat; dgap_get_modem_info()
3987 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_get_modem_info()
3991 if (mstat & D_DTR(ch)) dgap_get_modem_info()
3993 if (mstat & D_RTS(ch)) dgap_get_modem_info()
3995 if (mstat & D_CTS(ch)) dgap_get_modem_info()
3997 if (mstat & D_DSR(ch)) dgap_get_modem_info()
3999 if (mstat & D_RI(ch)) dgap_get_modem_info()
4001 if (mstat & D_CD(ch)) dgap_get_modem_info()
4012 static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd, dgap_set_modem_info() argument
4028 ch->ch_mforce |= D_RTS(ch); dgap_set_modem_info()
4029 ch->ch_mval |= D_RTS(ch); dgap_set_modem_info()
4033 ch->ch_mforce |= D_DTR(ch); dgap_set_modem_info()
4034 ch->ch_mval |= D_DTR(ch); dgap_set_modem_info()
4041 ch->ch_mforce |= D_RTS(ch); dgap_set_modem_info()
4042 ch->ch_mval &= ~(D_RTS(ch)); dgap_set_modem_info()
4046 ch->ch_mforce |= D_DTR(ch); dgap_set_modem_info()
4047 ch->ch_mval &= ~(D_DTR(ch)); dgap_set_modem_info()
4053 ch->ch_mforce = D_DTR(ch)|D_RTS(ch); dgap_set_modem_info()
4056 ch->ch_mval |= D_RTS(ch); dgap_set_modem_info()
4058 ch->ch_mval &= ~(D_RTS(ch)); dgap_set_modem_info()
4061 ch->ch_mval |= (D_DTR(ch)); dgap_set_modem_info()
4063 ch->ch_mval &= ~(D_DTR(ch)); dgap_set_modem_info()
4072 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_set_modem_info()
4074 dgap_param(ch, bd, un->un_type); dgap_set_modem_info()
4076 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_set_modem_info()
4090 static int dgap_tty_digigeta(struct channel_t *ch, dgap_tty_digigeta() argument
4101 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_digigeta()
4102 memcpy(&tmp, &ch->ch_digi, sizeof(tmp)); dgap_tty_digigeta()
4103 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_digigeta()
4119 static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd, dgap_tty_digiseta() argument
4130 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_digiseta()
4132 memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t)); dgap_tty_digiseta()
4134 if (ch->ch_digi.digi_maxcps < 1) dgap_tty_digiseta()
4135 ch->ch_digi.digi_maxcps = 1; dgap_tty_digiseta()
4137 if (ch->ch_digi.digi_maxcps > 10000) dgap_tty_digiseta()
4138 ch->ch_digi.digi_maxcps = 10000; dgap_tty_digiseta()
4140 if (ch->ch_digi.digi_bufsize < 10) dgap_tty_digiseta()
4141 ch->ch_digi.digi_bufsize = 10; dgap_tty_digiseta()
4143 if (ch->ch_digi.digi_maxchar < 1) dgap_tty_digiseta()
4144 ch->ch_digi.digi_maxchar = 1; dgap_tty_digiseta()
4146 if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize) dgap_tty_digiseta()
4147 ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize; dgap_tty_digiseta()
4149 if (ch->ch_digi.digi_onlen > DIGI_PLEN) dgap_tty_digiseta()
4150 ch->ch_digi.digi_onlen = DIGI_PLEN; dgap_tty_digiseta()
4152 if (ch->ch_digi.digi_offlen > DIGI_PLEN) dgap_tty_digiseta()
4153 ch->ch_digi.digi_offlen = DIGI_PLEN; dgap_tty_digiseta()
4155 dgap_param(ch, bd, un->un_type); dgap_tty_digiseta()
4157 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_digiseta()
4173 struct channel_t *ch; dgap_tty_digigetedelay() local
4188 ch = un->un_ch; dgap_tty_digigetedelay()
4189 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_digigetedelay()
4194 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_digigetedelay()
4195 tmp = readw(&(ch->ch_bs->edelay)); dgap_tty_digigetedelay()
4196 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_digigetedelay()
4210 static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd, dgap_tty_digisetedelay() argument
4221 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_digisetedelay()
4223 writew((u16) new_digi, &(ch->ch_bs->edelay)); dgap_tty_digisetedelay()
4225 dgap_param(ch, bd, un->un_type); dgap_tty_digisetedelay()
4227 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_digisetedelay()
4238 static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un, dgap_tty_digigetcustombaud() argument
4249 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_digigetcustombaud()
4250 tmp = dgap_get_custom_baud(ch); dgap_tty_digigetcustombaud()
4251 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_digigetcustombaud()
4264 static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd, dgap_tty_digisetcustombaud() argument
4277 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_digisetcustombaud()
4279 ch->ch_custom_speed = new_rate; dgap_tty_digisetcustombaud()
4281 dgap_param(ch, bd, un->un_type); dgap_tty_digisetcustombaud()
4283 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_digisetcustombaud()
4297 struct channel_t *ch; dgap_tty_set_termios() local
4309 ch = un->un_ch; dgap_tty_set_termios()
4310 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_set_termios()
4313 bd = ch->ch_bd; dgap_tty_set_termios()
4318 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_set_termios()
4320 ch->ch_c_cflag = tty->termios.c_cflag; dgap_tty_set_termios()
4321 ch->ch_c_iflag = tty->termios.c_iflag; dgap_tty_set_termios()
4322 ch->ch_c_oflag = tty->termios.c_oflag; dgap_tty_set_termios()
4323 ch->ch_c_lflag = tty->termios.c_lflag; dgap_tty_set_termios()
4324 ch->ch_startc = tty->termios.c_cc[VSTART]; dgap_tty_set_termios()
4325 ch->ch_stopc = tty->termios.c_cc[VSTOP]; dgap_tty_set_termios()
4327 dgap_carrier(ch); dgap_tty_set_termios()
4328 dgap_param(ch, bd, un->un_type); dgap_tty_set_termios()
4330 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_set_termios()
4337 struct channel_t *ch; dgap_tty_throttle() local
4349 ch = un->un_ch; dgap_tty_throttle()
4350 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_throttle()
4353 bd = ch->ch_bd; dgap_tty_throttle()
4358 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_throttle()
4360 ch->ch_flags |= (CH_RXBLOCK); dgap_tty_throttle()
4362 dgap_cmdw(ch, RPAUSE, 0, 0); dgap_tty_throttle()
4365 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_throttle()
4373 struct channel_t *ch; dgap_tty_unthrottle() local
4385 ch = un->un_ch; dgap_tty_unthrottle()
4386 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_unthrottle()
4389 bd = ch->ch_bd; dgap_tty_unthrottle()
4394 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_unthrottle()
4396 ch->ch_flags &= ~(CH_RXBLOCK); dgap_tty_unthrottle()
4399 dgap_cmdw(ch, RRESUME, 0, 0); dgap_tty_unthrottle()
4402 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_unthrottle()
4436 struct channel_t *ch; dgap_tty_open() local
4477 ch = brd->channels[minor]; dgap_tty_open()
4478 if (!ch) { dgap_tty_open()
4484 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_open()
4494 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_open()
4505 bs = ch->ch_bs; dgap_tty_open()
4507 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_open()
4525 if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) { dgap_tty_open()
4527 ch->ch_mforce = 0; dgap_tty_open()
4528 ch->ch_mval = 0; dgap_tty_open()
4536 ch->ch_flags = 0; dgap_tty_open()
4537 ch->pscan_state = 0; dgap_tty_open()
4538 ch->pscan_savechar = 0; dgap_tty_open()
4540 ch->ch_c_cflag = tty->termios.c_cflag; dgap_tty_open()
4541 ch->ch_c_iflag = tty->termios.c_iflag; dgap_tty_open()
4542 ch->ch_c_oflag = tty->termios.c_oflag; dgap_tty_open()
4543 ch->ch_c_lflag = tty->termios.c_lflag; dgap_tty_open()
4544 ch->ch_startc = tty->termios.c_cc[VSTART]; dgap_tty_open()
4545 ch->ch_stopc = tty->termios.c_cc[VSTOP]; dgap_tty_open()
4550 dgap_carrier(ch); dgap_tty_open()
4554 dgap_param(ch, brd, un->un_type); dgap_tty_open()
4560 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_open()
4563 rc = dgap_block_til_ready(tty, file, ch); dgap_tty_open()
4569 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_open()
4570 ch->ch_open_count++; dgap_tty_open()
4573 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_open()
4586 struct channel_t *ch; dgap_tty_close() local
4597 ch = un->un_ch; dgap_tty_close()
4598 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_close()
4601 bd = ch->ch_bd; dgap_tty_close()
4607 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_close()
4627 ch->ch_open_count--; dgap_tty_close()
4629 if (ch->ch_open_count && un->un_open_count) { dgap_tty_close()
4630 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_close()
4644 if ((ch->ch_open_count == 0) && dgap_tty_close()
4645 !(ch->ch_digi.digi_flags & DIGI_PRINTER)) { dgap_tty_close()
4647 ch->ch_flags &= ~(CH_RXBLOCK); dgap_tty_close()
4649 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_close()
4659 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_close()
4666 if (ch->ch_c_cflag & HUPCL) { dgap_tty_close()
4667 ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch)); dgap_tty_close()
4668 dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0); dgap_tty_close()
4674 spin_unlock_irqrestore(&ch->ch_lock, dgap_tty_close()
4680 spin_lock_irqsave(&ch->ch_lock, lock_flags); dgap_tty_close()
4683 ch->pscan_state = 0; dgap_tty_close()
4684 ch->pscan_savechar = 0; dgap_tty_close()
4685 ch->ch_baud_info = 0; dgap_tty_close()
4692 if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) { dgap_tty_close()
4693 dgap_wmove(ch, ch->ch_digi.digi_offstr, dgap_tty_close()
4694 (int) ch->ch_digi.digi_offlen); dgap_tty_close()
4695 ch->ch_flags &= ~CH_PRON; dgap_tty_close()
4702 wake_up_interruptible(&ch->ch_flags_wait); dgap_tty_close()
4705 spin_unlock_irqrestore(&ch->ch_lock, lock_flags); dgap_tty_close()
4711 struct channel_t *ch; dgap_tty_start() local
4723 ch = un->un_ch; dgap_tty_start()
4724 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_start()
4727 bd = ch->ch_bd; dgap_tty_start()
4732 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_start()
4734 dgap_cmdw(ch, RESUMETX, 0, 0); dgap_tty_start()
4736 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_start()
4743 struct channel_t *ch; dgap_tty_stop() local
4755 ch = un->un_ch; dgap_tty_stop()
4756 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_stop()
4759 bd = ch->ch_bd; dgap_tty_stop()
4764 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_stop()
4766 dgap_cmdw(ch, PAUSETX, 0, 0); dgap_tty_stop()
4768 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_stop()
4788 struct channel_t *ch; dgap_tty_flush_chars() local
4800 ch = un->un_ch; dgap_tty_flush_chars()
4801 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_flush_chars()
4804 bd = ch->ch_bd; dgap_tty_flush_chars()
4809 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_flush_chars()
4813 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_flush_chars()
4832 struct channel_t *ch; dgap_tty_ioctl() local
4847 ch = un->un_ch; dgap_tty_ioctl()
4848 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_ioctl()
4851 bd = ch->ch_bd; dgap_tty_ioctl()
4856 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4859 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4878 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4889 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4892 dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0); dgap_tty_ioctl()
4894 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4907 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4917 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4919 dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0); dgap_tty_ioctl()
4921 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4934 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4944 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4946 dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0); dgap_tty_ioctl()
4948 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4960 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4966 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4974 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4982 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4985 dgap_param(ch, bd, un->un_type); dgap_tty_ioctl()
4986 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4992 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
4994 return dgap_get_modem_info(ch, uarg); dgap_tty_ioctl()
4999 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5001 return dgap_set_modem_info(ch, bd, un, cmd, uarg); dgap_tty_ioctl()
5019 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5026 head = readw(&(ch->ch_bs->rx_head)); dgap_tty_ioctl()
5027 writew(head, &(ch->ch_bs->rx_tail)); dgap_tty_ioctl()
5028 writeb(0, &(ch->ch_bs->orun)); dgap_tty_ioctl()
5034 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5040 ch->ch_flags &= ~CH_STOP; dgap_tty_ioctl()
5041 head = readw(&(ch->ch_bs->tx_head)); dgap_tty_ioctl()
5042 dgap_cmdw(ch, FLUSHTX, (u16) head, 0); dgap_tty_ioctl()
5043 dgap_cmdw(ch, RESUMETX, 0, 0); dgap_tty_ioctl()
5044 if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) { dgap_tty_ioctl()
5045 ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY); dgap_tty_ioctl()
5046 wake_up_interruptible(&ch->ch_tun.un_flags_wait); dgap_tty_ioctl()
5048 if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) { dgap_tty_ioctl()
5049 ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY); dgap_tty_ioctl()
5050 wake_up_interruptible(&ch->ch_pun.un_flags_wait); dgap_tty_ioctl()
5056 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5076 ch->ch_flags &= ~CH_STOP; dgap_tty_ioctl()
5077 head = readw(&(ch->ch_bs->rx_head)); dgap_tty_ioctl()
5078 writew(head, &(ch->ch_bs->rx_tail)); dgap_tty_ioctl()
5082 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5093 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5112 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5120 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5125 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5130 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5135 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5140 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5147 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5149 return dgap_tty_digigeta(ch, uarg); dgap_tty_ioctl()
5157 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5163 spin_lock_irqsave(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5169 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5171 return dgap_tty_digiseta(ch, bd, un, uarg); dgap_tty_ioctl()
5174 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5179 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5181 return dgap_tty_digisetedelay(ch, bd, un, uarg); dgap_tty_ioctl()
5184 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5186 return dgap_tty_digigetcustombaud(ch, un, uarg); dgap_tty_ioctl()
5189 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5191 return dgap_tty_digisetcustombaud(ch, bd, un, uarg); dgap_tty_ioctl()
5194 dgap_firmware_reset_port(ch); dgap_tty_ioctl()
5195 dgap_param(ch, bd, un->un_type); dgap_tty_ioctl()
5196 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5201 spin_unlock_irqrestore(&ch->ch_lock, lock_flags2); dgap_tty_ioctl()
5593 struct channel_t *ch; dgap_tty_state_show() local
5601 ch = un->un_ch; dgap_tty_state_show()
5602 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_state_show()
5604 bd = ch->ch_bd; dgap_tty_state_show()
5620 struct channel_t *ch; dgap_tty_baud_show() local
5628 ch = un->un_ch; dgap_tty_baud_show()
5629 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_baud_show()
5631 bd = ch->ch_bd; dgap_tty_baud_show()
5637 return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info); dgap_tty_baud_show()
5646 struct channel_t *ch; dgap_tty_msignals_show() local
5654 ch = un->un_ch; dgap_tty_msignals_show()
5655 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_msignals_show()
5657 bd = ch->ch_bd; dgap_tty_msignals_show()
5663 if (ch->ch_open_count) { dgap_tty_msignals_show()
5665 (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "", dgap_tty_msignals_show()
5666 (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "", dgap_tty_msignals_show()
5667 (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "", dgap_tty_msignals_show()
5668 (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "", dgap_tty_msignals_show()
5669 (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "", dgap_tty_msignals_show()
5670 (ch->ch_mistat & UART_MSR_RI) ? "RI" : ""); dgap_tty_msignals_show()
5681 struct channel_t *ch; dgap_tty_iflag_show() local
5689 ch = un->un_ch; dgap_tty_iflag_show()
5690 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_iflag_show()
5692 bd = ch->ch_bd; dgap_tty_iflag_show()
5698 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag); dgap_tty_iflag_show()
5707 struct channel_t *ch; dgap_tty_cflag_show() local
5715 ch = un->un_ch; dgap_tty_cflag_show()
5716 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_cflag_show()
5718 bd = ch->ch_bd; dgap_tty_cflag_show()
5724 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag); dgap_tty_cflag_show()
5733 struct channel_t *ch; dgap_tty_oflag_show() local
5741 ch = un->un_ch; dgap_tty_oflag_show()
5742 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_oflag_show()
5744 bd = ch->ch_bd; dgap_tty_oflag_show()
5750 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag); dgap_tty_oflag_show()
5759 struct channel_t *ch; dgap_tty_lflag_show() local
5767 ch = un->un_ch; dgap_tty_lflag_show()
5768 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_lflag_show()
5770 bd = ch->ch_bd; dgap_tty_lflag_show()
5776 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag); dgap_tty_lflag_show()
5785 struct channel_t *ch; dgap_tty_digi_flag_show() local
5793 ch = un->un_ch; dgap_tty_digi_flag_show()
5794 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_digi_flag_show()
5796 bd = ch->ch_bd; dgap_tty_digi_flag_show()
5802 return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags); dgap_tty_digi_flag_show()
5811 struct channel_t *ch; dgap_tty_rxcount_show() local
5819 ch = un->un_ch; dgap_tty_rxcount_show()
5820 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_rxcount_show()
5822 bd = ch->ch_bd; dgap_tty_rxcount_show()
5828 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount); dgap_tty_rxcount_show()
5837 struct channel_t *ch; dgap_tty_txcount_show() local
5845 ch = un->un_ch; dgap_tty_txcount_show()
5846 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_txcount_show()
5848 bd = ch->ch_bd; dgap_tty_txcount_show()
5854 return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount); dgap_tty_txcount_show()
5863 struct channel_t *ch; dgap_tty_name_show() local
5878 ch = un->un_ch; dgap_tty_name_show()
5879 if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) dgap_tty_name_show()
5881 bd = ch->ch_bd; dgap_tty_name_show()
5888 cn = ch->ch_portnum; dgap_tty_name_show()
6471 struct channel_t *ch; dgap_tty_register_ports() local
6492 ch = brd->channels[0]; dgap_tty_register_ports()
6493 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) { dgap_tty_register_ports()
6506 dgap_create_tty_sysfs(&ch->ch_tun, classp); dgap_tty_register_ports()
6507 ch->ch_tun.un_sysfs = classp; dgap_tty_register_ports()
6518 dgap_create_tty_sysfs(&ch->ch_pun, classp); dgap_tty_register_ports()
6519 ch->ch_pun.un_sysfs = classp; dgap_tty_register_ports()
6527 ch = brd->channels[i]; dgap_tty_register_ports()
6528 if (ch->ch_tun.un_sysfs) { dgap_tty_register_ports()
6529 dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs); dgap_tty_register_ports()
6533 if (ch->ch_pun.un_sysfs) { dgap_tty_register_ports()
6534 dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs); dgap_tty_register_ports()
6756 struct channel_t *ch; dgap_tty_init() local
6812 ch = brd->channels[0]; dgap_tty_init()
6821 for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) { dgap_tty_init()
6823 spin_lock_init(&ch->ch_lock); dgap_tty_init()
6826 ch->magic = DGAP_CHANNEL_MAGIC; dgap_tty_init()
6827 ch->ch_tun.magic = DGAP_UNIT_MAGIC; dgap_tty_init()
6828 ch->ch_tun.un_type = DGAP_SERIAL; dgap_tty_init()
6829 ch->ch_tun.un_ch = ch; dgap_tty_init()
6830 ch->ch_tun.un_dev = i; dgap_tty_init()
6832 ch->ch_pun.magic = DGAP_UNIT_MAGIC; dgap_tty_init()
6833 ch->ch_pun.un_type = DGAP_PRINT; dgap_tty_init()
6834 ch->ch_pun.un_ch = ch; dgap_tty_init()
6835 ch->ch_pun.un_dev = i; dgap_tty_init()
6837 ch->ch_vaddr = vaddr; dgap_tty_init()
6838 ch->ch_bs = bs; dgap_tty_init()
6839 ch->ch_cm = cm; dgap_tty_init()
6840 ch->ch_bd = brd; dgap_tty_init()
6841 ch->ch_portnum = i; dgap_tty_init()
6842 ch->ch_digi = dgap_digi_init; dgap_tty_init()
6848 ch->ch_dsr = DM_CD; dgap_tty_init()
6849 ch->ch_cd = DM_DSR; dgap_tty_init()
6850 ch->ch_digi.digi_flags |= DIGI_ALTPIN; dgap_tty_init()
6852 ch->ch_cd = DM_CD; dgap_tty_init()
6853 ch->ch_dsr = DM_DSR; dgap_tty_init()
6856 ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4); dgap_tty_init()
6857 ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4); dgap_tty_init()
6858 ch->ch_tx_win = 0; dgap_tty_init()
6859 ch->ch_rx_win = 0; dgap_tty_init()
6860 ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1; dgap_tty_init()
6861 ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1; dgap_tty_init()
6862 ch->ch_tstart = 0; dgap_tty_init()
6863 ch->ch_rstart = 0; dgap_tty_init()
6869 tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : dgap_tty_init()
6870 ch->ch_tsize / 2; dgap_tty_init()
6871 ch->ch_tlw = tlw; dgap_tty_init()
6873 dgap_cmdw(ch, STLOW, tlw, 0); dgap_tty_init()
6875 dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0); dgap_tty_init()
6877 dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0); dgap_tty_init()
6879 ch->ch_mistat = readb(&(ch->ch_bs->m_stat)); dgap_tty_init()
6881 init_waitqueue_head(&ch->ch_flags_wait); dgap_tty_init()
6882 init_waitqueue_head(&ch->ch_tun.un_flags_wait); dgap_tty_init()
6883 init_waitqueue_head(&ch->ch_pun.un_flags_wait); dgap_tty_init()
6887 writeb(modem, &(ch->ch_bs->m_int)); dgap_tty_init()
6894 writew(0, &(ch->ch_bs->edelay)); dgap_tty_init()
6896 writew(100, &(ch->ch_bs->edelay)); dgap_tty_init()
6898 writeb(1, &(ch->ch_bs->idata)); dgap_tty_init()
3067 dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch) dgap_block_til_ready() argument
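
Almost every dgap.c hit above opens with the same guard: bail out if the channel pointer is NULL or its magic field no longer matches DGAP_CHANNEL_MAGIC. A minimal sketch of that defensive pattern; the structure, constant value, and function below are invented for illustration and are not the driver's own definitions.

#include <stdio.h>

#define DEMO_CHANNEL_MAGIC 0x6021	/* illustrative value only */

struct demo_channel {
	unsigned int magic;		/* set when the channel is initialised */
	int portnum;
};

/* Same shape as the dgap guards: reject NULL or stale/corrupt structures. */
static int demo_channel_op(struct demo_channel *ch)
{
	if (!ch || ch->magic != DEMO_CHANNEL_MAGIC)
		return -1;

	printf("operating on port %d\n", ch->portnum);
	return 0;
}

int main(void)
{
	struct demo_channel good = { .magic = DEMO_CHANNEL_MAGIC, .portnum = 3 };
	struct demo_channel stale = { .magic = 0, .portnum = 4 };

	demo_channel_op(&good);		/* accepted */
	demo_channel_op(&stale);	/* rejected by the magic check */
	demo_channel_op(NULL);		/* rejected by the NULL check */
	return 0;
}
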
/linux-4.1.27/arch/sparc/include/asm/
H A Dchafsr.h8 * ch --> cheetah
9 * ch+ --> cheetah plus
25 #define CHPAFSR_DTO (1UL << 59UL) /* ch+ */
30 #define CHPAFSR_DBERR (1UL << 58UL) /* ch+ */
33 #define CHPAFSR_THCE (1UL << 57UL) /* ch+ */
38 #define CHPAFSR_TSCE (1UL << 56UL) /* ch+ */
43 #define CHPAFSR_TUE (1UL << 55UL) /* ch+ */
50 #define CHPAFSR_DUE (1UL << 54UL) /* ch+ */
68 #define CHAFSR_ME (1UL << 53UL) /* ch,ch+,jp */
73 #define CHAFSR_PRIV (1UL << 52UL) /* ch,ch+,jp */
84 #define CHAFSR_PERR (1UL << 51UL) /* ch,ch+,jp */
90 #define CHAFSR_IERR (1UL << 50UL) /* ch,ch+,jp */
93 #define CHAFSR_ISAP (1UL << 49UL) /* ch,ch+,jp */
96 #define CHAFSR_EMC (1UL << 48UL) /* ch,ch+ */
101 #define CHAFSR_EMU (1UL << 47UL) /* ch,ch+ */
106 #define CHAFSR_IVC (1UL << 46UL) /* ch,ch+ */
111 #define CHAFSR_IVU (1UL << 45UL) /* ch,ch+,jp */
114 #define CHAFSR_TO (1UL << 44UL) /* ch,ch+,jp */
117 #define CHAFSR_BERR (1UL << 43UL) /* ch,ch+,jp */
122 #define CHAFSR_UCC (1UL << 42UL) /* ch,ch+,jp */
127 #define CHAFSR_UCU (1UL << 41UL) /* ch,ch+,jp */
130 #define CHAFSR_CPC (1UL << 40UL) /* ch,ch+,jp */
133 #define CHAFSR_CPU (1UL << 39UL) /* ch,ch+,jp */
136 #define CHAFSR_WDC (1UL << 38UL) /* ch,ch+,jp */
139 #define CHAFSR_WDU (1UL << 37UL) /* ch,ch+,jp */
142 #define CHAFSR_EDC (1UL << 36UL) /* ch,ch+,jp */
145 #define CHAFSR_EDU (1UL << 35UL) /* ch,ch+,jp */
148 #define CHAFSR_UE (1UL << 34UL) /* ch,ch+,jp */
151 #define CHAFSR_CE (1UL << 33UL) /* ch,ch+,jp */
208 #define CHAFSR_M_SYNDROME (0xfUL << 16UL) /* ch,ch+,jp */
221 #define CHAFSR_E_SYNDROME (0x1ffUL << 0UL) /* ch,ch+,jp */
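
The chafsr.h hits are single-bit definitions for the Cheetah asynchronous fault status register, so checking them is plain mask arithmetic. A small sketch below; it redefines three of the bits with ULL constants so it also builds where unsigned long is 32 bits, and the afsr value is made up for illustration.

#include <stdio.h>

#define CHAFSR_PRIV	(1ULL << 52)
#define CHAFSR_TO	(1ULL << 44)
#define CHAFSR_BERR	(1ULL << 43)

int main(void)
{
	unsigned long long afsr = CHAFSR_PRIV | CHAFSR_BERR;	/* sample value */

	if (afsr & CHAFSR_PRIV)
		printf("CHAFSR_PRIV is set\n");
	if (afsr & CHAFSR_TO)
		printf("CHAFSR_TO is set\n");
	if (afsr & CHAFSR_BERR)
		printf("CHAFSR_BERR is set\n");
	return 0;
}
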
/linux-4.1.27/drivers/video/fbdev/
H A Dsh_mobile_lcdcfb.c214 struct sh_mobile_lcdc_chan ch[2]; member in struct:sh_mobile_lcdc_priv
407 struct sh_mobile_lcdc_chan *ch = handle; lcdc_sys_write_index() local
409 lcdc_write(ch->lcdc, _LDDWD0R, data | LDDWDxR_WDACT); lcdc_sys_write_index()
410 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_write_index()
411 lcdc_write(ch->lcdc, _LDDWAR, LDDWAR_WA | lcdc_sys_write_index()
412 (lcdc_chan_is_sublcd(ch) ? 2 : 0)); lcdc_sys_write_index()
413 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_write_index()
418 struct sh_mobile_lcdc_chan *ch = handle; lcdc_sys_write_data() local
420 lcdc_write(ch->lcdc, _LDDWD0R, data | LDDWDxR_WDACT | LDDWDxR_RSW); lcdc_sys_write_data()
421 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_write_data()
422 lcdc_write(ch->lcdc, _LDDWAR, LDDWAR_WA | lcdc_sys_write_data()
423 (lcdc_chan_is_sublcd(ch) ? 2 : 0)); lcdc_sys_write_data()
424 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_write_data()
429 struct sh_mobile_lcdc_chan *ch = handle; lcdc_sys_read_data() local
431 lcdc_write(ch->lcdc, _LDDRDR, LDDRDR_RSR); lcdc_sys_read_data()
432 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_read_data()
433 lcdc_write(ch->lcdc, _LDDRAR, LDDRAR_RA | lcdc_sys_read_data()
434 (lcdc_chan_is_sublcd(ch) ? 2 : 0)); lcdc_sys_read_data()
436 lcdc_wait_bit(ch->lcdc, _LDSR, LDSR_AS, 0); lcdc_sys_read_data()
438 return lcdc_read(ch->lcdc, _LDDRDR) & LDDRDR_DRD_MASK; lcdc_sys_read_data()
450 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_sginit() local
451 unsigned int nr_pages_max = ch->fb_size >> PAGE_SHIFT; sh_mobile_lcdc_sginit()
455 sg_init_table(ch->sglist, nr_pages_max); sh_mobile_lcdc_sginit()
458 sg_set_page(&ch->sglist[nr_pages++], page, PAGE_SIZE, 0); sh_mobile_lcdc_sginit()
466 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_deferred_io() local
467 const struct sh_mobile_lcdc_panel_cfg *panel = &ch->cfg->panel_cfg; sh_mobile_lcdc_deferred_io()
470 sh_mobile_lcdc_clk_on(ch->lcdc); sh_mobile_lcdc_deferred_io()
491 dma_map_sg(ch->lcdc->dev, ch->sglist, nr_pages, DMA_TO_DEVICE); sh_mobile_lcdc_deferred_io()
493 panel->start_transfer(ch, &sh_mobile_lcdc_sys_bus_ops); sh_mobile_lcdc_deferred_io()
494 lcdc_write_chan(ch, LDSM2R, LDSM2R_OSTRG); sh_mobile_lcdc_deferred_io()
495 dma_unmap_sg(ch->lcdc->dev, ch->sglist, nr_pages, sh_mobile_lcdc_deferred_io()
499 panel->start_transfer(ch, &sh_mobile_lcdc_sys_bus_ops); sh_mobile_lcdc_deferred_io()
500 lcdc_write_chan(ch, LDSM2R, LDSM2R_OSTRG); sh_mobile_lcdc_deferred_io()
512 static void sh_mobile_lcdc_display_on(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_display_on() argument
514 const struct sh_mobile_lcdc_panel_cfg *panel = &ch->cfg->panel_cfg; sh_mobile_lcdc_display_on()
516 if (ch->tx_dev) { sh_mobile_lcdc_display_on()
519 ret = ch->tx_dev->ops->display_on(ch->tx_dev); sh_mobile_lcdc_display_on()
524 ch->info->state = FBINFO_STATE_SUSPENDED; sh_mobile_lcdc_display_on()
532 static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_display_off() argument
534 const struct sh_mobile_lcdc_panel_cfg *panel = &ch->cfg->panel_cfg; sh_mobile_lcdc_display_off()
539 if (ch->tx_dev) sh_mobile_lcdc_display_off()
540 ch->tx_dev->ops->display_off(ch->tx_dev); sh_mobile_lcdc_display_off()
544 sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch, sh_mobile_lcdc_must_reconfigure() argument
547 dev_dbg(ch->info->dev, "Old %ux%u, new %ux%u\n", sh_mobile_lcdc_must_reconfigure()
548 ch->display.mode.xres, ch->display.mode.yres, sh_mobile_lcdc_must_reconfigure()
552 if (fb_mode_is_equal(&ch->display.mode, new_mode)) sh_mobile_lcdc_must_reconfigure()
555 dev_dbg(ch->info->dev, "Switching %u -> %u lines\n", sh_mobile_lcdc_must_reconfigure()
556 ch->display.mode.yres, new_mode->yres); sh_mobile_lcdc_must_reconfigure()
557 ch->display.mode = *new_mode; sh_mobile_lcdc_must_reconfigure()
565 static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, sh_mobile_lcdc_display_notify() argument
570 struct fb_info *info = ch->info; sh_mobile_lcdc_display_notify()
581 ch->display.width = monspec->max_x * 10; sh_mobile_lcdc_display_notify()
582 ch->display.height = monspec->max_y * 10; sh_mobile_lcdc_display_notify()
584 if (!sh_mobile_lcdc_must_reconfigure(ch, mode) && sh_mobile_lcdc_display_notify()
590 info->var.width = ch->display.width; sh_mobile_lcdc_display_notify()
591 info->var.height = ch->display.height; sh_mobile_lcdc_display_notify()
592 sh_mobile_lcdc_display_on(ch); sh_mobile_lcdc_display_notify()
728 struct sh_mobile_lcdc_chan *ch; sh_mobile_lcdc_irq() local
741 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_irq()
742 ch = &priv->ch[k]; sh_mobile_lcdc_irq()
744 if (!ch->enabled) sh_mobile_lcdc_irq()
749 if (is_sub == lcdc_chan_is_sublcd(ch)) { sh_mobile_lcdc_irq()
750 ch->frame_end = 1; sh_mobile_lcdc_irq()
751 wake_up(&ch->frame_end_wait); sh_mobile_lcdc_irq()
759 complete(&ch->vsync_completion); sh_mobile_lcdc_irq()
765 static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_wait_for_vsync() argument
773 ldintr = lcdc_read(ch->lcdc, _LDINTR); sh_mobile_lcdc_wait_for_vsync()
775 lcdc_write(ch->lcdc, _LDINTR, ldintr); sh_mobile_lcdc_wait_for_vsync()
777 ret = wait_for_completion_interruptible_timeout(&ch->vsync_completion, sh_mobile_lcdc_wait_for_vsync()
798 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) sh_mobile_lcdc_start_stop()
799 if (lcdc_read(priv, _LDCNT2R) & priv->ch[k].enabled) sh_mobile_lcdc_start_stop()
801 tmp = lcdc_read_chan(&priv->ch[k], LDPMR) sh_mobile_lcdc_start_stop()
814 static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_geometry() argument
816 const struct fb_var_screeninfo *var = &ch->info->var; sh_mobile_lcdc_geometry()
817 const struct fb_videomode *mode = &ch->display.mode; sh_mobile_lcdc_geometry()
821 tmp = ch->ldmt1r_value; sh_mobile_lcdc_geometry()
824 tmp |= (ch->cfg->flags & LCDC_FLAGS_DWPOL) ? LDMT1R_DWPOL : 0; sh_mobile_lcdc_geometry()
825 tmp |= (ch->cfg->flags & LCDC_FLAGS_DIPOL) ? LDMT1R_DIPOL : 0; sh_mobile_lcdc_geometry()
826 tmp |= (ch->cfg->flags & LCDC_FLAGS_DAPOL) ? LDMT1R_DAPOL : 0; sh_mobile_lcdc_geometry()
827 tmp |= (ch->cfg->flags & LCDC_FLAGS_HSCNT) ? LDMT1R_HSCNT : 0; sh_mobile_lcdc_geometry()
828 tmp |= (ch->cfg->flags & LCDC_FLAGS_DWCNT) ? LDMT1R_DWCNT : 0; sh_mobile_lcdc_geometry()
829 lcdc_write_chan(ch, LDMT1R, tmp); sh_mobile_lcdc_geometry()
832 lcdc_write_chan(ch, LDMT2R, ch->cfg->sys_bus_cfg.ldmt2r); sh_mobile_lcdc_geometry()
833 lcdc_write_chan(ch, LDMT3R, ch->cfg->sys_bus_cfg.ldmt3r); sh_mobile_lcdc_geometry()
839 tmp |= (min(mode->xres, ch->xres) / 8) << 16; /* HDCN */ sh_mobile_lcdc_geometry()
840 lcdc_write_chan(ch, LDHCNR, tmp); sh_mobile_lcdc_geometry()
845 lcdc_write_chan(ch, LDHSYNR, tmp); sh_mobile_lcdc_geometry()
850 tmp |= min(mode->yres, ch->yres) << 16; /* VDLN */ sh_mobile_lcdc_geometry()
851 lcdc_write_chan(ch, LDVLNR, tmp); sh_mobile_lcdc_geometry()
855 lcdc_write_chan(ch, LDVSYNR, tmp); sh_mobile_lcdc_geometry()
862 lcdc_write_chan(ch, LDHAJR, tmp); sh_mobile_lcdc_geometry()
863 lcdc_write_chan_mirror(ch, LDHAJR, tmp); sh_mobile_lcdc_geometry()
965 struct sh_mobile_lcdc_chan *ch; __sh_mobile_lcdc_start() local
972 lcdc_write(priv, _LDCNT2R, priv->ch[0].enabled | priv->ch[1].enabled); __sh_mobile_lcdc_start()
980 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { __sh_mobile_lcdc_start()
981 ch = &priv->ch[k]; __sh_mobile_lcdc_start()
982 if (!ch->enabled) __sh_mobile_lcdc_start()
986 lcdc_write_chan(ch, LDPMR, 0); __sh_mobile_lcdc_start()
988 m = ch->cfg->clock_divider; __sh_mobile_lcdc_start()
995 lcdc_write_chan(ch, LDDCKPAT1R, 0); __sh_mobile_lcdc_start()
996 lcdc_write_chan(ch, LDDCKPAT2R, (1 << (m/2)) - 1); __sh_mobile_lcdc_start()
1000 tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0); __sh_mobile_lcdc_start()
1008 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { __sh_mobile_lcdc_start()
1009 ch = &priv->ch[k]; __sh_mobile_lcdc_start()
1010 if (!ch->enabled) __sh_mobile_lcdc_start()
1013 sh_mobile_lcdc_geometry(ch); __sh_mobile_lcdc_start()
1015 tmp = ch->format->lddfr; __sh_mobile_lcdc_start()
1017 if (ch->format->yuv) { __sh_mobile_lcdc_start()
1018 switch (ch->colorspace) { __sh_mobile_lcdc_start()
1028 lcdc_write_chan(ch, LDDFR, tmp); __sh_mobile_lcdc_start()
1029 lcdc_write_chan(ch, LDMLSR, ch->line_size); __sh_mobile_lcdc_start()
1030 lcdc_write_chan(ch, LDSA1R, ch->base_addr_y); __sh_mobile_lcdc_start()
1031 if (ch->format->yuv) __sh_mobile_lcdc_start()
1032 lcdc_write_chan(ch, LDSA2R, ch->base_addr_c); __sh_mobile_lcdc_start()
1038 if (ch->ldmt1r_value & LDMT1R_IFM && __sh_mobile_lcdc_start()
1039 ch->cfg->sys_bus_cfg.deferred_io_msec) { __sh_mobile_lcdc_start()
1040 lcdc_write_chan(ch, LDSM1R, LDSM1R_OS); __sh_mobile_lcdc_start()
1043 lcdc_write_chan(ch, LDSM1R, 0); __sh_mobile_lcdc_start()
1048 switch (priv->ch[0].format->fourcc) { __sh_mobile_lcdc_start()
1077 struct sh_mobile_lcdc_chan *ch; sh_mobile_lcdc_start() local
1083 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_start()
1084 if (priv->ch[k].enabled) sh_mobile_lcdc_start()
1092 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_start()
1095 ch = &priv->ch[k]; sh_mobile_lcdc_start()
1096 if (!ch->enabled) sh_mobile_lcdc_start()
1099 panel = &ch->cfg->panel_cfg; sh_mobile_lcdc_start()
1101 ret = panel->setup_sys(ch, &sh_mobile_lcdc_sys_bus_ops); sh_mobile_lcdc_start()
1108 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_start()
1112 ch = &priv->ch[k]; sh_mobile_lcdc_start()
1113 if (!ch->enabled) sh_mobile_lcdc_start()
1116 ch->base_addr_y = ch->dma_handle; sh_mobile_lcdc_start()
1117 ch->base_addr_c = ch->dma_handle sh_mobile_lcdc_start()
1118 + ch->xres_virtual * ch->yres_virtual; sh_mobile_lcdc_start()
1119 ch->line_size = ch->pitch; sh_mobile_lcdc_start()
1122 if (mdev == NULL || ch->cfg->meram_cfg == NULL) sh_mobile_lcdc_start()
1126 if (ch->cache) { sh_mobile_lcdc_start()
1127 sh_mobile_meram_cache_free(mdev, ch->cache); sh_mobile_lcdc_start()
1128 ch->cache = NULL; sh_mobile_lcdc_start()
1131 switch (ch->format->fourcc) { sh_mobile_lcdc_start()
1150 cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg, sh_mobile_lcdc_start()
1151 ch->pitch, ch->yres, pixelformat, sh_mobile_lcdc_start()
1152 &ch->line_size); sh_mobile_lcdc_start()
1155 ch->base_addr_y, ch->base_addr_c, sh_mobile_lcdc_start()
1156 &ch->base_addr_y, &ch->base_addr_c); sh_mobile_lcdc_start()
1157 ch->cache = cache; sh_mobile_lcdc_start()
1172 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_start()
1173 ch = &priv->ch[k]; sh_mobile_lcdc_start()
1174 if (!ch->enabled) sh_mobile_lcdc_start()
1177 tmp = ch->cfg->sys_bus_cfg.deferred_io_msec; sh_mobile_lcdc_start()
1178 if (ch->ldmt1r_value & LDMT1R_IFM && tmp) { sh_mobile_lcdc_start()
1179 ch->defio.deferred_io = sh_mobile_lcdc_deferred_io; sh_mobile_lcdc_start()
1180 ch->defio.delay = msecs_to_jiffies(tmp); sh_mobile_lcdc_start()
1181 ch->info->fbdefio = &ch->defio; sh_mobile_lcdc_start()
1182 fb_deferred_io_init(ch->info); sh_mobile_lcdc_start()
1185 sh_mobile_lcdc_display_on(ch); sh_mobile_lcdc_start()
1187 if (ch->bl) { sh_mobile_lcdc_start()
1188 ch->bl->props.power = FB_BLANK_UNBLANK; sh_mobile_lcdc_start()
1189 backlight_update_status(ch->bl); sh_mobile_lcdc_start()
1198 struct sh_mobile_lcdc_chan *ch; sh_mobile_lcdc_stop() local
1202 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { sh_mobile_lcdc_stop()
1203 ch = &priv->ch[k]; sh_mobile_lcdc_stop()
1204 if (!ch->enabled) sh_mobile_lcdc_stop()
1211 if (ch->info && ch->info->fbdefio) { sh_mobile_lcdc_stop()
1212 ch->frame_end = 0; sh_mobile_lcdc_stop()
1213 schedule_delayed_work(&ch->info->deferred_work, 0); sh_mobile_lcdc_stop()
1214 wait_event(ch->frame_end_wait, ch->frame_end); sh_mobile_lcdc_stop()
1215 fb_deferred_io_cleanup(ch->info); sh_mobile_lcdc_stop()
1216 ch->info->fbdefio = NULL; sh_mobile_lcdc_stop()
1220 if (ch->bl) { sh_mobile_lcdc_stop()
1221 ch->bl->props.power = FB_BLANK_POWERDOWN; sh_mobile_lcdc_stop()
1222 backlight_update_status(ch->bl); sh_mobile_lcdc_stop()
1225 sh_mobile_lcdc_display_off(ch); sh_mobile_lcdc_stop()
1228 if (ch->cache) { sh_mobile_lcdc_stop()
1229 sh_mobile_meram_cache_free(priv->meram_dev, ch->cache); sh_mobile_lcdc_stop()
1230 ch->cache = NULL; sh_mobile_lcdc_stop()
1242 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) sh_mobile_lcdc_stop()
1243 if (priv->ch[k].enabled) sh_mobile_lcdc_stop()
1823 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_pan() local
1824 struct sh_mobile_lcdc_priv *priv = ch->lcdc; sh_mobile_lcdc_pan()
1830 if (!ch->format->yuv) { sh_mobile_lcdc_pan()
1831 y_offset = (var->yoffset * ch->xres_virtual + var->xoffset) sh_mobile_lcdc_pan()
1832 * ch->format->bpp / 8; sh_mobile_lcdc_pan()
1835 unsigned int xsub = ch->format->bpp < 24 ? 2 : 1; sh_mobile_lcdc_pan()
1836 unsigned int ysub = ch->format->bpp < 16 ? 2 : 1; sh_mobile_lcdc_pan()
1838 y_offset = var->yoffset * ch->xres_virtual + var->xoffset; sh_mobile_lcdc_pan()
1839 c_offset = var->yoffset / ysub * ch->xres_virtual * 2 / xsub sh_mobile_lcdc_pan()
1846 if (y_offset == ch->pan_y_offset) sh_mobile_lcdc_pan()
1850 base_addr_y = ch->dma_handle + y_offset; sh_mobile_lcdc_pan()
1851 base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual sh_mobile_lcdc_pan()
1854 if (ch->cache) sh_mobile_lcdc_pan()
1855 sh_mobile_meram_cache_update(priv->meram_dev, ch->cache, sh_mobile_lcdc_pan()
1859 ch->base_addr_y = base_addr_y; sh_mobile_lcdc_pan()
1860 ch->base_addr_c = base_addr_c; sh_mobile_lcdc_pan()
1861 ch->pan_y_offset = y_offset; sh_mobile_lcdc_pan()
1863 lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); sh_mobile_lcdc_pan()
1864 if (ch->format->yuv) sh_mobile_lcdc_pan()
1865 lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); sh_mobile_lcdc_pan()
1868 if (lcdc_chan_is_sublcd(ch)) sh_mobile_lcdc_pan()
1869 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); sh_mobile_lcdc_pan()
1871 lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS); sh_mobile_lcdc_pan()
1882 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_ioctl() local
1887 retval = sh_mobile_lcdc_wait_for_vsync(ch); sh_mobile_lcdc_ioctl()
1899 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_fb_reconfig() local
1905 if (ch->use_count > 1 || (ch->use_count == 1 && !info->fbcon_par)) sh_mobile_fb_reconfig()
1911 if (fb_mode_is_equal(&ch->display.mode, &mode)) sh_mobile_fb_reconfig()
1916 fb_videomode_to_var(&var, &ch->display.mode); sh_mobile_fb_reconfig()
1917 var.width = ch->display.width; sh_mobile_fb_reconfig()
1918 var.height = ch->display.height; sh_mobile_fb_reconfig()
1931 event.data = &ch->display.mode; sh_mobile_fb_reconfig()
1941 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_release() local
1943 mutex_lock(&ch->open_lock); sh_mobile_lcdc_release()
1944 dev_dbg(info->dev, "%s(): %d users\n", __func__, ch->use_count); sh_mobile_lcdc_release()
1946 ch->use_count--; sh_mobile_lcdc_release()
1955 mutex_unlock(&ch->open_lock); sh_mobile_lcdc_release()
1962 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_open() local
1964 mutex_lock(&ch->open_lock); sh_mobile_lcdc_open()
1965 ch->use_count++; sh_mobile_lcdc_open()
1967 dev_dbg(info->dev, "%s(): %d users\n", __func__, ch->use_count); sh_mobile_lcdc_open()
1968 mutex_unlock(&ch->open_lock); sh_mobile_lcdc_open()
1976 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_check_var() local
1977 struct sh_mobile_lcdc_priv *p = ch->lcdc; sh_mobile_lcdc_check_var()
1989 for (i = 0; i < ch->cfg->num_modes; ++i) { sh_mobile_lcdc_check_var()
1990 const struct fb_videomode *mode = &ch->cfg->lcd_modes[i]; sh_mobile_lcdc_check_var()
2009 if (ch->cfg->num_modes != 0) { sh_mobile_lcdc_check_var()
2031 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_set_par() local
2034 sh_mobile_lcdc_stop(ch->lcdc); sh_mobile_lcdc_set_par()
2036 ch->format = sh_mobile_format_info(sh_mobile_format_fourcc(&info->var)); sh_mobile_lcdc_set_par()
2037 ch->colorspace = info->var.colorspace; sh_mobile_lcdc_set_par()
2039 ch->xres = info->var.xres; sh_mobile_lcdc_set_par()
2040 ch->xres_virtual = info->var.xres_virtual; sh_mobile_lcdc_set_par()
2041 ch->yres = info->var.yres; sh_mobile_lcdc_set_par()
2042 ch->yres_virtual = info->var.yres_virtual; sh_mobile_lcdc_set_par()
2044 if (ch->format->yuv) sh_mobile_lcdc_set_par()
2045 ch->pitch = info->var.xres_virtual; sh_mobile_lcdc_set_par()
2047 ch->pitch = info->var.xres_virtual * ch->format->bpp / 8; sh_mobile_lcdc_set_par()
2049 ret = sh_mobile_lcdc_start(ch->lcdc); sh_mobile_lcdc_set_par()
2053 info->fix.line_length = ch->pitch; sh_mobile_lcdc_set_par()
2076 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_blank() local
2077 struct sh_mobile_lcdc_priv *p = ch->lcdc; sh_mobile_lcdc_blank()
2080 if (blank > FB_BLANK_UNBLANK && ch->blank_status == FB_BLANK_UNBLANK) { sh_mobile_lcdc_blank()
2082 .width = ch->xres, sh_mobile_lcdc_blank()
2083 .height = ch->yres, sh_mobile_lcdc_blank()
2088 if (blank <= FB_BLANK_NORMAL && ch->blank_status > FB_BLANK_NORMAL) { sh_mobile_lcdc_blank()
2092 if (blank > FB_BLANK_NORMAL && ch->blank_status <= FB_BLANK_NORMAL) { sh_mobile_lcdc_blank()
2099 sh_mobile_lcdc_wait_for_vsync(ch); sh_mobile_lcdc_blank()
2100 sh_mobile_lcdc_wait_for_vsync(ch); sh_mobile_lcdc_blank()
2105 ch->blank_status = blank; sh_mobile_lcdc_blank()
2112 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_mmap() local
2114 return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem, sh_mobile_lcdc_mmap()
2115 ch->dma_handle, ch->fb_size); sh_mobile_lcdc_mmap()
2137 sh_mobile_lcdc_channel_fb_unregister(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_channel_fb_unregister() argument
2139 if (ch->info && ch->info->dev) sh_mobile_lcdc_channel_fb_unregister()
2140 unregister_framebuffer(ch->info); sh_mobile_lcdc_channel_fb_unregister()
2144 sh_mobile_lcdc_channel_fb_register(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_channel_fb_register() argument
2146 struct fb_info *info = ch->info; sh_mobile_lcdc_channel_fb_register()
2150 ch->sglist = vmalloc(sizeof(struct scatterlist) * sh_mobile_lcdc_channel_fb_register()
2151 ch->fb_size >> PAGE_SHIFT); sh_mobile_lcdc_channel_fb_register()
2152 if (!ch->sglist) { sh_mobile_lcdc_channel_fb_register()
2153 dev_err(ch->lcdc->dev, "cannot allocate sglist\n"); sh_mobile_lcdc_channel_fb_register()
2158 info->bl_dev = ch->bl; sh_mobile_lcdc_channel_fb_register()
2164 dev_info(ch->lcdc->dev, "registered %s/%s as %dx%d %dbpp.\n", sh_mobile_lcdc_channel_fb_register()
2165 dev_name(ch->lcdc->dev), (ch->cfg->chan == LCDC_CHAN_MAINLCD) ? sh_mobile_lcdc_channel_fb_register()
2171 sh_mobile_lcdc_clk_off(ch->lcdc); sh_mobile_lcdc_channel_fb_register()
2177 sh_mobile_lcdc_channel_fb_cleanup(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_channel_fb_cleanup() argument
2179 struct fb_info *info = ch->info; sh_mobile_lcdc_channel_fb_cleanup()
2184 vfree(ch->sglist); sh_mobile_lcdc_channel_fb_cleanup()
2191 sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, sh_mobile_lcdc_channel_fb_init() argument
2195 struct sh_mobile_lcdc_priv *priv = ch->lcdc; sh_mobile_lcdc_channel_fb_init()
2209 ch->info = info; sh_mobile_lcdc_channel_fb_init()
2214 info->screen_base = ch->fb_mem; sh_mobile_lcdc_channel_fb_init()
2215 info->pseudo_palette = &ch->pseudo_palette; sh_mobile_lcdc_channel_fb_init()
2216 info->par = ch; sh_mobile_lcdc_channel_fb_init()
2230 info->fix.smem_start = ch->dma_handle; sh_mobile_lcdc_channel_fb_init()
2231 info->fix.smem_len = ch->fb_size; sh_mobile_lcdc_channel_fb_init()
2232 info->fix.line_length = ch->pitch; sh_mobile_lcdc_channel_fb_init()
2234 if (ch->format->yuv) sh_mobile_lcdc_channel_fb_init()
2239 switch (ch->format->fourcc) { sh_mobile_lcdc_channel_fb_init()
2253 var->width = ch->display.width; sh_mobile_lcdc_channel_fb_init()
2254 var->height = ch->display.height; sh_mobile_lcdc_channel_fb_init()
2255 var->xres_virtual = ch->xres_virtual; sh_mobile_lcdc_channel_fb_init()
2256 var->yres_virtual = ch->yres_virtual; sh_mobile_lcdc_channel_fb_init()
2262 if (!ch->format->yuv) sh_mobile_lcdc_channel_fb_init()
2263 var->bits_per_pixel = ch->format->bpp; sh_mobile_lcdc_channel_fb_init()
2265 var->grayscale = ch->format->fourcc; sh_mobile_lcdc_channel_fb_init()
2280 struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev); sh_mobile_lcdc_update_bl() local
2287 ch->bl_brightness = brightness; sh_mobile_lcdc_update_bl()
2288 return ch->cfg->bl_info.set_brightness(brightness); sh_mobile_lcdc_update_bl()
2293 struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev); sh_mobile_lcdc_get_brightness() local
2295 return ch->bl_brightness; sh_mobile_lcdc_get_brightness()
2312 struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_bl_probe()
2316 bl = backlight_device_register(ch->cfg->bl_info.name, parent, ch, sh_mobile_lcdc_bl_probe()
2324 bl->props.max_brightness = ch->cfg->bl_info.max_brightness; sh_mobile_lcdc_bl_probe()
2393 struct sh_mobile_lcdc_chan *ch = info->par; sh_mobile_lcdc_notify() local
2395 if (&ch->lcdc->notifier != nb) sh_mobile_lcdc_notify()
2403 sh_mobile_lcdc_display_off(ch); sh_mobile_lcdc_notify()
2404 sh_mobile_lcdc_stop(ch->lcdc); sh_mobile_lcdc_notify()
2407 mutex_lock(&ch->open_lock); sh_mobile_lcdc_notify()
2409 mutex_unlock(&ch->open_lock); sh_mobile_lcdc_notify()
2411 sh_mobile_lcdc_display_on(ch); sh_mobile_lcdc_notify()
2412 sh_mobile_lcdc_start(ch->lcdc); sh_mobile_lcdc_notify()
2449 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_remove()
2450 sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]); sh_mobile_lcdc_remove()
2464 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { sh_mobile_lcdc_remove()
2465 struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; sh_mobile_lcdc_remove() local
2467 if (ch->tx_dev) { sh_mobile_lcdc_remove()
2468 ch->tx_dev->lcdc = NULL; sh_mobile_lcdc_remove()
2469 module_put(ch->cfg->tx_dev->dev.driver->owner); sh_mobile_lcdc_remove()
2472 sh_mobile_lcdc_channel_fb_cleanup(ch); sh_mobile_lcdc_remove()
2474 if (ch->fb_mem) sh_mobile_lcdc_remove()
2475 dma_free_coherent(&pdev->dev, ch->fb_size, sh_mobile_lcdc_remove()
2476 ch->fb_mem, ch->dma_handle); sh_mobile_lcdc_remove()
2479 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { sh_mobile_lcdc_remove()
2480 struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; sh_mobile_lcdc_remove() local
2482 if (ch->bl) sh_mobile_lcdc_remove()
2483 sh_mobile_lcdc_bl_remove(ch->bl); sh_mobile_lcdc_remove()
2484 mutex_destroy(&ch->open_lock); sh_mobile_lcdc_remove()
2501 static int sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_check_interface() argument
2503 int interface_type = ch->cfg->interface_type; sh_mobile_lcdc_check_interface()
2530 if (lcdc_chan_is_sublcd(ch)) { sh_mobile_lcdc_check_interface()
2537 ch->ldmt1r_value = interface_type; sh_mobile_lcdc_check_interface()
2597 sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_channel_init() argument
2600 const struct sh_mobile_lcdc_chan_cfg *cfg = ch->cfg; sh_mobile_lcdc_channel_init()
2601 struct device *dev = ch->lcdc->dev; sh_mobile_lcdc_channel_init()
2608 ch->notify = sh_mobile_lcdc_display_notify; sh_mobile_lcdc_channel_init()
2657 ch->format = format; sh_mobile_lcdc_channel_init()
2658 ch->xres = mode->xres; sh_mobile_lcdc_channel_init()
2659 ch->xres_virtual = mode->xres; sh_mobile_lcdc_channel_init()
2660 ch->yres = mode->yres; sh_mobile_lcdc_channel_init()
2661 ch->yres_virtual = mode->yres * 2; sh_mobile_lcdc_channel_init()
2664 ch->colorspace = V4L2_COLORSPACE_SRGB; sh_mobile_lcdc_channel_init()
2665 ch->pitch = ch->xres_virtual * format->bpp / 8; sh_mobile_lcdc_channel_init()
2667 ch->colorspace = V4L2_COLORSPACE_REC709; sh_mobile_lcdc_channel_init()
2668 ch->pitch = ch->xres_virtual; sh_mobile_lcdc_channel_init()
2671 ch->display.width = cfg->panel_cfg.width; sh_mobile_lcdc_channel_init()
2672 ch->display.height = cfg->panel_cfg.height; sh_mobile_lcdc_channel_init()
2673 ch->display.mode = *mode; sh_mobile_lcdc_channel_init()
2676 ch->fb_size = max_size * format->bpp / 8 * 2; sh_mobile_lcdc_channel_init()
2677 ch->fb_mem = dma_alloc_coherent(dev, ch->fb_size, &ch->dma_handle, sh_mobile_lcdc_channel_init()
2679 if (ch->fb_mem == NULL) { sh_mobile_lcdc_channel_init()
2691 ch->tx_dev = platform_get_drvdata(cfg->tx_dev); sh_mobile_lcdc_channel_init()
2692 ch->tx_dev->lcdc = ch; sh_mobile_lcdc_channel_init()
2693 ch->tx_dev->def_mode = *mode; sh_mobile_lcdc_channel_init()
2696 return sh_mobile_lcdc_channel_fb_init(ch, mode, num_modes); sh_mobile_lcdc_channel_init()
2728 for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_probe()
2729 mutex_init(&priv->ch[i].open_lock); sh_mobile_lcdc_probe()
2742 for (i = 0, num_channels = 0; i < ARRAY_SIZE(pdata->ch); i++) { sh_mobile_lcdc_probe()
2743 struct sh_mobile_lcdc_chan *ch = priv->ch + num_channels; sh_mobile_lcdc_probe() local
2745 ch->lcdc = priv; sh_mobile_lcdc_probe()
2746 ch->cfg = &pdata->ch[i]; sh_mobile_lcdc_probe()
2748 error = sh_mobile_lcdc_check_interface(ch); sh_mobile_lcdc_probe()
2753 init_waitqueue_head(&ch->frame_end_wait); sh_mobile_lcdc_probe()
2754 init_completion(&ch->vsync_completion); sh_mobile_lcdc_probe()
2757 if (ch->cfg->bl_info.max_brightness) sh_mobile_lcdc_probe()
2758 ch->bl = sh_mobile_lcdc_bl_probe(&pdev->dev, ch); sh_mobile_lcdc_probe()
2760 switch (pdata->ch[i].chan) { sh_mobile_lcdc_probe()
2762 ch->enabled = LDCNT2R_ME; sh_mobile_lcdc_probe()
2763 ch->reg_offs = lcdc_offs_mainlcd; sh_mobile_lcdc_probe()
2767 ch->enabled = LDCNT2R_SE; sh_mobile_lcdc_probe()
2768 ch->reg_offs = lcdc_offs_sublcd; sh_mobile_lcdc_probe()
2782 priv->forced_fourcc = pdata->ch[0].fourcc; sh_mobile_lcdc_probe()
2798 struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; sh_mobile_lcdc_probe() local
2800 error = sh_mobile_lcdc_channel_init(ch); sh_mobile_lcdc_probe()
2809 ovl->channel = &priv->ch[0]; sh_mobile_lcdc_probe()
2823 struct sh_mobile_lcdc_chan *ch = priv->ch + i; sh_mobile_lcdc_probe() local
2825 error = sh_mobile_lcdc_channel_fb_register(ch); sh_mobile_lcdc_probe()
2311 sh_mobile_lcdc_bl_probe(struct device *parent, struct sh_mobile_lcdc_chan *ch) sh_mobile_lcdc_bl_probe() argument
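Note on the sh_mobile_lcdc_pan() lines above: for YUV frame buffers the pan offset is split into a luma offset and a chroma offset scaled down by the subsampling factors (xsub, ysub), and the chroma plane is addressed after the full-size luma area. A minimal standalone sketch of that arithmetic, assuming a 4:2:0 layout and made-up frame dimensions; only vertical panning is modelled, since the horizontal chroma term is truncated in the listing above.

#include <stdio.h>

int main(void)
{
	/* Made-up frame geometry; the driver takes these from fb_var_screeninfo. */
	unsigned int xres_virtual = 1280, yres_virtual = 720;
	unsigned int bpp = 12;		/* an NV12-style 4:2:0 format */
	unsigned int yoffset = 64;	/* vertical pan position in lines */

	/* Subsampling factors, same tests as in sh_mobile_lcdc_pan() above. */
	unsigned int xsub = bpp < 24 ? 2 : 1;
	unsigned int ysub = bpp < 16 ? 2 : 1;

	/* Luma plane: one byte per pixel, so the line length is xres_virtual. */
	unsigned long y_offset = (unsigned long)yoffset * xres_virtual;

	/*
	 * Chroma plane: interleaved Cb/Cr (2 bytes per sample), subsampled by
	 * xsub horizontally and ysub vertically.  The horizontal-pan term is
	 * left out here because it is not visible in the listing above.
	 */
	unsigned long c_offset = (unsigned long)yoffset / ysub
				 * xres_virtual * 2 / xsub;

	/* Chroma data follows the full-size luma area (cf. the LDSA2R setup). */
	unsigned long chroma_base = (unsigned long)xres_virtual * yres_virtual
				    + c_offset;

	printf("y_offset=%lu c_offset=%lu chroma_base=%lu\n",
	       y_offset, c_offset, chroma_base);
	return 0;
}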
/linux-4.1.27/sound/pci/emu10k1/
H A Demu10k1_callback.c104 int ch; snd_emu10k1_synth_get_voice() local
106 if ((ch = vp->ch) < 0) { snd_emu10k1_synth_get_voice()
109 "synth_get_voice: ch < 0 (%d) ??", i); snd_emu10k1_synth_get_voice()
114 vp->ch = -1; snd_emu10k1_synth_get_voice()
116 return ch; snd_emu10k1_synth_get_voice()
136 snd_emu10k1_ptr_write(hw, DCYSUSM, vp->ch, dcysusv); release_voice()
138 snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, dcysusv); release_voice()
153 snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, 0x807f | DCYSUSV_CHANNELENABLE_MASK); terminate_voice()
175 if (hw && (vp->ch >= 0)) { free_voice()
176 snd_emu10k1_ptr_write(hw, IFATN, vp->ch, 0xff00); free_voice()
177 snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, 0x807f | DCYSUSV_CHANNELENABLE_MASK); free_voice()
178 // snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, 0); free_voice()
179 snd_emu10k1_ptr_write(hw, VTFT, vp->ch, 0xffff); free_voice()
180 snd_emu10k1_ptr_write(hw, CVCF, vp->ch, 0xffff); free_voice()
181 snd_emu10k1_voice_free(hw, &hw->voices[vp->ch]); free_voice()
183 vp->ch = -1; free_voice()
198 snd_emu10k1_ptr_write(hw, IFATN_ATTENUATION, vp->ch, vp->avol); update_voice()
200 snd_emu10k1_ptr_write(hw, IP, vp->ch, vp->apitch); update_voice()
202 snd_emu10k1_ptr_write(hw, PTRX_FXSENDAMOUNT_A, vp->ch, vp->apan); update_voice()
203 snd_emu10k1_ptr_write(hw, PTRX_FXSENDAMOUNT_B, vp->ch, vp->aaux); update_voice()
208 snd_emu10k1_ptr_write(hw, TREMFRQ, vp->ch, vp->reg.parm.tremfrq); update_voice()
243 if (vp->ch < 0) { lookup_voices()
254 val = snd_emu10k1_ptr_read(hw, CVCF_CURRENTVOL, vp->ch); lookup_voices()
269 val = snd_emu10k1_ptr_read(hw, CCCA_CURRADDR, vp->ch); lookup_voices()
300 if (vp->ch < 0) { get_voice()
305 vp->ch = hwvoice->number; get_voice()
323 int ch; start_voice() local
330 ch = vp->ch; start_voice()
331 if (snd_BUG_ON(ch < 0)) start_voice()
354 snd_emu10k1_ptr_write(hw, A_FXRT1, ch, temp); start_voice()
358 snd_emu10k1_ptr_write(hw, FXRT, ch, temp); start_voice()
362 snd_emu10k1_ptr_write(hw, DCYSUSV, ch, 0x0000); start_voice()
363 snd_emu10k1_ptr_write(hw, VTFT, ch, 0x0000FFFF); start_voice()
364 snd_emu10k1_ptr_write(hw, CVCF, ch, 0x0000FFFF); start_voice()
365 snd_emu10k1_ptr_write(hw, PTRX, ch, 0); start_voice()
366 snd_emu10k1_ptr_write(hw, CPF, ch, 0); start_voice()
369 snd_emu10k1_ptr_write(hw, IP, vp->ch, vp->apitch); start_voice()
372 snd_emu10k1_ptr_write(hw, ENVVAL, ch, vp->reg.parm.moddelay); start_voice()
373 snd_emu10k1_ptr_write(hw, ATKHLDM, ch, vp->reg.parm.modatkhld); start_voice()
374 snd_emu10k1_ptr_write(hw, DCYSUSM, ch, vp->reg.parm.moddcysus); start_voice()
375 snd_emu10k1_ptr_write(hw, ENVVOL, ch, vp->reg.parm.voldelay); start_voice()
376 snd_emu10k1_ptr_write(hw, ATKHLDV, ch, vp->reg.parm.volatkhld); start_voice()
382 snd_emu10k1_ptr_write(hw, IFATN, vp->ch, temp); start_voice()
385 snd_emu10k1_ptr_write(hw, PEFE, ch, vp->reg.parm.pefe); start_voice()
388 snd_emu10k1_ptr_write(hw, LFOVAL1, ch, vp->reg.parm.lfo1delay); start_voice()
389 snd_emu10k1_ptr_write(hw, LFOVAL2, ch, vp->reg.parm.lfo2delay); start_voice()
394 snd_emu10k1_ptr_write(hw, TREMFRQ, vp->ch, vp->reg.parm.tremfrq); start_voice()
403 snd_emu10k1_ptr_write(hw, PSST, vp->ch, (temp << 24) | addr); start_voice()
411 snd_emu10k1_ptr_write(hw, DSL, ch, temp); start_voice()
414 snd_emu10k1_ptr_write(hw, Z1, ch, 0); start_voice()
415 snd_emu10k1_ptr_write(hw, Z2, ch, 0); start_voice()
419 snd_emu10k1_ptr_write(hw, MAPA, ch, temp); start_voice()
420 snd_emu10k1_ptr_write(hw, MAPB, ch, temp); start_voice()
434 snd_emu10k1_ptr_write(hw, CCR, ch, 0x1c << 16); start_voice()
435 snd_emu10k1_ptr_write(hw, CDE, ch, sample); start_voice()
436 snd_emu10k1_ptr_write(hw, CDF, ch, sample); start_voice()
440 snd_emu10k1_ptr_write(hw, MAPA, ch, temp); start_voice()
441 snd_emu10k1_ptr_write(hw, MAPB, ch, temp); start_voice()
447 snd_emu10k1_ptr_write(hw, CCR, ch, val); start_voice()
463 snd_emu10k1_ptr_write(hw, CCCA, ch, temp); start_voice()
467 snd_emu10k1_ptr_write(hw, VTFT, ch, temp | vp->ftarget); start_voice()
468 snd_emu10k1_ptr_write(hw, CVCF, ch, temp | 0xff00); start_voice()
495 snd_emu10k1_ptr_write(hw, PTRX, vp->ch, temp); trigger_voice()
498 snd_emu10k1_ptr_write(hw, CPF, vp->ch, ptarget); trigger_voice()
501 snd_emu10k1_ptr_write(hw, DCYSUSV, vp->ch, vp->reg.parm.voldcysus|DCYSUSV_CHANNELENABLE_MASK); trigger_voice()
521 snd_emu10k1_ptr_write(hw, FMMOD, vp->ch, fmmod); set_fmmod()
539 snd_emu10k1_ptr_write(hw, FM2FRQ2, vp->ch, fm2frq2); set_fm2frq2()
547 val = snd_emu10k1_ptr_read(hw, CCCA, vp->ch) & ~CCCA_RESONANCE; set_filterQ()
549 snd_emu10k1_ptr_write(hw, CCCA, vp->ch, val); set_filterQ()
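The get_voice()/free_voice() lines above rely on vp->ch < 0 meaning "no hardware voice assigned": get_voice() stores the allocated hardware channel number, and free_voice() only touches the hardware, then resets the field to -1, when a channel really was assigned. A toy standalone sketch of that sentinel convention; the voice table, its size and the busy flags are invented for the example, and the register quiescing done by the driver is reduced to a comment.

#include <stdio.h>

#define NUM_VOICES 4			/* invented for the example */

struct voice { int ch; };		/* -1: unassigned, >= 0: hw channel */
static int hw_busy[NUM_VOICES];

static int get_voice(struct voice *vp)
{
	for (int i = 0; i < NUM_VOICES; i++) {
		if (!hw_busy[i]) {
			hw_busy[i] = 1;
			vp->ch = i;	/* remember which hw channel we own */
			return 0;
		}
	}
	return -1;			/* no free hardware channel */
}

static void free_voice(struct voice *vp)
{
	if (vp->ch >= 0) {		/* only touch hw if one was assigned */
		hw_busy[vp->ch] = 0;	/* the driver also quiesces registers here */
		vp->ch = -1;
	}
}

int main(void)
{
	struct voice v = { .ch = -1 };

	printf("alloc: %d, ch=%d\n", get_voice(&v), v.ch);
	free_voice(&v);
	printf("after free, ch=%d\n", v.ch);
	return 0;
}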
/linux-4.1.27/drivers/net/ethernet/
H A Dlantiq_etop.c107 struct ltq_etop_chan ch[MAX_DMA_CHAN]; member in struct:ltq_etop_priv
114 ltq_etop_alloc_skb(struct ltq_etop_chan *ch) ltq_etop_alloc_skb() argument
116 ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN); ltq_etop_alloc_skb()
117 if (!ch->skb[ch->dma.desc]) ltq_etop_alloc_skb()
119 ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, ltq_etop_alloc_skb()
120 ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, ltq_etop_alloc_skb()
122 ch->dma.desc_base[ch->dma.desc].addr = ltq_etop_alloc_skb()
123 CPHYSADDR(ch->skb[ch->dma.desc]->data); ltq_etop_alloc_skb()
124 ch->dma.desc_base[ch->dma.desc].ctl = ltq_etop_alloc_skb()
127 skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); ltq_etop_alloc_skb()
132 ltq_etop_hw_receive(struct ltq_etop_chan *ch) ltq_etop_hw_receive() argument
134 struct ltq_etop_priv *priv = netdev_priv(ch->netdev); ltq_etop_hw_receive()
135 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; ltq_etop_hw_receive()
136 struct sk_buff *skb = ch->skb[ch->dma.desc]; ltq_etop_hw_receive()
141 if (ltq_etop_alloc_skb(ch)) { ltq_etop_hw_receive()
142 netdev_err(ch->netdev, ltq_etop_hw_receive()
144 ltq_dma_close(&ch->dma); ltq_etop_hw_receive()
146 ch->dma.desc++; ltq_etop_hw_receive()
147 ch->dma.desc %= LTQ_DESC_NUM; ltq_etop_hw_receive()
151 skb->protocol = eth_type_trans(skb, ch->netdev); ltq_etop_hw_receive()
158 struct ltq_etop_chan *ch = container_of(napi, ltq_etop_poll_rx() local
164 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; ltq_etop_poll_rx()
167 ltq_etop_hw_receive(ch); ltq_etop_poll_rx()
174 napi_complete(&ch->napi); ltq_etop_poll_rx()
175 ltq_dma_ack_irq(&ch->dma); ltq_etop_poll_rx()
183 struct ltq_etop_chan *ch = ltq_etop_poll_tx() local
185 struct ltq_etop_priv *priv = netdev_priv(ch->netdev); ltq_etop_poll_tx()
187 netdev_get_tx_queue(ch->netdev, ch->idx >> 1); ltq_etop_poll_tx()
191 while ((ch->dma.desc_base[ch->tx_free].ctl & ltq_etop_poll_tx()
193 dev_kfree_skb_any(ch->skb[ch->tx_free]); ltq_etop_poll_tx()
194 ch->skb[ch->tx_free] = NULL; ltq_etop_poll_tx()
195 memset(&ch->dma.desc_base[ch->tx_free], 0, ltq_etop_poll_tx()
197 ch->tx_free++; ltq_etop_poll_tx()
198 ch->tx_free %= LTQ_DESC_NUM; ltq_etop_poll_tx()
204 napi_complete(&ch->napi); ltq_etop_poll_tx()
205 ltq_dma_ack_irq(&ch->dma); ltq_etop_poll_tx()
213 int ch = irq - LTQ_DMA_CH0_INT; ltq_etop_dma_irq() local
215 napi_schedule(&priv->ch[ch].napi); ltq_etop_dma_irq()
220 ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) ltq_etop_free_channel() argument
224 ltq_dma_free(&ch->dma); ltq_etop_free_channel()
225 if (ch->dma.irq) ltq_etop_free_channel()
226 free_irq(ch->dma.irq, priv); ltq_etop_free_channel()
227 if (IS_RX(ch->idx)) { ltq_etop_free_channel()
230 dev_kfree_skb_any(ch->skb[ch->dma.desc]); ltq_etop_free_channel()
243 ltq_etop_free_channel(dev, &priv->ch[i]); ltq_etop_hw_exit()
278 struct ltq_etop_chan *ch = &priv->ch[i]; ltq_etop_hw_init() local
280 ch->idx = ch->dma.nr = i; ltq_etop_hw_init()
283 ltq_dma_alloc_tx(&ch->dma); ltq_etop_hw_init()
286 ltq_dma_alloc_rx(&ch->dma); ltq_etop_hw_init()
287 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; ltq_etop_hw_init()
288 ch->dma.desc++) ltq_etop_hw_init()
289 if (ltq_etop_alloc_skb(ch)) ltq_etop_hw_init()
291 ch->dma.desc = 0; ltq_etop_hw_init()
294 ch->dma.irq = irq; ltq_etop_hw_init()
486 struct ltq_etop_chan *ch = &priv->ch[i]; ltq_etop_open() local
490 ltq_dma_open(&ch->dma); ltq_etop_open()
491 napi_enable(&ch->napi); ltq_etop_open()
507 struct ltq_etop_chan *ch = &priv->ch[i]; ltq_etop_stop() local
511 napi_disable(&ch->napi); ltq_etop_stop()
512 ltq_dma_close(&ch->dma); ltq_etop_stop()
523 struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; ltq_etop_tx() local
524 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; ltq_etop_tx()
531 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { ltq_etop_tx()
540 ch->skb[ch->dma.desc] = skb; ltq_etop_tx()
550 ch->dma.desc++; ltq_etop_tx()
551 ch->dma.desc %= LTQ_DESC_NUM; ltq_etop_tx()
554 if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) ltq_etop_tx()
753 netif_napi_add(dev, &priv->ch[i].napi, ltq_etop_probe()
756 netif_napi_add(dev, &priv->ch[i].napi, ltq_etop_probe()
758 priv->ch[i].netdev = dev; ltq_etop_probe()
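Both the receive path (ltq_etop_hw_receive) and the transmit reclaim path (ltq_etop_poll_tx) above step through their DMA descriptors with the same post-increment-and-wrap pattern: ch->dma.desc++ followed by ch->dma.desc %= LTQ_DESC_NUM, and likewise for ch->tx_free. A toy standalone version of that ring bookkeeping, with a made-up ring size standing in for LTQ_DESC_NUM; the LTQ_DMA_OWN ownership checks that gate slot reuse in the real driver are omitted.

#include <stdio.h>

#define RING_SIZE 4	/* stand-in for LTQ_DESC_NUM */

int main(void)
{
	unsigned int desc = 0;		/* next slot to fill (ch->dma.desc) */
	unsigned int tx_free = 0;	/* next slot to reclaim (ch->tx_free) */

	/* Producer side: queue six buffers through a four-entry ring. */
	for (int i = 0; i < 6; i++) {
		printf("queue buffer %d in slot %u\n", i, desc);
		desc++;
		desc %= RING_SIZE;	/* wrap around, as in the driver */
	}

	/* Consumer side: reclaim slots in the same order. */
	for (int i = 0; i < 6; i++) {
		printf("reclaim slot %u\n", tx_free);
		tx_free++;
		tx_free %= RING_SIZE;
	}
	return 0;
}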
/linux-4.1.27/drivers/infiniband/ulp/srp/
H A Dib_srp.c292 static int srp_new_cm_id(struct srp_rdma_ch *ch) srp_new_cm_id() argument
294 struct srp_target_port *target = ch->target; srp_new_cm_id()
298 srp_cm_handler, ch); srp_new_cm_id()
302 if (ch->cm_id) srp_new_cm_id()
303 ib_destroy_cm_id(ch->cm_id); srp_new_cm_id()
304 ch->cm_id = new_cm_id; srp_new_cm_id()
305 ch->path.sgid = target->sgid; srp_new_cm_id()
306 ch->path.dgid = target->orig_dgid; srp_new_cm_id()
307 ch->path.pkey = target->pkey; srp_new_cm_id()
308 ch->path.service_id = target->service_id; srp_new_cm_id()
459 * @ch: SRP RDMA channel.
466 static void srp_destroy_qp(struct srp_rdma_ch *ch) srp_destroy_qp() argument
473 /* Destroying a QP and reusing ch->done is only safe if not connected */ srp_destroy_qp()
474 WARN_ON_ONCE(ch->connected); srp_destroy_qp()
476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); srp_destroy_qp()
481 init_completion(&ch->done); srp_destroy_qp()
482 ret = ib_post_recv(ch->qp, &wr, &bad_wr); srp_destroy_qp()
485 wait_for_completion(&ch->done); srp_destroy_qp()
488 ib_destroy_qp(ch->qp); srp_destroy_qp()
491 static int srp_create_ch_ib(struct srp_rdma_ch *ch) srp_create_ch_ib() argument
493 struct srp_target_port *target = ch->target; srp_create_ch_ib()
508 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, srp_create_ch_ib()
509 target->queue_size + 1, ch->comp_vector); srp_create_ch_ib()
515 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, srp_create_ch_ib()
516 m * target->queue_size, ch->comp_vector); srp_create_ch_ib()
552 if (ch->fr_pool) srp_create_ch_ib()
553 srp_destroy_fr_pool(ch->fr_pool); srp_create_ch_ib()
554 ch->fr_pool = fr_pool; srp_create_ch_ib()
563 if (ch->fmr_pool) srp_create_ch_ib()
564 ib_destroy_fmr_pool(ch->fmr_pool); srp_create_ch_ib()
565 ch->fmr_pool = fmr_pool; srp_create_ch_ib()
568 if (ch->qp) srp_create_ch_ib()
569 srp_destroy_qp(ch); srp_create_ch_ib()
570 if (ch->recv_cq) srp_create_ch_ib()
571 ib_destroy_cq(ch->recv_cq); srp_create_ch_ib()
572 if (ch->send_cq) srp_create_ch_ib()
573 ib_destroy_cq(ch->send_cq); srp_create_ch_ib()
575 ch->qp = qp; srp_create_ch_ib()
576 ch->recv_cq = recv_cq; srp_create_ch_ib()
577 ch->send_cq = send_cq; srp_create_ch_ib()
598 * invoked. Hence the ch->[rt]x_ring checks.
601 struct srp_rdma_ch *ch) srp_free_ch_ib()
606 if (!ch->target) srp_free_ch_ib()
609 if (ch->cm_id) { srp_free_ch_ib()
610 ib_destroy_cm_id(ch->cm_id); srp_free_ch_ib()
611 ch->cm_id = NULL; srp_free_ch_ib()
615 if (!ch->qp) srp_free_ch_ib()
619 if (ch->fr_pool) srp_free_ch_ib()
620 srp_destroy_fr_pool(ch->fr_pool); srp_free_ch_ib()
622 if (ch->fmr_pool) srp_free_ch_ib()
623 ib_destroy_fmr_pool(ch->fmr_pool); srp_free_ch_ib()
625 srp_destroy_qp(ch); srp_free_ch_ib()
626 ib_destroy_cq(ch->send_cq); srp_free_ch_ib()
627 ib_destroy_cq(ch->recv_cq); srp_free_ch_ib()
635 ch->target = NULL; srp_free_ch_ib()
637 ch->qp = NULL; srp_free_ch_ib()
638 ch->send_cq = ch->recv_cq = NULL; srp_free_ch_ib()
640 if (ch->rx_ring) { srp_free_ch_ib()
642 srp_free_iu(target->srp_host, ch->rx_ring[i]); srp_free_ch_ib()
643 kfree(ch->rx_ring); srp_free_ch_ib()
644 ch->rx_ring = NULL; srp_free_ch_ib()
646 if (ch->tx_ring) { srp_free_ch_ib()
648 srp_free_iu(target->srp_host, ch->tx_ring[i]); srp_free_ch_ib()
649 kfree(ch->tx_ring); srp_free_ch_ib()
650 ch->tx_ring = NULL; srp_free_ch_ib()
658 struct srp_rdma_ch *ch = ch_ptr; srp_path_rec_completion() local
659 struct srp_target_port *target = ch->target; srp_path_rec_completion()
661 ch->status = status; srp_path_rec_completion()
666 ch->path = *pathrec; srp_path_rec_completion()
667 complete(&ch->done); srp_path_rec_completion()
670 static int srp_lookup_path(struct srp_rdma_ch *ch) srp_lookup_path() argument
672 struct srp_target_port *target = ch->target; srp_lookup_path()
675 ch->path.numb_path = 1; srp_lookup_path()
677 init_completion(&ch->done); srp_lookup_path()
679 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, srp_lookup_path()
682 &ch->path, srp_lookup_path()
691 ch, &ch->path_query); srp_lookup_path()
692 if (ch->path_query_id < 0) srp_lookup_path()
693 return ch->path_query_id; srp_lookup_path()
695 ret = wait_for_completion_interruptible(&ch->done); srp_lookup_path()
699 if (ch->status < 0) srp_lookup_path()
703 return ch->status; srp_lookup_path()
706 static int srp_send_req(struct srp_rdma_ch *ch, bool multich) srp_send_req() argument
708 struct srp_target_port *target = ch->target; srp_send_req()
719 req->param.primary_path = &ch->path; srp_send_req()
722 req->param.qp_num = ch->qp->qp_num; srp_send_req()
723 req->param.qp_type = ch->qp->qp_type; srp_send_req()
789 status = ib_send_cm_req(ch->cm_id, &req->param); srp_send_req()
815 struct srp_rdma_ch *ch; srp_disconnect_target() local
821 ch = &target->ch[i]; srp_disconnect_target()
822 ch->connected = false; srp_disconnect_target()
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { srp_disconnect_target()
831 struct srp_rdma_ch *ch) srp_free_req_data()
838 if (!ch->target || !ch->req_ring) srp_free_req_data()
842 req = &ch->req_ring[i]; srp_free_req_data()
856 kfree(ch->req_ring); srp_free_req_data()
857 ch->req_ring = NULL; srp_free_req_data()
860 static int srp_alloc_req_data(struct srp_rdma_ch *ch) srp_alloc_req_data() argument
862 struct srp_target_port *target = ch->target; srp_alloc_req_data()
870 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), srp_alloc_req_data()
872 if (!ch->req_ring) srp_alloc_req_data()
876 req = &ch->req_ring[i]; srp_alloc_req_data()
924 struct srp_rdma_ch *ch; srp_remove_target() local
936 ch = &target->ch[i]; srp_remove_target()
937 srp_free_ch_ib(target, ch); srp_remove_target()
942 ch = &target->ch[i]; srp_remove_target()
943 srp_free_req_data(target, ch); srp_remove_target()
945 kfree(target->ch); srp_remove_target()
946 target->ch = NULL; srp_remove_target()
981 c += target->ch[i].connected; srp_connected_ch()
986 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) srp_connect_ch() argument
988 struct srp_target_port *target = ch->target; srp_connect_ch()
993 ret = srp_lookup_path(ch); srp_connect_ch()
998 init_completion(&ch->done); srp_connect_ch()
999 ret = srp_send_req(ch, multich); srp_connect_ch()
1002 ret = wait_for_completion_interruptible(&ch->done); srp_connect_ch()
1012 switch (ch->status) { srp_connect_ch()
1014 ch->connected = true; srp_connect_ch()
1018 ret = srp_lookup_path(ch); srp_connect_ch()
1029 ch->status = -ECONNRESET; srp_connect_ch()
1030 return ch->status; srp_connect_ch()
1033 return ch->status; srp_connect_ch()
1038 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) srp_inv_rkey() argument
1050 return ib_post_send(ch->qp, &wr, &bad_wr); srp_inv_rkey()
1054 struct srp_rdma_ch *ch, srp_unmap_data()
1057 struct srp_target_port *target = ch->target; srp_unmap_data()
1071 res = srp_inv_rkey(ch, (*pfr)->mr->rkey); srp_unmap_data()
1081 srp_fr_pool_put(ch->fr_pool, req->fr_list, srp_unmap_data()
1096 * @ch: SRP RDMA channel.
1105 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, srp_claim_req() argument
1112 spin_lock_irqsave(&ch->lock, flags); srp_claim_req()
1121 spin_unlock_irqrestore(&ch->lock, flags); srp_claim_req()
1128 * @ch: SRP RDMA channel.
1133 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, srp_free_req() argument
1138 srp_unmap_data(scmnd, ch, req); srp_free_req()
1140 spin_lock_irqsave(&ch->lock, flags); srp_free_req()
1141 ch->req_lim += req_lim_delta; srp_free_req()
1142 spin_unlock_irqrestore(&ch->lock, flags); srp_free_req()
1145 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, srp_finish_req() argument
1148 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); srp_finish_req()
1151 srp_free_req(ch, req, scmnd, 0); srp_finish_req()
1160 struct srp_rdma_ch *ch; srp_terminate_io() local
1173 ch = &target->ch[i]; shost_for_each_device()
1176 struct srp_request *req = &ch->req_ring[j]; shost_for_each_device()
1178 srp_finish_req(ch, req, NULL, shost_for_each_device()
1196 struct srp_rdma_ch *ch; srp_rport_reconnect() local
1211 ch = &target->ch[i]; srp_rport_reconnect()
1212 if (!ch->target) srp_rport_reconnect()
1214 ret += srp_new_cm_id(ch); srp_rport_reconnect()
1217 ch = &target->ch[i]; srp_rport_reconnect()
1218 if (!ch->target) srp_rport_reconnect()
1221 struct srp_request *req = &ch->req_ring[j]; srp_rport_reconnect()
1223 srp_finish_req(ch, req, NULL, DID_RESET << 16); srp_rport_reconnect()
1227 ch = &target->ch[i]; srp_rport_reconnect()
1228 if (!ch->target) srp_rport_reconnect()
1235 ret += srp_create_ch_ib(ch); srp_rport_reconnect()
1237 INIT_LIST_HEAD(&ch->free_tx); srp_rport_reconnect()
1239 list_add(&ch->tx_ring[j]->list, &ch->free_tx); srp_rport_reconnect()
1245 ch = &target->ch[i]; srp_rport_reconnect()
1246 if (ret || !ch->target) srp_rport_reconnect()
1248 ret = srp_connect_ch(ch, multich); srp_rport_reconnect()
1274 struct srp_rdma_ch *ch) srp_map_finish_fmr()
1279 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, srp_map_finish_fmr()
1293 struct srp_rdma_ch *ch) srp_map_finish_fr()
1295 struct srp_target_port *target = ch->target; srp_map_finish_fr()
1302 desc = srp_fr_pool_get(ch->fr_pool); srp_map_finish_fr()
1331 return ib_post_send(ch->qp, &wr, &bad_wr); srp_map_finish_fr()
1335 struct srp_rdma_ch *ch) srp_finish_mapping()
1337 struct srp_target_port *target = ch->target; srp_finish_mapping()
1348 srp_map_finish_fr(state, ch) : srp_finish_mapping()
1349 srp_map_finish_fmr(state, ch); srp_finish_mapping()
1369 struct srp_rdma_ch *ch, srp_map_sg_entry()
1373 struct srp_target_port *target = ch->target; srp_map_sg_entry()
1402 ret = srp_finish_mapping(state, ch); srp_map_sg_entry()
1423 ret = srp_finish_mapping(state, ch); srp_map_sg_entry()
1447 ret = srp_finish_mapping(state, ch); srp_map_sg_entry()
1454 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, srp_map_sg() argument
1458 struct srp_target_port *target = ch->target; srp_map_sg()
1469 use_mr = !!ch->fr_pool; srp_map_sg()
1472 use_mr = !!ch->fmr_pool; srp_map_sg()
1476 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) { for_each_sg()
1498 if (use_mr && srp_finish_mapping(state, ch))
1506 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, srp_map_data() argument
1509 struct srp_target_port *target = ch->target; srp_map_data()
1571 srp_map_sg(&state, ch, req, scat, count); srp_map_data()
1632 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, srp_put_tx_iu() argument
1637 spin_lock_irqsave(&ch->lock, flags); srp_put_tx_iu()
1638 list_add(&iu->list, &ch->free_tx); srp_put_tx_iu()
1640 ++ch->req_lim; srp_put_tx_iu()
1641 spin_unlock_irqrestore(&ch->lock, flags); srp_put_tx_iu()
1645 * Must be called with ch->lock held to protect req_lim and free_tx.
1657 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, __srp_get_tx_iu() argument
1660 struct srp_target_port *target = ch->target; __srp_get_tx_iu()
1664 srp_send_completion(ch->send_cq, ch); __srp_get_tx_iu()
1666 if (list_empty(&ch->free_tx)) __srp_get_tx_iu()
1671 if (ch->req_lim <= rsv) { __srp_get_tx_iu()
1676 --ch->req_lim; __srp_get_tx_iu()
1679 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); __srp_get_tx_iu()
1684 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) srp_post_send() argument
1686 struct srp_target_port *target = ch->target; srp_post_send()
1701 return ib_post_send(ch->qp, &wr, &bad_wr); srp_post_send()
1704 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) srp_post_recv() argument
1706 struct srp_target_port *target = ch->target; srp_post_recv()
1719 return ib_post_recv(ch->qp, &wr, &bad_wr); srp_post_recv()
1722 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) srp_process_rsp() argument
1724 struct srp_target_port *target = ch->target; srp_process_rsp()
1730 spin_lock_irqsave(&ch->lock, flags); srp_process_rsp()
1731 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); srp_process_rsp()
1732 spin_unlock_irqrestore(&ch->lock, flags); srp_process_rsp()
1734 ch->tsk_mgmt_status = -1; srp_process_rsp()
1736 ch->tsk_mgmt_status = rsp->data[3]; srp_process_rsp()
1737 complete(&ch->tsk_mgmt_done); srp_process_rsp()
1742 scmnd = srp_claim_req(ch, req, NULL, scmnd); srp_process_rsp()
1746 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", srp_process_rsp()
1747 rsp->tag, ch - target->ch, ch->qp->qp_num); srp_process_rsp()
1749 spin_lock_irqsave(&ch->lock, flags); srp_process_rsp()
1750 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); srp_process_rsp()
1751 spin_unlock_irqrestore(&ch->lock, flags); srp_process_rsp()
1773 srp_free_req(ch, req, scmnd, srp_process_rsp()
1781 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, srp_response_common() argument
1784 struct srp_target_port *target = ch->target; srp_response_common()
1790 spin_lock_irqsave(&ch->lock, flags); srp_response_common()
1791 ch->req_lim += req_delta; srp_response_common()
1792 iu = __srp_get_tx_iu(ch, SRP_IU_RSP); srp_response_common()
1793 spin_unlock_irqrestore(&ch->lock, flags); srp_response_common()
1805 err = srp_post_send(ch, iu, len); srp_response_common()
1809 srp_put_tx_iu(ch, iu, SRP_IU_RSP); srp_response_common()
1815 static void srp_process_cred_req(struct srp_rdma_ch *ch, srp_process_cred_req() argument
1824 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) srp_process_cred_req()
1825 shost_printk(KERN_ERR, ch->target->scsi_host, PFX srp_process_cred_req()
1829 static void srp_process_aer_req(struct srp_rdma_ch *ch, srp_process_aer_req() argument
1832 struct srp_target_port *target = ch->target; srp_process_aer_req()
1842 if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) srp_process_aer_req()
1847 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) srp_handle_recv() argument
1849 struct srp_target_port *target = ch->target; srp_handle_recv()
1855 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, srp_handle_recv()
1869 srp_process_rsp(ch, iu->buf); srp_handle_recv()
1873 srp_process_cred_req(ch, iu->buf); srp_handle_recv()
1877 srp_process_aer_req(ch, iu->buf); srp_handle_recv()
1892 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, srp_handle_recv()
1895 res = srp_post_recv(ch, iu); srp_handle_recv()
1918 bool send_err, struct srp_rdma_ch *ch) srp_handle_qp_err()
1920 struct srp_target_port *target = ch->target; srp_handle_qp_err()
1923 complete(&ch->done); srp_handle_qp_err()
1927 if (ch->connected && !target->qp_in_error) { srp_handle_qp_err()
1949 struct srp_rdma_ch *ch = ch_ptr; srp_recv_completion() local
1955 srp_handle_recv(ch, &wc); srp_recv_completion()
1957 srp_handle_qp_err(wc.wr_id, wc.status, false, ch); srp_recv_completion()
1964 struct srp_rdma_ch *ch = ch_ptr; srp_send_completion() local
1971 list_add(&iu->list, &ch->free_tx); srp_send_completion()
1973 srp_handle_qp_err(wc.wr_id, wc.status, true, ch); srp_send_completion()
1982 struct srp_rdma_ch *ch; srp_queuecommand() local
2008 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; srp_queuecommand()
2014 spin_lock_irqsave(&ch->lock, flags); srp_queuecommand()
2015 iu = __srp_get_tx_iu(ch, SRP_IU_CMD); srp_queuecommand()
2016 spin_unlock_irqrestore(&ch->lock, flags); srp_queuecommand()
2021 req = &ch->req_ring[idx]; srp_queuecommand()
2039 len = srp_map_data(scmnd, ch, req); srp_queuecommand()
2057 if (srp_post_send(ch, iu, len)) { srp_queuecommand()
2071 srp_unmap_data(scmnd, ch, req); srp_queuecommand()
2074 srp_put_tx_iu(ch, iu, SRP_IU_CMD); srp_queuecommand()
2097 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) srp_alloc_iu_bufs() argument
2099 struct srp_target_port *target = ch->target; srp_alloc_iu_bufs()
2102 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), srp_alloc_iu_bufs()
2104 if (!ch->rx_ring) srp_alloc_iu_bufs()
2106 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), srp_alloc_iu_bufs()
2108 if (!ch->tx_ring) srp_alloc_iu_bufs()
2112 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, srp_alloc_iu_bufs()
2113 ch->max_ti_iu_len, srp_alloc_iu_bufs()
2115 if (!ch->rx_ring[i]) srp_alloc_iu_bufs()
2120 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, srp_alloc_iu_bufs()
2123 if (!ch->tx_ring[i]) srp_alloc_iu_bufs()
2126 list_add(&ch->tx_ring[i]->list, &ch->free_tx); srp_alloc_iu_bufs()
2133 srp_free_iu(target->srp_host, ch->rx_ring[i]); srp_alloc_iu_bufs()
2134 srp_free_iu(target->srp_host, ch->tx_ring[i]); srp_alloc_iu_bufs()
2139 kfree(ch->tx_ring); srp_alloc_iu_bufs()
2140 ch->tx_ring = NULL; srp_alloc_iu_bufs()
2141 kfree(ch->rx_ring); srp_alloc_iu_bufs()
2142 ch->rx_ring = NULL; srp_alloc_iu_bufs()
2176 struct srp_rdma_ch *ch) srp_cm_rep_handler()
2178 struct srp_target_port *target = ch->target; srp_cm_rep_handler()
2185 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); srp_cm_rep_handler()
2186 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); srp_cm_rep_handler()
2193 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, srp_cm_rep_handler()
2205 if (!ch->rx_ring) { srp_cm_rep_handler()
2206 ret = srp_alloc_iu_bufs(ch); srp_cm_rep_handler()
2221 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler()
2226 struct srp_iu *iu = ch->rx_ring[i]; srp_cm_rep_handler()
2228 ret = srp_post_recv(ch, iu); srp_cm_rep_handler()
2240 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler()
2250 ch->status = ret; srp_cm_rep_handler()
2255 struct srp_rdma_ch *ch) srp_cm_rej_handler()
2257 struct srp_target_port *target = ch->target; srp_cm_rej_handler()
2265 ch->path.dlid = cpi->redirect_lid; srp_cm_rej_handler()
2266 ch->path.pkey = cpi->redirect_pkey; srp_cm_rej_handler()
2268 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); srp_cm_rej_handler()
2270 ch->status = ch->path.dlid ? srp_cm_rej_handler()
2281 memcpy(ch->path.dgid.raw, srp_cm_rej_handler()
2286 be64_to_cpu(ch->path.dgid.global.subnet_prefix), srp_cm_rej_handler()
2287 be64_to_cpu(ch->path.dgid.global.interface_id)); srp_cm_rej_handler()
2289 ch->status = SRP_PORT_REDIRECT; srp_cm_rej_handler()
2293 ch->status = -ECONNRESET; srp_cm_rej_handler()
2300 ch->status = -ECONNRESET; srp_cm_rej_handler()
2321 ch->status = -ECONNRESET; srp_cm_rej_handler()
2326 ch->status = SRP_STALE_CONN; srp_cm_rej_handler()
2332 ch->status = -ECONNRESET; srp_cm_rej_handler()
2338 struct srp_rdma_ch *ch = cm_id->context; srp_cm_handler() local
2339 struct srp_target_port *target = ch->target; srp_cm_handler()
2347 ch->status = -ECONNRESET; srp_cm_handler()
2352 srp_cm_rep_handler(cm_id, event->private_data, ch); srp_cm_handler()
2359 srp_cm_rej_handler(cm_id, event, ch); srp_cm_handler()
2365 ch->connected = false; srp_cm_handler()
2377 ch->status = 0; srp_cm_handler()
2392 complete(&ch->done); srp_cm_handler()
2412 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, srp_send_tsk_mgmt() argument
2415 struct srp_target_port *target = ch->target; srp_send_tsk_mgmt()
2421 if (!ch->connected || target->qp_in_error) srp_send_tsk_mgmt()
2424 init_completion(&ch->tsk_mgmt_done); srp_send_tsk_mgmt()
2431 spin_lock_irq(&ch->lock); srp_send_tsk_mgmt()
2432 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); srp_send_tsk_mgmt()
2433 spin_unlock_irq(&ch->lock); srp_send_tsk_mgmt()
2454 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { srp_send_tsk_mgmt()
2455 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); srp_send_tsk_mgmt()
2462 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, srp_send_tsk_mgmt()
2475 struct srp_rdma_ch *ch; srp_abort() local
2486 ch = &target->ch[ch_idx]; srp_abort()
2487 if (!srp_claim_req(ch, req, NULL, scmnd)) srp_abort()
2491 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, srp_abort()
2498 srp_free_req(ch, req, scmnd, 0); srp_abort()
2508 struct srp_rdma_ch *ch; srp_reset_device() local
2513 ch = &target->ch[0]; srp_reset_device()
2514 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, srp_reset_device()
2517 if (ch->tsk_mgmt_status) srp_reset_device()
2521 ch = &target->ch[i]; srp_reset_device()
2523 struct srp_request *req = &ch->req_ring[i]; srp_reset_device()
2525 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); srp_reset_device()
2603 struct srp_rdma_ch *ch = &target->ch[0]; show_dgid() local
2605 return sprintf(buf, "%pI6\n", ch->path.dgid.raw); show_dgid()
2620 struct srp_rdma_ch *ch; show_req_lim() local
2624 ch = &target->ch[i]; show_req_lim()
2625 req_lim = min(req_lim, ch->req_lim); show_req_lim()
3138 struct srp_rdma_ch *ch; srp_create_target() local
3222 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), srp_create_target()
3224 if (!target->ch) srp_create_target()
3246 ch = &target->ch[ch_start + cpu_idx]; for_each_online_cpu()
3247 ch->target = target; for_each_online_cpu()
3248 ch->comp_vector = cv_start == cv_end ? cv_start : for_each_online_cpu()
3250 spin_lock_init(&ch->lock); for_each_online_cpu()
3251 INIT_LIST_HEAD(&ch->free_tx); for_each_online_cpu()
3252 ret = srp_new_cm_id(ch); for_each_online_cpu()
3256 ret = srp_create_ch_ib(ch); for_each_online_cpu()
3260 ret = srp_alloc_req_data(ch); for_each_online_cpu()
3264 ret = srp_connect_ch(ch, multich); for_each_online_cpu()
3273 srp_free_ch_ib(target, ch); for_each_online_cpu()
3274 srp_free_req_data(target, ch); for_each_online_cpu()
3275 target->ch_count = ch - target->ch; for_each_online_cpu()
3318 ch = &target->ch[i];
3319 srp_free_ch_ib(target, ch);
3320 srp_free_req_data(target, ch);
3323 kfree(target->ch);
600 srp_free_ch_ib(struct srp_target_port *target, struct srp_rdma_ch *ch) srp_free_ch_ib() argument
830 srp_free_req_data(struct srp_target_port *target, struct srp_rdma_ch *ch) srp_free_req_data() argument
1053 srp_unmap_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req) srp_unmap_data() argument
1273 srp_map_finish_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch) srp_map_finish_fmr() argument
1292 srp_map_finish_fr(struct srp_map_state *state, struct srp_rdma_ch *ch) srp_map_finish_fr() argument
1334 srp_finish_mapping(struct srp_map_state *state, struct srp_rdma_ch *ch) srp_finish_mapping() argument
1368 srp_map_sg_entry(struct srp_map_state *state, struct srp_rdma_ch *ch, struct scatterlist *sg, int sg_index, bool use_mr) srp_map_sg_entry() argument
1917 srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, bool send_err, struct srp_rdma_ch *ch) srp_handle_qp_err() argument
2174 srp_cm_rep_handler(struct ib_cm_id *cm_id, struct srp_login_rsp *lrsp, struct srp_rdma_ch *ch) srp_cm_rep_handler() argument
2253 srp_cm_rej_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event, struct srp_rdma_ch *ch) srp_cm_rej_handler() argument
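__srp_get_tx_iu() and srp_put_tx_iu() above hand out transmit IUs from ch->free_tx while spending and refunding request-limit credits (ch->req_lim), all under ch->lock. A rough userspace sketch of that credit accounting, assuming plain counters instead of the driver's IU list, a pthread mutex in place of the spinlock, and arbitrary credit and reserve values.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int req_lim = 2;		/* credits granted by the target (invented) */
static int free_bufs = 2;	/* free transmit buffers (invented) */

/* Take one buffer and one credit, keeping 'rsv' credits in reserve. */
static int get_tx(int rsv)
{
	int ok = 0;

	pthread_mutex_lock(&lock);
	if (free_bufs > 0 && req_lim > rsv) {
		free_bufs--;
		req_lim--;
		ok = 1;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

/* Return the buffer; optionally refund the credit (the driver refunds it
 * only for some IU types). */
static void put_tx(int refund_credit)
{
	pthread_mutex_lock(&lock);
	free_bufs++;
	if (refund_credit)
		req_lim++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int reserve = 1;	/* arbitrary reserve, not the driver's value */

	printf("first get: %d\n", get_tx(reserve));
	printf("second get: %d\n", get_tx(reserve));	/* 0: only the reserve credit is left */
	put_tx(1);
	return 0;
}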
/linux-4.1.27/drivers/clk/berlin/
H A Dberlin2-avpll.c225 struct berlin2_avpll_channel *ch = to_avpll_channel(hw); berlin2_avpll_channel_is_enabled() local
228 if (ch->index == 7) berlin2_avpll_channel_is_enabled()
231 reg = readl_relaxed(ch->base + VCO_CTRL10); berlin2_avpll_channel_is_enabled()
232 reg &= VCO_POWERUP_CH1 << ch->index; berlin2_avpll_channel_is_enabled()
239 struct berlin2_avpll_channel *ch = to_avpll_channel(hw); berlin2_avpll_channel_enable() local
242 reg = readl_relaxed(ch->base + VCO_CTRL10); berlin2_avpll_channel_enable()
243 reg |= VCO_POWERUP_CH1 << ch->index; berlin2_avpll_channel_enable()
244 writel_relaxed(reg, ch->base + VCO_CTRL10); berlin2_avpll_channel_enable()
251 struct berlin2_avpll_channel *ch = to_avpll_channel(hw); berlin2_avpll_channel_disable() local
254 reg = readl_relaxed(ch->base + VCO_CTRL10); berlin2_avpll_channel_disable()
255 reg &= ~(VCO_POWERUP_CH1 << ch->index); berlin2_avpll_channel_disable()
256 writel_relaxed(reg, ch->base + VCO_CTRL10); berlin2_avpll_channel_disable()
265 struct berlin2_avpll_channel *ch = to_avpll_channel(hw); berlin2_avpll_channel_recalc_rate() local
269 reg = readl_relaxed(ch->base + VCO_CTRL30); berlin2_avpll_channel_recalc_rate()
270 if ((reg & (VCO_DPLL_CH1_ENABLE << ch->index)) == 0) berlin2_avpll_channel_recalc_rate()
278 reg = readl_relaxed(ch->base + VCO_SYNC1n(ch->index)); berlin2_avpll_channel_recalc_rate()
280 if (ch->flags & BERLIN2_AVPLL_BIT_QUIRK && ch->index == 0) berlin2_avpll_channel_recalc_rate()
284 reg = readl_relaxed(ch->base + VCO_SYNC2n(ch->index)); berlin2_avpll_channel_recalc_rate()
288 if (ch->index == 7) berlin2_avpll_channel_recalc_rate()
295 reg = readl_relaxed(ch->base + VCO_CTRL11) >> 7; berlin2_avpll_channel_recalc_rate()
296 reg = (reg >> (ch->index * 3)); berlin2_avpll_channel_recalc_rate()
304 if (ch->index == 0) { berlin2_avpll_channel_recalc_rate()
305 reg = readl_relaxed(ch->base + VCO_CTRL11); berlin2_avpll_channel_recalc_rate()
308 reg = readl_relaxed(ch->base + VCO_CTRL12); berlin2_avpll_channel_recalc_rate()
309 reg >>= (ch->index-1) * 3; berlin2_avpll_channel_recalc_rate()
318 if (ch->index < 2) { berlin2_avpll_channel_recalc_rate()
319 reg = readl_relaxed(ch->base + VCO_CTRL12); berlin2_avpll_channel_recalc_rate()
320 reg >>= 18 + (ch->index * 7); berlin2_avpll_channel_recalc_rate()
321 } else if (ch->index < 7) { berlin2_avpll_channel_recalc_rate()
322 reg = readl_relaxed(ch->base + VCO_CTRL13); berlin2_avpll_channel_recalc_rate()
323 reg >>= (ch->index - 2) * 7; berlin2_avpll_channel_recalc_rate()
325 reg = readl_relaxed(ch->base + VCO_CTRL14); berlin2_avpll_channel_recalc_rate()
336 if (ch->index < 6) { berlin2_avpll_channel_recalc_rate()
337 reg = readl_relaxed(ch->base + VCO_CTRL14); berlin2_avpll_channel_recalc_rate()
338 reg >>= 7 + (ch->index * 4); berlin2_avpll_channel_recalc_rate()
340 reg = readl_relaxed(ch->base + VCO_CTRL15); berlin2_avpll_channel_recalc_rate()
371 struct berlin2_avpll_channel *ch; berlin2_avpll_channel_register() local
374 ch = kzalloc(sizeof(*ch), GFP_KERNEL); berlin2_avpll_channel_register()
375 if (!ch) berlin2_avpll_channel_register()
378 ch->base = base; berlin2_avpll_channel_register()
380 ch->index = quirk_index[index]; berlin2_avpll_channel_register()
382 ch->index = index; berlin2_avpll_channel_register()
384 ch->flags = ch_flags; berlin2_avpll_channel_register()
385 ch->hw.init = &init; berlin2_avpll_channel_register()
392 return clk_register(NULL, &ch->hw); berlin2_avpll_channel_register()
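berlin2_avpll_channel_enable(), _disable() and _is_enabled() above are a read-modify-write of one power-up bit per channel in VCO_CTRL10 (leaving aside the ch->index == 7 special case). A self-contained sketch of that pattern against a plain in-memory word; the bit position below is a stand-in for VCO_POWERUP_CH1 << ch->index, and the readl_relaxed()/writel_relaxed() accessors become simple loads and stores.

#include <stdint.h>
#include <stdio.h>

static uint32_t vco_ctrl10;	/* stands in for the memory-mapped register */

static void channel_enable(unsigned int index)
{
	uint32_t reg = vco_ctrl10;	/* readl_relaxed() in the driver */
	reg |= 1u << index;		/* VCO_POWERUP_CH1 << ch->index (stand-in) */
	vco_ctrl10 = reg;		/* writel_relaxed() in the driver */
}

static void channel_disable(unsigned int index)
{
	uint32_t reg = vco_ctrl10;
	reg &= ~(1u << index);
	vco_ctrl10 = reg;
}

static int channel_is_enabled(unsigned int index)
{
	return !!(vco_ctrl10 & (1u << index));
}

int main(void)
{
	channel_enable(3);
	printf("ch3=%d ch2=%d\n", channel_is_enabled(3), channel_is_enabled(2));
	channel_disable(3);
	printf("ch3=%d\n", channel_is_enabled(3));
	return 0;
}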
/linux-4.1.27/drivers/gpu/host1x/hw/
H A Dcdma_hw.c69 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_start() local
76 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, cdma_start()
80 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART); cdma_start()
81 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT); cdma_start()
82 host1x_ch_writel(ch, cdma->push_buffer.phys + cdma_start()
87 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP | cdma_start()
93 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL); cdma_start()
106 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_timeout_restart() local
113 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, cdma_timeout_restart()
117 host1x_ch_writel(ch, cdma->push_buffer.phys, HOST1X_CHANNEL_DMASTART); cdma_timeout_restart()
118 host1x_ch_writel(ch, cdma->push_buffer.phys + cdma_timeout_restart()
123 host1x_ch_writel(ch, getptr, HOST1X_CHANNEL_DMAPUT); cdma_timeout_restart()
124 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP | cdma_timeout_restart()
131 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET), cdma_timeout_restart()
132 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT), cdma_timeout_restart()
136 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, cdma_timeout_restart()
138 host1x_ch_writel(ch, cdma->push_buffer.pos, HOST1X_CHANNEL_DMAPUT); cdma_timeout_restart()
141 host1x_ch_writel(ch, 0, HOST1X_CHANNEL_DMACTRL); cdma_timeout_restart()
151 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_flush() local
154 host1x_ch_writel(ch, cdma->push_buffer.pos, cdma_flush()
162 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_stop() local
167 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, cdma_stop()
181 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_freeze() local
189 dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id); cdma_freeze()
192 cmdproc_stop |= BIT(ch->id); cdma_freeze()
196 __func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET), cdma_freeze()
197 host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT), cdma_freeze()
200 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, cdma_freeze()
203 host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN); cdma_freeze()
212 struct host1x_channel *ch = cdma_to_channel(cdma); cdma_resume() local
217 ch->id, getptr); cdma_resume()
220 cmdproc_stop &= ~(BIT(ch->id)); cdma_resume()
236 struct host1x_channel *ch; cdma_timeout_handler() local
245 ch = cdma_to_channel(cdma); cdma_timeout_handler()
260 cmdproc_stop = prev_cmdproc | BIT(ch->id); cdma_timeout_handler()
273 cmdproc_stop = prev_cmdproc & ~(BIT(ch->id)); cdma_timeout_handler()
287 host1x_cdma_update_sync_queue(cdma, ch->dev); cdma_timeout_handler()
H A Dchannel_hw.c92 struct host1x_channel *ch = job->channel; channel_submit() local
99 struct host1x *host = dev_get_drvdata(ch->dev->parent); channel_submit()
102 trace_host1x_channel_submit(dev_name(ch->dev), channel_submit()
111 err = mutex_lock_interruptible(&ch->submitlock); channel_submit()
117 mutex_unlock(&ch->submitlock); channel_submit()
123 err = host1x_cdma_begin(&ch->cdma, job); channel_submit()
125 mutex_unlock(&ch->submitlock); channel_submit()
134 host1x_cdma_push(&ch->cdma, channel_submit()
151 host1x_cdma_push(&ch->cdma, channel_submit()
158 host1x_cdma_end(&ch->cdma, job); channel_submit()
160 trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval); channel_submit()
164 HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch, channel_submit()
169 mutex_unlock(&ch->submitlock); channel_submit()
178 static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev, host1x_channel_init() argument
181 ch->id = index; host1x_channel_init()
182 mutex_init(&ch->reflock); host1x_channel_init()
183 mutex_init(&ch->submitlock); host1x_channel_init()
185 ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE; host1x_channel_init()
H A Ddebug_hw.c179 struct host1x_channel *ch, host1x_debug_show_channel_cdma()
182 struct host1x_cdma *cdma = &ch->cdma; host1x_debug_show_channel_cdma()
187 dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT); host1x_debug_show_channel_cdma()
188 dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET); host1x_debug_show_channel_cdma()
189 dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL); host1x_debug_show_channel_cdma()
190 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); host1x_debug_show_channel_cdma()
191 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); host1x_debug_show_channel_cdma()
193 host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev)); host1x_debug_show_channel_cdma()
196 !ch->cdma.push_buffer.mapped) { host1x_debug_show_channel_cdma()
233 struct host1x_channel *ch, host1x_debug_show_channel_fifo()
239 host1x_debug_output(o, "%d: fifo:\n", ch->id); host1x_debug_show_channel_fifo()
241 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); host1x_debug_show_channel_fifo()
250 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id), host1x_debug_show_channel_fifo()
257 val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id)); host1x_debug_show_channel_fifo()
264 HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) | host1x_debug_show_channel_fifo()
178 host1x_debug_show_channel_cdma(struct host1x *host, struct host1x_channel *ch, struct output *o) host1x_debug_show_channel_cdma() argument
232 host1x_debug_show_channel_fifo(struct host1x *host, struct host1x_channel *ch, struct output *o) host1x_debug_show_channel_fifo() argument
/linux-4.1.27/arch/m68k/sun3/prom/
H A Dprintf.c26 char ch, *bptr; prom_printf() local
46 while((ch = *(bptr++)) != 0) { prom_printf()
47 if(ch == '\n') prom_printf()
50 prom_putchar(ch); prom_printf()
/linux-4.1.27/drivers/media/platform/davinci/
H A Dvpif_display.c53 static void vpif_calculate_offsets(struct channel_obj *ch);
54 static void vpif_config_addr(struct channel_obj *ch, int muxmode);
71 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); vpif_buffer_prepare() local
74 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_prepare()
114 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_buffer_queue_setup() local
115 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_queue_setup()
128 vpif_calculate_offsets(ch); vpif_buffer_queue_setup()
142 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); vpif_buffer_queue() local
146 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_queue()
163 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_start_streaming() local
164 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_start_streaming()
165 struct vpif_params *vpif = &ch->vpifparams; vpif_start_streaming()
173 ch->field_id = 0; vpif_start_streaming()
177 ret = vpif_config_data->set_clock(ch->vpifparams.std_info. vpif_start_streaming()
178 ycmux_mode, ch->vpifparams.std_info.hd_sd); vpif_start_streaming()
186 ret = vpif_set_video_params(vpif, ch->channel_id + 2); vpif_start_streaming()
191 vpif_config_addr(ch, ret); vpif_start_streaming()
210 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; vpif_start_streaming()
211 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { vpif_start_streaming()
219 if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) { vpif_start_streaming()
248 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_stop_streaming() local
252 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_stop_streaming()
255 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { vpif_stop_streaming()
259 if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) { vpif_stop_streaming()
357 struct channel_obj *ch; vpif_channel_isr() local
366 ch = dev->dev[channel_id]; vpif_channel_isr()
368 common = &ch->common[i]; vpif_channel_isr()
371 if (1 == ch->vpifparams.std_info.frm_fmt) { vpif_channel_isr()
403 ch->field_id ^= 1; vpif_channel_isr()
405 fid = vpif_channel_getfid(ch->channel_id + 2); vpif_channel_isr()
407 if (fid != ch->field_id) { vpif_channel_isr()
410 ch->field_id = fid; vpif_channel_isr()
422 static int vpif_update_std_info(struct channel_obj *ch) vpif_update_std_info() argument
424 struct video_obj *vid_ch = &ch->video; vpif_update_std_info()
425 struct vpif_params *vpifparams = &ch->vpifparams; vpif_update_std_info()
450 static int vpif_update_resolution(struct channel_obj *ch) vpif_update_resolution() argument
452 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_update_resolution()
453 struct video_obj *vid_ch = &ch->video; vpif_update_resolution()
454 struct vpif_params *vpifparams = &ch->vpifparams; vpif_update_resolution()
461 if (vpif_update_std_info(ch)) vpif_update_resolution()
481 if (ch->vpifparams.std_info.frm_fmt) vpif_update_resolution()
493 static void vpif_calculate_offsets(struct channel_obj *ch) vpif_calculate_offsets() argument
495 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_calculate_offsets()
496 struct vpif_params *vpifparams = &ch->vpifparams; vpif_calculate_offsets()
498 struct video_obj *vid_ch = &ch->video; vpif_calculate_offsets()
502 if (ch->vpifparams.std_info.frm_fmt) vpif_calculate_offsets()
538 if (ch->vpifparams.std_info.frm_fmt == 1) { vpif_calculate_offsets()
551 ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid; vpif_calculate_offsets()
554 static void vpif_config_addr(struct channel_obj *ch, int muxmode) vpif_config_addr() argument
556 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_config_addr()
558 if (VPIF_CHANNEL3_VIDEO == ch->channel_id) { vpif_config_addr()
608 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_fmt_vid_out() local
609 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_g_fmt_vid_out()
615 if (vpif_update_resolution(ch)) vpif_g_fmt_vid_out()
625 struct channel_obj *ch = video_get_drvdata(vdev); vpif_try_fmt_vid_out() local
626 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_try_fmt_vid_out()
636 if (vpif_update_resolution(ch)) vpif_try_fmt_vid_out()
653 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_fmt_vid_out() local
654 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_fmt_vid_out()
677 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_std() local
678 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_std()
683 if (config->chan_config[ch->channel_id].outputs == NULL) vpif_s_std()
686 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_std()
687 output = chan_cfg->outputs[ch->output_idx].output; vpif_s_std()
699 ch->video.stdid = std_id; vpif_s_std()
700 memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_s_std()
702 if (vpif_update_resolution(ch)) vpif_s_std()
725 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_std() local
729 if (config->chan_config[ch->channel_id].outputs == NULL) vpif_g_std()
732 chan_cfg = &config->chan_config[ch->channel_id]; vpif_g_std()
733 output = chan_cfg->outputs[ch->output_idx].output; vpif_g_std()
737 *std = ch->video.stdid; vpif_g_std()
747 struct channel_obj *ch = video_get_drvdata(vdev); vpif_enum_output() local
750 chan_cfg = &config->chan_config[ch->channel_id]; vpif_enum_output()
799 * @ch - channel
805 struct channel_obj *ch, int index) vpif_set_output()
808 &vpif_cfg->chan_config[ch->channel_id]; vpif_set_output()
828 ch->output_idx = index; vpif_set_output()
829 ch->sd = sd; vpif_set_output()
832 ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std; vpif_set_output()
840 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_output() local
842 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_output()
847 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_output()
852 return vpif_set_output(config, ch, i); vpif_s_output()
858 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_output() local
860 *i = ch->output_idx; vpif_g_output()
877 struct channel_obj *ch = video_get_drvdata(vdev); vpif_enum_dv_timings() local
882 if (config->chan_config[ch->channel_id].outputs == NULL) vpif_enum_dv_timings()
885 chan_cfg = &config->chan_config[ch->channel_id]; vpif_enum_dv_timings()
886 output = chan_cfg->outputs[ch->output_idx].output; vpif_enum_dv_timings()
892 ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); vpif_enum_dv_timings()
909 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_dv_timings() local
910 struct vpif_params *vpifparams = &ch->vpifparams; vpif_s_dv_timings()
911 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_dv_timings()
913 struct video_obj *vid_ch = &ch->video; vpif_s_dv_timings()
919 if (config->chan_config[ch->channel_id].outputs == NULL) vpif_s_dv_timings()
922 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_dv_timings()
923 output = chan_cfg->outputs[ch->output_idx].output; vpif_s_dv_timings()
936 ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); vpif_s_dv_timings()
1013 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_dv_timings() local
1015 struct video_obj *vid_ch = &ch->video; vpif_g_dv_timings()
1018 if (config->chan_config[ch->channel_id].outputs == NULL) vpif_g_dv_timings()
1021 chan_cfg = &config->chan_config[ch->channel_id]; vpif_g_dv_timings()
1022 output = chan_cfg->outputs[ch->output_idx].output; vpif_g_dv_timings()
1136 struct channel_obj *ch; vpif_probe_complete() local
1141 ch = vpif_obj.dev[j]; vpif_probe_complete()
1144 common = &ch->common[k]; vpif_probe_complete()
1156 ch->initialized = 0; vpif_probe_complete()
1158 ch->sd = vpif_obj.sd[0]; vpif_probe_complete()
1159 ch->channel_id = j; vpif_probe_complete()
1161 memset(&ch->vpifparams, 0, sizeof(ch->vpifparams)); vpif_probe_complete()
1163 ch->common[VPIF_VIDEO_INDEX].fmt.type = vpif_probe_complete()
1167 err = vpif_set_output(vpif_obj.config, ch, 0); vpif_probe_complete()
1172 ch->video.stdid = V4L2_STD_525_60; vpif_probe_complete()
1173 memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_probe_complete()
1174 vpif_update_resolution(ch); vpif_probe_complete()
1180 q->drv_priv = ch; vpif_probe_complete()
1204 ch, &ch->video_dev); vpif_probe_complete()
1207 vdev = &ch->video_dev; vpif_probe_complete()
1216 video_set_drvdata(&ch->video_dev, ch); vpif_probe_complete()
1227 ch = vpif_obj.dev[k]; vpif_probe_complete()
1228 common = &ch->common[k]; vpif_probe_complete()
1230 video_unregister_device(&ch->video_dev); vpif_probe_complete()
1340 struct channel_obj *ch; vpif_remove() local
1349 ch = vpif_obj.dev[i]; vpif_remove()
1350 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_remove()
1353 video_unregister_device(&ch->video_dev); vpif_remove()
1364 struct channel_obj *ch; vpif_suspend() local
1369 ch = vpif_obj.dev[i]; vpif_suspend()
1370 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_suspend()
1377 if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { vpif_suspend()
1381 if (ch->channel_id == VPIF_CHANNEL3_VIDEO || vpif_suspend()
1396 struct channel_obj *ch; vpif_resume() local
1401 ch = vpif_obj.dev[i]; vpif_resume()
1402 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_resume()
1409 if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { vpif_resume()
1413 if (ch->channel_id == VPIF_CHANNEL3_VIDEO || vpif_resume()
804 vpif_set_output(struct vpif_display_config *vpif_cfg, struct channel_obj *ch, int index) vpif_set_output() argument
H A Dvpif_capture.c52 static void vpif_calculate_offsets(struct channel_obj *ch);
53 static void vpif_config_addr(struct channel_obj *ch, int muxmode);
76 struct channel_obj *ch = vb2_get_drv_priv(q); vpif_buffer_prepare() local
82 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_prepare()
119 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_buffer_queue_setup() local
122 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_queue_setup()
137 vpif_calculate_offsets(ch); vpif_buffer_queue_setup()
148 struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); vpif_buffer_queue() local
153 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_buffer_queue()
172 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_start_streaming() local
173 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_start_streaming()
174 struct vpif_params *vpif = &ch->vpifparams; vpif_start_streaming()
182 ch->field_id = 0; vpif_start_streaming()
194 ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); vpif_start_streaming()
201 ret = vpif_set_video_params(vpif, ch->channel_id); vpif_start_streaming()
208 vpif_config_addr(ch, ret); vpif_start_streaming()
228 channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; vpif_start_streaming()
229 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { vpif_start_streaming()
234 if (VPIF_CHANNEL1_VIDEO == ch->channel_id || vpif_start_streaming()
262 struct channel_obj *ch = vb2_get_drv_priv(vq); vpif_stop_streaming() local
267 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_stop_streaming()
270 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { vpif_stop_streaming()
274 if (VPIF_CHANNEL1_VIDEO == ch->channel_id || vpif_stop_streaming()
282 ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); vpif_stop_streaming()
374 struct channel_obj *ch; vpif_channel_isr() local
382 ch = dev->dev[channel_id]; vpif_channel_isr()
385 common = &ch->common[i]; vpif_channel_isr()
388 if (1 == ch->vpifparams.std_info.frm_fmt) { vpif_channel_isr()
416 ch->field_id ^= 1; vpif_channel_isr()
418 fid = vpif_channel_getfid(ch->channel_id); vpif_channel_isr()
419 if (fid != ch->field_id) { vpif_channel_isr()
425 ch->field_id = fid; vpif_channel_isr()
456 * @ch: ptr to channel object
461 static int vpif_update_std_info(struct channel_obj *ch) vpif_update_std_info() argument
463 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_update_std_info()
464 struct vpif_params *vpifparams = &ch->vpifparams; vpif_update_std_info()
467 struct video_obj *vid_ch = &ch->video; vpif_update_std_info()
508 if (ch->vpifparams.std_info.frm_fmt) vpif_update_std_info()
513 if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) vpif_update_std_info()
525 * @ch : ptr to channel object
530 static void vpif_calculate_offsets(struct channel_obj *ch) vpif_calculate_offsets() argument
533 struct video_obj *vid_ch = &(ch->video); vpif_calculate_offsets()
534 struct vpif_params *vpifparams = &ch->vpifparams; vpif_calculate_offsets()
535 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_calculate_offsets()
591 ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid; vpif_calculate_offsets()
607 * @ch - channel ptr
610 static void vpif_config_addr(struct channel_obj *ch, int muxmode) vpif_config_addr() argument
616 common = &(ch->common[VPIF_VIDEO_INDEX]); vpif_config_addr()
618 if (VPIF_CHANNEL1_VIDEO == ch->channel_id) vpif_config_addr()
663 * @ch - channel
670 struct channel_obj *ch, vpif_set_input()
674 &vpif_cfg->chan_config[ch->channel_id]; vpif_set_input()
689 ret = vpif_cfg->setup_input_path(ch->channel_id, vpif_set_input()
709 ch->input_idx = index; vpif_set_input()
710 ch->sd = sd; vpif_set_input()
712 ch->vpifparams.iface = chan_cfg->vpif_if; vpif_set_input()
715 ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std; vpif_set_input()
730 struct channel_obj *ch = video_get_drvdata(vdev); vpif_querystd() local
736 ret = v4l2_subdev_call(ch->sd, video, querystd, std_id); vpif_querystd()
758 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_std() local
764 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_g_std()
767 chan_cfg = &config->chan_config[ch->channel_id]; vpif_g_std()
768 input = chan_cfg->inputs[ch->input_idx].input; vpif_g_std()
772 *std = ch->video.stdid; vpif_g_std()
786 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_std() local
787 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_std()
794 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_s_std()
797 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_std()
798 input = chan_cfg->inputs[ch->input_idx].input; vpif_s_std()
806 ch->video.stdid = std_id; vpif_s_std()
807 memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_s_std()
810 if (vpif_update_std_info(ch)) { vpif_s_std()
816 ret = v4l2_subdev_call(ch->sd, video, s_std, std_id); vpif_s_std()
836 struct channel_obj *ch = video_get_drvdata(vdev); vpif_enum_input() local
839 chan_cfg = &config->chan_config[ch->channel_id]; vpif_enum_input()
858 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_input() local
860 *index = ch->input_idx; vpif_g_input()
874 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_input() local
875 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_input()
878 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_input()
886 return vpif_set_input(config, ch, index); vpif_s_input()
899 struct channel_obj *ch = video_get_drvdata(vdev); vpif_enum_fmt_vid_cap() local
907 if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER) { vpif_enum_fmt_vid_cap()
929 struct channel_obj *ch = video_get_drvdata(vdev); vpif_try_fmt_vid_cap() local
931 struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]); vpif_try_fmt_vid_cap()
932 struct vpif_params *vpif_params = &ch->vpifparams; vpif_try_fmt_vid_cap()
948 vpif_update_std_info(ch); vpif_try_fmt_vid_cap()
972 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_fmt_vid_cap() local
973 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_g_fmt_vid_cap()
994 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_fmt_vid_cap() local
995 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_fmt_vid_cap()
1045 struct channel_obj *ch = video_get_drvdata(vdev); vpif_enum_dv_timings() local
1050 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_enum_dv_timings()
1053 chan_cfg = &config->chan_config[ch->channel_id]; vpif_enum_dv_timings()
1054 input = chan_cfg->inputs[ch->input_idx].input; vpif_enum_dv_timings()
1060 ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); vpif_enum_dv_timings()
1079 struct channel_obj *ch = video_get_drvdata(vdev); vpif_query_dv_timings() local
1084 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_query_dv_timings()
1087 chan_cfg = &config->chan_config[ch->channel_id]; vpif_query_dv_timings()
1088 input = chan_cfg->inputs[ch->input_idx].input; vpif_query_dv_timings()
1092 ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings); vpif_query_dv_timings()
1110 struct channel_obj *ch = video_get_drvdata(vdev); vpif_s_dv_timings() local
1111 struct vpif_params *vpifparams = &ch->vpifparams; vpif_s_dv_timings()
1113 struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; vpif_s_dv_timings()
1114 struct video_obj *vid_ch = &ch->video; vpif_s_dv_timings()
1120 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_s_dv_timings()
1123 chan_cfg = &config->chan_config[ch->channel_id]; vpif_s_dv_timings()
1124 input = chan_cfg->inputs[ch->input_idx].input; vpif_s_dv_timings()
1137 ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); vpif_s_dv_timings()
1213 struct channel_obj *ch = video_get_drvdata(vdev); vpif_g_dv_timings() local
1214 struct video_obj *vid_ch = &ch->video; vpif_g_dv_timings()
1218 if (config->chan_config[ch->channel_id].inputs == NULL) vpif_g_dv_timings()
1221 chan_cfg = &config->chan_config[ch->channel_id]; vpif_g_dv_timings()
1222 input = chan_cfg->inputs[ch->input_idx].input; vpif_g_dv_timings()
1338 struct channel_obj *ch; vpif_probe_complete() local
1343 ch = vpif_obj.dev[j]; vpif_probe_complete()
1344 ch->channel_id = j; vpif_probe_complete()
1345 common = &(ch->common[VPIF_VIDEO_INDEX]); vpif_probe_complete()
1350 err = vpif_set_input(vpif_obj.config, ch, 0); vpif_probe_complete()
1355 ch->video.stdid = V4L2_STD_525_60; vpif_probe_complete()
1356 memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_probe_complete()
1357 vpif_update_std_info(ch); vpif_probe_complete()
1363 q->drv_priv = ch; vpif_probe_complete()
1387 vdev = &ch->video_dev; vpif_probe_complete()
1396 video_set_drvdata(&ch->video_dev, ch); vpif_probe_complete()
1409 ch = vpif_obj.dev[k]; vpif_probe_complete()
1410 common = &ch->common[k]; vpif_probe_complete()
1413 video_unregister_device(&ch->video_dev); vpif_probe_complete()
1534 struct channel_obj *ch; vpif_remove() local
1543 ch = vpif_obj.dev[i]; vpif_remove()
1544 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_remove()
1547 video_unregister_device(&ch->video_dev); vpif_remove()
1561 struct channel_obj *ch; vpif_suspend() local
1566 ch = vpif_obj.dev[i]; vpif_suspend()
1567 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_suspend()
1574 if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { vpif_suspend()
1578 if (ch->channel_id == VPIF_CHANNEL1_VIDEO || vpif_suspend()
1595 struct channel_obj *ch; vpif_resume() local
1600 ch = vpif_obj.dev[i]; vpif_resume()
1601 common = &ch->common[VPIF_VIDEO_INDEX]; vpif_resume()
1608 if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { vpif_resume()
1612 if (ch->channel_id == VPIF_CHANNEL1_VIDEO || vpif_resume()
668 vpif_set_input( struct vpif_capture_config *vpif_cfg, struct channel_obj *ch, int index) vpif_set_input() argument
/linux-4.1.27/drivers/isdn/sc/
H A Dshmem.c30 unsigned char ch; memcpy_toshmem() local
44 ch = (unsigned long) dest / SRAM_PAGESIZE; memcpy_toshmem()
45 pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch); memcpy_toshmem()
51 outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, memcpy_toshmem()
56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80); memcpy_toshmem()
69 unsigned char ch; memcpy_fromshmem() local
83 ch = (unsigned long) src / SRAM_PAGESIZE; memcpy_fromshmem()
84 pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch); memcpy_fromshmem()
92 outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, memcpy_fromshmem()
98 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80); memcpy_fromshmem()
108 unsigned char ch;
122 ch = (unsigned long) dest / SRAM_PAGESIZE;
123 pr_debug("%s: loaded page %d\n", sc_adapter[card]->devicename, ch);
130 outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80,
135 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80);
/linux-4.1.27/sound/isa/sb/
H A Demu8000_pcm.c182 static inline int emu8k_get_curpos(struct snd_emu8k_pcm *rec, int ch) emu8k_get_curpos() argument
184 int val = EMU8000_CCCA_READ(rec->emu, ch) & 0xfffffff; emu8k_get_curpos()
185 val -= rec->loop_start[ch] - 1; emu8k_get_curpos()
280 static void setup_voice(struct snd_emu8k_pcm *rec, int ch) setup_voice() argument
286 EMU8000_DCYSUSV_WRITE(hw, ch, 0x0080); setup_voice()
287 EMU8000_VTFT_WRITE(hw, ch, 0x0000FFFF); setup_voice()
288 EMU8000_CVCF_WRITE(hw, ch, 0x0000FFFF); setup_voice()
289 EMU8000_PTRX_WRITE(hw, ch, 0); setup_voice()
290 EMU8000_CPF_WRITE(hw, ch, 0); setup_voice()
293 EMU8000_IP_WRITE(hw, ch, rec->pitch); setup_voice()
295 EMU8000_ENVVAL_WRITE(hw, ch, 0x8000); setup_voice()
296 EMU8000_ATKHLD_WRITE(hw, ch, 0x7f7f); setup_voice()
297 EMU8000_DCYSUS_WRITE(hw, ch, 0x7f7f); setup_voice()
298 EMU8000_ENVVOL_WRITE(hw, ch, 0x8000); setup_voice()
299 EMU8000_ATKHLDV_WRITE(hw, ch, 0x7f7f); setup_voice()
303 EMU8000_PEFE_WRITE(hw, ch, 0x0); setup_voice()
305 EMU8000_LFO1VAL_WRITE(hw, ch, 0x8000); setup_voice()
306 EMU8000_LFO2VAL_WRITE(hw, ch, 0x8000); setup_voice()
308 EMU8000_FMMOD_WRITE(hw, ch, 0); setup_voice()
310 EMU8000_TREMFRQ_WRITE(hw, ch, 0); setup_voice()
312 EMU8000_FM2FRQ2_WRITE(hw, ch, 0); setup_voice()
314 temp = rec->panning[ch]; setup_voice()
315 temp = (temp <<24) | ((unsigned int)rec->loop_start[ch] - 1); setup_voice()
316 EMU8000_PSST_WRITE(hw, ch, temp); setup_voice()
319 temp = (temp << 24) | ((unsigned int)rec->loop_start[ch] + rec->buf_size - 1); setup_voice()
320 EMU8000_CSL_WRITE(hw, ch, temp); setup_voice()
323 temp = (temp << 28) | ((unsigned int)rec->loop_start[ch] - 1); setup_voice()
324 EMU8000_CCCA_WRITE(hw, ch, temp); setup_voice()
326 EMU8000_00A0_WRITE(hw, ch, 0); setup_voice()
327 EMU8000_0080_WRITE(hw, ch, 0); setup_voice()
333 static void start_voice(struct snd_emu8k_pcm *rec, int ch) start_voice() argument
341 EMU8000_IFATN_WRITE(hw, ch, 0xff00); start_voice()
342 EMU8000_VTFT_WRITE(hw, ch, 0xffff); start_voice()
343 EMU8000_CVCF_WRITE(hw, ch, 0xffff); start_voice()
345 EMU8000_DCYSUSV_WRITE(hw, ch, 0x7f7f); start_voice()
348 if (rec->panning[ch] == 0) start_voice()
351 aux = (-rec->panning[ch]) & 0xff; start_voice()
353 EMU8000_PTRX_WRITE(hw, ch, temp); start_voice()
354 EMU8000_CPF_WRITE(hw, ch, pt << 16); start_voice()
368 static void stop_voice(struct snd_emu8k_pcm *rec, int ch) stop_voice() argument
373 EMU8000_DCYSUSV_WRITE(hw, ch, 0x807F); stop_voice()
387 int ch; emu8k_pcm_trigger() local
391 for (ch = 0; ch < rec->voices; ch++) emu8k_pcm_trigger()
392 start_voice(rec, ch); emu8k_pcm_trigger()
397 for (ch = 0; ch < rec->voices; ch++) emu8k_pcm_trigger()
398 stop_voice(rec, ch); emu8k_pcm_trigger()
598 int ch; emu8k_pcm_hw_free() local
599 for (ch = 0; ch < rec->voices; ch++) emu8k_pcm_hw_free()
600 stop_voice(rec, ch); // to be sure emu8k_pcm_hw_free()
632 int err, i, ch; emu8k_pcm_prepare() local
644 for (ch = 0; ch < rec->voices; ch++) { emu8k_pcm_prepare()
645 EMU8000_SMALW_WRITE(rec->emu, rec->loop_start[ch] + rec->buf_size); emu8k_pcm_prepare()
H A Demu8000_callback.c35 static void reset_voice(struct snd_emux *emu, int ch);
52 static void snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int ch);
103 EMU8000_DCYSUS_WRITE(hw, vp->ch, dcysusv); release_voice()
105 EMU8000_DCYSUSV_WRITE(hw, vp->ch, dcysusv); release_voice()
117 EMU8000_DCYSUSV_WRITE(hw, vp->ch, 0x807F); terminate_voice()
153 * The channel index (vp->ch) must be initialized in this routine.
196 val = (EMU8000_CVCF_READ(hw, vp->ch) >> 16) & 0xffff; get_voice()
208 val = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff; get_voice()
222 vp->ch = best[i].voice; get_voice()
237 int ch; start_voice() local
243 ch = vp->ch; start_voice()
247 EMU8000_DCYSUSV_WRITE(hw, ch, 0x0080); start_voice()
248 EMU8000_VTFT_WRITE(hw, ch, 0x0000FFFF); start_voice()
249 EMU8000_CVCF_WRITE(hw, ch, 0x0000FFFF); start_voice()
250 EMU8000_PTRX_WRITE(hw, ch, 0); start_voice()
251 EMU8000_CPF_WRITE(hw, ch, 0); start_voice()
257 EMU8000_ENVVAL_WRITE(hw, ch, vp->reg.parm.moddelay); start_voice()
258 EMU8000_ATKHLD_WRITE(hw, ch, vp->reg.parm.modatkhld); start_voice()
259 EMU8000_DCYSUS_WRITE(hw, ch, vp->reg.parm.moddcysus); start_voice()
260 EMU8000_ENVVOL_WRITE(hw, ch, vp->reg.parm.voldelay); start_voice()
261 EMU8000_ATKHLDV_WRITE(hw, ch, vp->reg.parm.volatkhld); start_voice()
269 EMU8000_PEFE_WRITE(hw, ch, vp->reg.parm.pefe); start_voice()
272 EMU8000_LFO1VAL_WRITE(hw, ch, vp->reg.parm.lfo1delay); start_voice()
273 EMU8000_LFO2VAL_WRITE(hw, ch, vp->reg.parm.lfo2delay); start_voice()
290 EMU8000_CSL_WRITE(hw, ch, temp); start_voice()
296 EMU8000_CCCA_WRITE(hw, ch, temp); start_voice()
299 EMU8000_00A0_WRITE(hw, ch, 0); start_voice()
300 EMU8000_0080_WRITE(hw, ch, 0); start_voice()
304 EMU8000_VTFT_WRITE(hw, ch, temp | vp->ftarget); start_voice()
305 EMU8000_CVCF_WRITE(hw, ch, temp | 0xff00); start_voice()
316 int ch = vp->ch; trigger_voice() local
327 EMU8000_PTRX_WRITE(hw, ch, temp); trigger_voice()
328 EMU8000_CPF_WRITE(hw, ch, vp->ptarget << 16); trigger_voice()
329 EMU8000_DCYSUSV_WRITE(hw, ch, vp->reg.parm.voldcysus); trigger_voice()
336 reset_voice(struct snd_emux *emu, int ch) reset_voice() argument
341 EMU8000_DCYSUSV_WRITE(hw, ch, 0x807F); reset_voice()
342 snd_emu8000_tweak_voice(hw, ch); reset_voice()
351 EMU8000_IP_WRITE(hw, vp->ch, vp->apitch); set_pitch()
365 EMU8000_IFATN_WRITE(hw, vp->ch, ifatn); set_volume()
377 EMU8000_PSST_WRITE(hw, vp->ch, temp); set_pan()
396 EMU8000_FMMOD_WRITE(hw, vp->ch, fmmod); set_fmmod()
403 EMU8000_TREMFRQ_WRITE(hw, vp->ch, vp->reg.parm.tremfrq); set_tremfreq()
421 EMU8000_FM2FRQ2_WRITE(hw, vp->ch, fm2frq2); set_fm2frq2()
429 addr = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff; set_filterQ()
431 EMU8000_CCCA_WRITE(hw, vp->ch, addr); set_filterQ()
H A Demu8000.c110 snd_emu8000_dma_chan(struct snd_emu8000 *emu, int ch, int mode) snd_emu8000_dma_chan() argument
115 EMU8000_CCCA_WRITE(emu, ch, 0); snd_emu8000_dma_chan()
116 EMU8000_DCYSUSV_WRITE(emu, ch, 0x807F); snd_emu8000_dma_chan()
119 EMU8000_DCYSUSV_WRITE(emu, ch, 0x80); snd_emu8000_dma_chan()
120 EMU8000_VTFT_WRITE(emu, ch, 0); snd_emu8000_dma_chan()
121 EMU8000_CVCF_WRITE(emu, ch, 0); snd_emu8000_dma_chan()
122 EMU8000_PTRX_WRITE(emu, ch, 0x40000000); snd_emu8000_dma_chan()
123 EMU8000_CPF_WRITE(emu, ch, 0x40000000); snd_emu8000_dma_chan()
124 EMU8000_PSST_WRITE(emu, ch, 0); snd_emu8000_dma_chan()
125 EMU8000_CSL_WRITE(emu, ch, 0); snd_emu8000_dma_chan()
127 EMU8000_CCCA_WRITE(emu, ch, 0x06000000 | right_bit); snd_emu8000_dma_chan()
129 EMU8000_CCCA_WRITE(emu, ch, 0x04000000 | right_bit); snd_emu8000_dma_chan()
188 int ch; init_audio() local
191 for (ch = 0; ch < EMU8000_CHANNELS; ch++) init_audio()
192 EMU8000_DCYSUSV_WRITE(emu, ch, 0x80); init_audio()
195 for (ch = 0; ch < EMU8000_CHANNELS; ch++) { init_audio()
196 EMU8000_ENVVOL_WRITE(emu, ch, 0); init_audio()
197 EMU8000_ENVVAL_WRITE(emu, ch, 0); init_audio()
198 EMU8000_DCYSUS_WRITE(emu, ch, 0); init_audio()
199 EMU8000_ATKHLDV_WRITE(emu, ch, 0); init_audio()
200 EMU8000_LFO1VAL_WRITE(emu, ch, 0); init_audio()
201 EMU8000_ATKHLD_WRITE(emu, ch, 0); init_audio()
202 EMU8000_LFO2VAL_WRITE(emu, ch, 0); init_audio()
203 EMU8000_IP_WRITE(emu, ch, 0); init_audio()
204 EMU8000_IFATN_WRITE(emu, ch, 0); init_audio()
205 EMU8000_PEFE_WRITE(emu, ch, 0); init_audio()
206 EMU8000_FMMOD_WRITE(emu, ch, 0); init_audio()
207 EMU8000_TREMFRQ_WRITE(emu, ch, 0); init_audio()
208 EMU8000_FM2FRQ2_WRITE(emu, ch, 0); init_audio()
209 EMU8000_PTRX_WRITE(emu, ch, 0); init_audio()
210 EMU8000_VTFT_WRITE(emu, ch, 0); init_audio()
211 EMU8000_PSST_WRITE(emu, ch, 0); init_audio()
212 EMU8000_CSL_WRITE(emu, ch, 0); init_audio()
213 EMU8000_CCCA_WRITE(emu, ch, 0); init_audio()
216 for (ch = 0; ch < EMU8000_CHANNELS; ch++) { init_audio()
217 EMU8000_CPF_WRITE(emu, ch, 0); init_audio()
218 EMU8000_CVCF_WRITE(emu, ch, 0); init_audio()
/linux-4.1.27/arch/mips/include/asm/txx9/
H A Dtx4938.h31 #define TX4938_DMA_REG(ch) (TX4938_REG_BASE + 0xb000 + (ch) * 0x800)
35 #define TX4938_TMR_REG(ch) ((TX4938_REG_BASE + 0xf000) + (ch) * 0x100)
37 #define TX4938_SIO_REG(ch) ((TX4938_REG_BASE + 0xf300) + (ch) * 0x100)
73 #define TX4938_IR_DMA(ch, n) ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */
147 #define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
149 #define TX4938_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
200 #define TX4938_DMA_MCR_EIS(ch) (0x10000000<<(ch))
201 #define TX4938_DMA_MCR_DIS(ch) (0x01000000<<(ch))
203 #define TX4938_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
270 #define TX4938_SDRAMC_CR(ch) TX4927_SDRAMC_CR(ch)
271 #define TX4938_SDRAMC_BA(ch) TX4927_SDRAMC_BA(ch)
272 #define TX4938_SDRAMC_SIZE(ch) TX4927_SDRAMC_SIZE(ch)
274 #define TX4938_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
275 #define TX4938_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
276 #define TX4938_EBUSC_SIZE(ch) TX4927_EBUSC_SIZE(ch)
277 #define TX4938_EBUSC_WIDTH(ch) TX4927_EBUSC_WIDTH(ch)
293 void tx4938_mtd_init(int ch);
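Note on the tx4938.h hits above: each per-channel register macro is just a fixed base plus a per-channel stride (0x800 for DMA, 0x100 for TMR/SIO), and the PCFG macros shift a single enable bit per channel. A minimal user-space sketch of the same arithmetic, with DEMO_REG_BASE standing in for the real TX4938_REG_BASE (which is defined elsewhere in tx4938.h and not quoted here):

    #include <stdio.h>

    /* Stand-in base so the stride arithmetic of the macros shown above can be
     * checked outside the kernel; the real TX4938_REG_BASE lives in tx4938.h. */
    #define DEMO_REG_BASE         0xff1f0000UL
    #define DEMO_DMA_REG(ch)      (DEMO_REG_BASE + 0xb000 + (ch) * 0x800)
    #define DEMO_TMR_REG(ch)      ((DEMO_REG_BASE + 0xf000) + (ch) * 0x100)
    #define DEMO_PCFG_SDCLKEN(ch) (0x00800000 << (ch))

    int main(void)
    {
        printf("DMA ch0 %#lx, ch1 %#lx\n", DEMO_DMA_REG(0), DEMO_DMA_REG(1));
        printf("TMR ch2 %#lx\n", DEMO_TMR_REG(2));
        printf("SDCLKEN ch3 %#x\n", DEMO_PCFG_SDCLKEN(3)); /* bit 26 */
        return 0;
    }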
H A Dtx3927.h20 #define TX3927_TMR_REG(ch) (TX3927_REG_BASE + 0xf000 + (ch) * 0x100)
22 #define TX3927_SIO_REG(ch) (TX3927_REG_BASE + 0xf300 + (ch) * 0x100)
46 } ch[4]; member in struct:tx3927_dma_reg
162 #define TX3927_DMA_MCR_EIS(ch) (0x10000000<<(ch))
163 #define TX3927_DMA_MCR_DIS(ch) (0x01000000<<(ch))
165 #define TX3927_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
221 #define TX3927_IR_SIO(ch) (6 + (ch))
225 #define TX3927_IR_TMR(ch) (13 + (ch))
298 #define TX3927_PCFG_SDRCLKEN(ch) (0x00400000<<(ch))
300 #define TX3927_PCFG_PCICLKEN(ch) (0x00040000<<(ch))
305 #define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
307 #define TX3927_PCFG_SELSIO(ch) (0x00001000<<(ch))
309 #define TX3927_PCFG_SELTMR(ch) (0x00000200<<(ch))
312 #define TX3927_PCFG_INTDMA(ch) (0x00000010<<(ch))
314 #define TX3927_PCFG_SELDMA(ch) (0x00000001<<(ch))
321 #define tx3927_sioptr(ch) ((struct txx927_sio_reg *)TX3927_SIO_REG(ch))
325 #define TX3927_ROMC_BA(ch) (tx3927_romcptr->cr[(ch)] & 0xfff00000)
326 #define TX3927_ROMC_SIZE(ch) \
327 (0x00100000 << ((tx3927_romcptr->cr[(ch)] >> 8) & 0xf))
328 #define TX3927_ROMC_WIDTH(ch) (32 >> ((tx3927_romcptr->cr[(ch)] >> 7) & 0x1))
339 void tx3927_mtd_init(int ch);
H A Dtx4927.h49 #define TX4927_TMR_REG(ch) (TX4927_REG_BASE + 0xf000 + (ch) * 0x100)
51 #define TX4927_SIO_REG(ch) (TX4927_REG_BASE + 0xf300 + (ch) * 0x100)
142 #define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
144 #define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
203 #define TX4927_SDRAMC_CR(ch) __raw_readq(&tx4927_sdramcptr->cr[(ch)])
204 #define TX4927_SDRAMC_BA(ch) ((TX4927_SDRAMC_CR(ch) >> 49) << 21)
205 #define TX4927_SDRAMC_SIZE(ch) \
206 ((((TX4927_SDRAMC_CR(ch) >> 33) & 0x7fff) + 1) << 21)
208 #define TX4927_EBUSC_CR(ch) __raw_readq(&tx4927_ebuscptr->cr[(ch)])
209 #define TX4927_EBUSC_BA(ch) ((TX4927_EBUSC_CR(ch) >> 48) << 20)
210 #define TX4927_EBUSC_SIZE(ch) \
211 (0x00100000 << ((unsigned long)(TX4927_EBUSC_CR(ch) >> 8) & 0xf))
212 #define TX4927_EBUSC_WIDTH(ch) \
213 (64 >> ((__u32)(TX4927_EBUSC_CR(ch) >> 20) & 0x3))
269 void tx4927_mtd_init(int ch);
H A Dtx4939.h23 #define TX4939_ATA_REG(ch) (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
31 #define TX4939_DMA_REG(ch) (TX4939_REG_BASE + 0xb000 + (ch) * 0x800)
36 #define TX4939_TMR_REG(ch) \
37 (TX4939_REG_BASE + 0xf000 + ((ch) + ((ch) >= 3) * 10) * 0x100)
39 #define TX4939_SIO_REG(ch) \
40 (TX4939_REG_BASE + 0xf300 + (((ch) & 1) << 8) + (((ch) & 2) << 6))
197 #define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
295 #define TX4939_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
519 #define TX4939_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
520 #define TX4939_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
521 #define TX4939_EBUSC_SIZE(ch) TX4927_EBUSC_SIZE(ch)
522 #define TX4939_EBUSC_WIDTH(ch) \
523 (16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
544 void tx4939_mtd_init(int ch);
H A Drbtx4939.h47 #define RBTX4939_7SEG_ADDR(s, ch) \
48 (IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
72 #define RBTX4939_PE1_ATA(ch) (0x01 << (ch))
73 #define RBTX4939_PE1_RMII(ch) (0x04 << (ch))
111 #define rbtx4939_7seg_addr(s, ch) \
112 ((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
/linux-4.1.27/arch/mips/include/asm/mach-lantiq/xway/
H A Dxway_dma.h50 extern void ltq_dma_enable_irq(struct ltq_dma_channel *ch);
51 extern void ltq_dma_disable_irq(struct ltq_dma_channel *ch);
52 extern void ltq_dma_ack_irq(struct ltq_dma_channel *ch);
53 extern void ltq_dma_open(struct ltq_dma_channel *ch);
54 extern void ltq_dma_close(struct ltq_dma_channel *ch);
55 extern void ltq_dma_alloc_tx(struct ltq_dma_channel *ch);
56 extern void ltq_dma_alloc_rx(struct ltq_dma_channel *ch);
57 extern void ltq_dma_free(struct ltq_dma_channel *ch);
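The prototypes above are the whole per-channel interface of the Lantiq XWAY DMA layer; the dma.c hits below show each call selecting the channel through ch->nr before touching the shared registers, and ltq_dma_open() already unmasking the channel interrupt. A hedged sketch of the usual call order for a receive channel, using only the functions listed above and the ltq_dma_channel fields visible in dma.c (the include path and demo_* names are illustrative):

    #include <xway_dma.h>   /* path illustrative; the header is the one listed above */

    static struct ltq_dma_channel rx_ch;

    static int demo_open_rx(int nr)
    {
        rx_ch.nr = nr;              /* hardware channel number, as in dma.c */
        ltq_dma_alloc_rx(&rx_ch);   /* allocate and program the descriptor ring */
        ltq_dma_open(&rx_ch);       /* activate; also enables the IRQ per dma.c */
        return 0;
    }

    static void demo_close_rx(void)
    {
        ltq_dma_disable_irq(&rx_ch);
        ltq_dma_free(&rx_ch);       /* closes the channel and releases the ring */
    }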
/linux-4.1.27/arch/mips/lantiq/xway/
H A Ddma.c64 ltq_dma_enable_irq(struct ltq_dma_channel *ch) ltq_dma_enable_irq() argument
69 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_enable_irq()
70 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); ltq_dma_enable_irq()
76 ltq_dma_disable_irq(struct ltq_dma_channel *ch) ltq_dma_disable_irq() argument
81 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_disable_irq()
82 ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); ltq_dma_disable_irq()
88 ltq_dma_ack_irq(struct ltq_dma_channel *ch) ltq_dma_ack_irq() argument
93 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_ack_irq()
100 ltq_dma_open(struct ltq_dma_channel *ch) ltq_dma_open() argument
105 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_open()
107 ltq_dma_enable_irq(ch); ltq_dma_open()
113 ltq_dma_close(struct ltq_dma_channel *ch) ltq_dma_close() argument
118 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_close()
120 ltq_dma_disable_irq(ch); ltq_dma_close()
126 ltq_dma_alloc(struct ltq_dma_channel *ch) ltq_dma_alloc() argument
130 ch->desc = 0; ltq_dma_alloc()
131 ch->desc_base = dma_alloc_coherent(NULL, ltq_dma_alloc()
133 &ch->phys, GFP_ATOMIC); ltq_dma_alloc()
134 memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); ltq_dma_alloc()
137 ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_alloc()
138 ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); ltq_dma_alloc()
149 ltq_dma_alloc_tx(struct ltq_dma_channel *ch) ltq_dma_alloc_tx() argument
153 ltq_dma_alloc(ch); ltq_dma_alloc_tx()
157 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); ltq_dma_alloc_tx()
164 ltq_dma_alloc_rx(struct ltq_dma_channel *ch) ltq_dma_alloc_rx() argument
168 ltq_dma_alloc(ch); ltq_dma_alloc_rx()
172 ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); ltq_dma_alloc_rx()
179 ltq_dma_free(struct ltq_dma_channel *ch) ltq_dma_free() argument
181 if (!ch->desc_base) ltq_dma_free()
183 ltq_dma_close(ch); ltq_dma_free()
185 ch->desc_base, ch->phys); ltq_dma_free()
/linux-4.1.27/arch/sh/kernel/
H A Dprocess_64.c40 unsigned long long ah, al, bh, bl, ch, cl; show_regs() local
49 ch = (regs->regs[15]) >> 32; show_regs()
52 ah, al, bh, bl, ch, cl); show_regs()
60 asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch)); show_regs()
62 ch = (ch) >> 32; show_regs()
65 ah, al, bh, bl, ch, cl); show_regs()
71 ch = (regs->regs[2]) >> 32; show_regs()
74 ah, al, bh, bl, ch, cl); show_regs()
80 ch = (regs->regs[5]) >> 32; show_regs()
83 ah, al, bh, bl, ch, cl); show_regs()
89 ch = (regs->regs[8]) >> 32; show_regs()
92 ah, al, bh, bl, ch, cl); show_regs()
98 ch = (regs->regs[11]) >> 32; show_regs()
101 ah, al, bh, bl, ch, cl); show_regs()
107 ch = (regs->regs[14]) >> 32; show_regs()
110 ah, al, bh, bl, ch, cl); show_regs()
116 ch = (regs->regs[19]) >> 32; show_regs()
119 ah, al, bh, bl, ch, cl); show_regs()
125 ch = (regs->regs[22]) >> 32; show_regs()
128 ah, al, bh, bl, ch, cl); show_regs()
134 ch = (regs->regs[25]) >> 32; show_regs()
137 ah, al, bh, bl, ch, cl); show_regs()
143 ch = (regs->regs[28]) >> 32; show_regs()
146 ah, al, bh, bl, ch, cl); show_regs()
152 ch = (regs->regs[31]) >> 32; show_regs()
155 ah, al, bh, bl, ch, cl); show_regs()
161 ch = (regs->regs[34]) >> 32; show_regs()
164 ah, al, bh, bl, ch, cl); show_regs()
170 ch = (regs->regs[37]) >> 32; show_regs()
173 ah, al, bh, bl, ch, cl); show_regs()
179 ch = (regs->regs[40]) >> 32; show_regs()
182 ah, al, bh, bl, ch, cl); show_regs()
188 ch = (regs->regs[43]) >> 32; show_regs()
191 ah, al, bh, bl, ch, cl); show_regs()
197 ch = (regs->regs[46]) >> 32; show_regs()
200 ah, al, bh, bl, ch, cl); show_regs()
206 ch = (regs->regs[49]) >> 32; show_regs()
209 ah, al, bh, bl, ch, cl); show_regs()
215 ch = (regs->regs[52]) >> 32; show_regs()
218 ah, al, bh, bl, ch, cl); show_regs()
224 ch = (regs->regs[55]) >> 32; show_regs()
227 ah, al, bh, bl, ch, cl); show_regs()
233 ch = (regs->regs[58]) >> 32; show_regs()
236 ah, al, bh, bl, ch, cl); show_regs()
242 ch = (regs->regs[61]) >> 32; show_regs()
245 ah, al, bh, bl, ch, cl); show_regs()
251 ch = (regs->tregs[1]) >> 32; show_regs()
254 ah, al, bh, bl, ch, cl); show_regs()
260 ch = (regs->tregs[4]) >> 32; show_regs()
263 ah, al, bh, bl, ch, cl); show_regs()
269 ch = (regs->tregs[7]) >> 32; show_regs()
272 ah, al, bh, bl, ch, cl); show_regs()
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_event.c40 static int note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev);
41 static int note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev);
42 static int set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev);
43 static int set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev);
286 note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev) note_on_event() argument
296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { note_on_event()
298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); note_on_event()
301 if (note == 255 && info->ch[ch].note >= 0) { note_on_event()
308 if (info->ch[ch].vel) note_on_event()
314 info->ch[ch].vel = vel; note_on_event()
315 return set_note_event(dp, dev, type, ch, info->ch[ch].note, vel, ev); note_on_event()
319 if (note != info->ch[ch].note && info->ch[ch].note >= 0) note_on_event()
321 set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEOFF, ch, info->ch[ch].note, 0, ev); note_on_event()
323 info->ch[ch].note = note; note_on_event()
324 info->ch[ch].vel = vel; note_on_event()
326 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); note_on_event()
331 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); note_on_event()
335 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_KEYPRESS, ch, note - 128, vel, ev); note_on_event()
337 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); note_on_event()
346 note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, struct snd_seq_event *ev) note_off_event() argument
356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { note_off_event()
358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); note_off_event()
361 if (info->ch[ch].note >= 0) { note_off_event()
362 note = info->ch[ch].note; note_off_event()
363 info->ch[ch].vel = 0; note_off_event()
364 info->ch[ch].note = -1; note_off_event()
365 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEOFF, ch, note, vel, ev); note_off_event()
372 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEOFF, ch, note, vel, ev); note_off_event()
382 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) set_note_event() argument
389 ev->data.note.channel = ch; set_note_event()
400 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) set_control_event() argument
407 ev->data.control.channel = ch; set_control_event()
/linux-4.1.27/fs/cifs/
H A Dasn1.c118 asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) asn1_octet_decode() argument
124 *ch = *(ctx->pointer)++; asn1_octet_decode()
132 unsigned char ch;
139 ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
140 if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
153 unsigned char ch; asn1_tag_decode() local
158 if (!asn1_octet_decode(ctx, &ch)) asn1_tag_decode()
161 *tag |= ch & 0x7F; asn1_tag_decode()
162 } while ((ch & 0x80) == 0x80); asn1_tag_decode()
170 unsigned char ch; asn1_id_decode() local
172 if (!asn1_octet_decode(ctx, &ch)) asn1_id_decode()
175 *cls = (ch & 0xC0) >> 6; asn1_id_decode()
176 *con = (ch & 0x20) >> 5; asn1_id_decode()
177 *tag = (ch & 0x1F); asn1_id_decode()
189 unsigned char ch, cnt; asn1_length_decode() local
191 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
194 if (ch == 0x80) asn1_length_decode()
199 if (ch < 0x80) asn1_length_decode()
200 *len = ch; asn1_length_decode()
202 cnt = (unsigned char) (ch & 0x7F); asn1_length_decode()
206 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
209 *len |= ch; asn1_length_decode()
250 unsigned char ch; asn1_eoc_decode() local
253 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
256 if (ch != 0x00) { asn1_eoc_decode()
261 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
264 if (ch != 0x00) { asn1_eoc_decode()
288 unsigned char ch;
291 if (!asn1_octet_decode(ctx, &ch))
294 *integer = (signed char) ch;
303 if (!asn1_octet_decode(ctx, &ch))
307 *integer |= ch;
316 unsigned char ch;
319 if (!asn1_octet_decode(ctx, &ch))
322 *integer = ch;
323 if (ch == 0)
334 if (!asn1_octet_decode(ctx, &ch))
338 *integer |= ch;
347 unsigned char ch;
350 if (!asn1_octet_decode(ctx, &ch))
353 *integer = ch;
354 if (ch == 0)
365 if (!asn1_octet_decode(ctx, &ch))
369 *integer |= ch;
403 unsigned char ch; asn1_subid_decode() local
408 if (!asn1_octet_decode(ctx, &ch)) asn1_subid_decode()
412 *subid |= ch & 0x7F; asn1_subid_decode()
413 } while ((ch & 0x80) == 0x80); asn1_subid_decode()
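The asn1.c hits above cover the two variable-length BER encodings the SPNEGO parser handles: tags and sub-identifiers are base-128 with a continuation bit (ch & 0x80), and lengths are either short form (first octet < 0x80), indefinite (exactly 0x80), or long form (ch & 0x7F gives the count of following length octets). A standalone sketch of the same length rule, written only to make the excerpt easier to follow and not taken from the kernel:

    #include <stddef.h>

    /* Decode a BER length field starting at p[0]; returns octets consumed,
     * or 0 on truncation. *indefinite = 1 flags the 0x80 case seen in
     * asn1_length_decode(), which is terminated later by an end-of-contents. */
    static size_t demo_ber_length(const unsigned char *p, size_t avail,
                                  unsigned long *len, int *indefinite)
    {
        size_t used = 1;
        unsigned char ch;

        if (avail < 1)
            return 0;
        ch = p[0];
        *indefinite = (ch == 0x80);
        if (ch < 0x80) {                  /* short form */
            *len = ch;
        } else if (ch != 0x80) {          /* long form: ch & 0x7F count octets */
            unsigned char cnt = ch & 0x7F;
            *len = 0;
            if (avail < 1u + cnt)
                return 0;
            while (cnt--) {
                *len = (*len << 8) | p[used];
                used++;
            }
        } else {
            *len = 0;                     /* indefinite length */
        }
        return used;
    }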
/linux-4.1.27/arch/mips/ath79/
H A Dearly_printk.c36 static void prom_putchar_ar71xx(unsigned char ch) prom_putchar_ar71xx() argument
41 __raw_writel(ch, base + UART_TX * 4); prom_putchar_ar71xx()
45 static void prom_putchar_ar933x(unsigned char ch) prom_putchar_ar933x() argument
51 __raw_writel(AR933X_UART_DATA_TX_CSR | ch, base + AR933X_UART_DATA_REG); prom_putchar_ar933x()
56 static void prom_putchar_dummy(unsigned char ch) prom_putchar_dummy() argument
95 void prom_putchar(unsigned char ch) prom_putchar() argument
100 _prom_putchar(ch); prom_putchar()
/linux-4.1.27/arch/frv/kernel/
H A Duaccess.c23 char *p, ch; strncpy_from_user() local
46 __get_user_asm(err, ch, src, "ub", "=r"); strncpy_from_user()
49 if (!ch) strncpy_from_user()
51 *p = ch; strncpy_from_user()
76 char ch; strnlen_user() local
89 __get_user_asm(err, ch, p, "ub", "=r"); strnlen_user()
92 if (!ch) strnlen_user()
H A Dgdb-stub.c214 static int hex(unsigned char ch);
220 * Convert ch from a hex digit to an int
222 static int hex(unsigned char ch) hex() argument
224 if (ch >= 'a' && ch <= 'f') hex()
225 return ch-'a'+10; hex()
226 if (ch >= '0' && ch <= '9') hex()
227 return ch-'0'; hex()
228 if (ch >= 'A' && ch <= 'F') hex()
229 return ch-'A'+10; hex()
271 unsigned char ch; gdbstub_recv_packet() local
277 gdbstub_rx_char(&ch, 0); gdbstub_recv_packet()
278 } while (ch != '$'); gdbstub_recv_packet()
287 ret = gdbstub_rx_char(&ch, 0); gdbstub_recv_packet()
291 if (ch == '#') gdbstub_recv_packet()
293 checksum += ch; gdbstub_recv_packet()
294 buffer[count] = ch; gdbstub_recv_packet()
311 ret = gdbstub_rx_char(&ch, 0); gdbstub_recv_packet()
314 xmitcsum = hex(ch) << 4; gdbstub_recv_packet()
316 ret = gdbstub_rx_char(&ch, 0); gdbstub_recv_packet()
319 xmitcsum |= hex(ch); gdbstub_recv_packet()
366 unsigned char ch; gdbstub_send_packet() local
376 while ((ch = buffer[count]) != 0) { gdbstub_send_packet()
377 gdbstub_tx_char(ch); gdbstub_send_packet()
378 checksum += ch; gdbstub_send_packet()
386 } while (gdbstub_rx_char(&ch,0), gdbstub_send_packet()
388 ch=='-' && (gdbstub_proto("### GDB Rx NAK\n"),0), gdbstub_send_packet()
389 ch!='-' && ch!='+' && (gdbstub_proto("### GDB Rx ??? %02x\n",ch),0), gdbstub_send_packet()
391 ch!='+' && ch!='$'); gdbstub_send_packet()
393 if (ch=='+') { gdbstub_send_packet()
399 gdbstub_rx_unget = ch; gdbstub_send_packet()
409 int count = 0, ch; hexToInt() local
413 ch = hex(**ptr); hexToInt()
414 if (ch < 0) hexToInt()
417 *_value = (*_value << 4) | ((uint8_t) ch & 0xf); hexToInt()
669 uint8_t ch[4] __attribute__((aligned(4))); mem2hex() local
672 if (!gdbstub_read_byte(mem,ch)) mem2hex()
674 buf = hex_byte_pack(buf, ch[0]); mem2hex()
680 if (!gdbstub_read_word(mem,(uint16_t *)ch)) mem2hex()
682 buf = hex_byte_pack(buf, ch[0]); mem2hex()
683 buf = hex_byte_pack(buf, ch[1]); mem2hex()
689 if (!gdbstub_read_dword(mem,(uint32_t *)ch)) mem2hex()
691 buf = hex_byte_pack(buf, ch[0]); mem2hex()
692 buf = hex_byte_pack(buf, ch[1]); mem2hex()
693 buf = hex_byte_pack(buf, ch[2]); mem2hex()
694 buf = hex_byte_pack(buf, ch[3]); mem2hex()
700 if (!gdbstub_read_word(mem,(uint16_t *)ch)) mem2hex()
702 buf = hex_byte_pack(buf, ch[0]); mem2hex()
703 buf = hex_byte_pack(buf, ch[1]); mem2hex()
709 if (!gdbstub_read_byte(mem,ch)) mem2hex()
711 buf = hex_byte_pack(buf, ch[0]); mem2hex()
731 } ch; hex2mem() local
734 ch.b[0] = hex(*buf++) << 4; hex2mem()
735 ch.b[0] |= hex(*buf++); hex2mem()
736 if (!gdbstub_write_byte(mem,ch.b[0])) hex2mem()
743 ch.b[0] = hex(*buf++) << 4; hex2mem()
744 ch.b[0] |= hex(*buf++); hex2mem()
745 ch.b[1] = hex(*buf++) << 4; hex2mem()
746 ch.b[1] |= hex(*buf++); hex2mem()
747 if (!gdbstub_write_word(mem,ch.w)) hex2mem()
754 ch.b[0] = hex(*buf++) << 4; hex2mem()
755 ch.b[0] |= hex(*buf++); hex2mem()
756 ch.b[1] = hex(*buf++) << 4; hex2mem()
757 ch.b[1] |= hex(*buf++); hex2mem()
758 ch.b[2] = hex(*buf++) << 4; hex2mem()
759 ch.b[2] |= hex(*buf++); hex2mem()
760 ch.b[3] = hex(*buf++) << 4; hex2mem()
761 ch.b[3] |= hex(*buf++); hex2mem()
762 if (!gdbstub_write_dword(mem,ch.l)) hex2mem()
769 ch.b[0] = hex(*buf++) << 4; hex2mem()
770 ch.b[0] |= hex(*buf++); hex2mem()
771 ch.b[1] = hex(*buf++) << 4; hex2mem()
772 ch.b[1] |= hex(*buf++); hex2mem()
773 if (!gdbstub_write_word(mem,ch.w)) hex2mem()
780 ch.b[0] = hex(*buf++) << 4; hex2mem()
781 ch.b[0] |= hex(*buf++); hex2mem()
782 if (!gdbstub_write_byte(mem,ch.b[0])) hex2mem()
2030 unsigned char ch; gdbstub_init() local
2049 do { gdbstub_rx_char(&ch, 0); } while (ch != '$'); gdbstub_init()
2050 do { gdbstub_rx_char(&ch, 0); } while (ch != '#'); gdbstub_init()
2051 do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat first csum byte */ gdbstub_init()
2052 do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */ gdbstub_init()
2097 unsigned char ch; gdbstub_exit() local
2105 while ((ch = output_buffer[count]) != 0) { gdbstub_exit()
2106 gdbstub_tx_char(ch); gdbstub_exit()
2107 checksum += ch; gdbstub_exit()
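gdbstub_recv_packet()/gdbstub_send_packet() above implement GDB remote-protocol framing: payload bytes sit between '$' and '#', followed by a two-hex-digit checksum that is just the payload sum modulo 256 (the repeated checksum += ch), and the peer answers '+' or '-'. A small host-side sketch of that framing, illustrative only and not the stub's code:

    #include <stdio.h>

    /* Compute the two-digit checksum that follows '#' in a $...# packet. */
    static unsigned char demo_rsp_csum(const char *payload)
    {
        unsigned char sum = 0;
        while (*payload)
            sum += (unsigned char)*payload++;
        return sum;                 /* modulo 256 via unsigned char wrap-around */
    }

    int main(void)
    {
        const char *payload = "g";  /* e.g. the "read all registers" request */
        printf("$%s#%02x\n", payload, demo_rsp_csum(payload));  /* "$g#67" */
        return 0;
    }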
H A Dgdb-io.c142 u8 ch, st; gdbstub_rx_char() local
165 ch = gdbstub_rx_buffer[ix++]; gdbstub_rx_char()
177 gdbstub_io("### GDB Rx %02x (st=%02x) ###\n",ch,st); gdbstub_rx_char()
178 *_ch = ch & 0x7f; gdbstub_rx_char()
188 void gdbstub_tx_char(unsigned char ch) gdbstub_tx_char() argument
194 if (ch == 0x0a) { gdbstub_tx_char()
200 __UART(TX) = ch; gdbstub_tx_char()
/linux-4.1.27/drivers/gpio/
H A Dgpio-ml-ioh.c88 * @ch: Indicate GPIO channel
99 int ch; member in struct:ioh_gpio
113 reg_val = ioread32(&chip->reg->regs[chip->ch].po); ioh_gpio_set()
119 iowrite32(reg_val, &chip->reg->regs[chip->ch].po); ioh_gpio_set()
127 return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr); ioh_gpio_get()
139 pm = ioread32(&chip->reg->regs[chip->ch].pm) & ioh_gpio_direction_output()
140 ((1 << num_ports[chip->ch]) - 1); ioh_gpio_direction_output()
142 iowrite32(pm, &chip->reg->regs[chip->ch].pm); ioh_gpio_direction_output()
144 reg_val = ioread32(&chip->reg->regs[chip->ch].po); ioh_gpio_direction_output()
149 iowrite32(reg_val, &chip->reg->regs[chip->ch].po); ioh_gpio_direction_output()
163 pm = ioread32(&chip->reg->regs[chip->ch].pm) & ioh_gpio_direction_input()
164 ((1 << num_ports[chip->ch]) - 1); ioh_gpio_direction_input()
166 iowrite32(pm, &chip->reg->regs[chip->ch].pm); ioh_gpio_direction_input()
182 ioread32(&chip->reg->regs[chip->ch].po); ioh_gpio_save_reg_conf()
184 ioread32(&chip->reg->regs[chip->ch].pm); ioh_gpio_save_reg_conf()
186 ioread32(&chip->reg->regs[chip->ch].ien); ioh_gpio_save_reg_conf()
188 ioread32(&chip->reg->regs[chip->ch].imask); ioh_gpio_save_reg_conf()
190 ioread32(&chip->reg->regs[chip->ch].im_0); ioh_gpio_save_reg_conf()
192 ioread32(&chip->reg->regs[chip->ch].im_1); ioh_gpio_save_reg_conf()
208 &chip->reg->regs[chip->ch].po); ioh_gpio_restore_reg_conf()
210 &chip->reg->regs[chip->ch].pm); ioh_gpio_restore_reg_conf()
212 &chip->reg->regs[chip->ch].ien); ioh_gpio_restore_reg_conf()
214 &chip->reg->regs[chip->ch].imask); ioh_gpio_restore_reg_conf()
216 &chip->reg->regs[chip->ch].im_0); ioh_gpio_restore_reg_conf()
218 &chip->reg->regs[chip->ch].im_1); ioh_gpio_restore_reg_conf()
255 int ch; ioh_irq_type() local
262 ch = irq - chip->irq_base; ioh_irq_type()
264 im_reg = &chip->reg->regs[chip->ch].im_0; ioh_irq_type()
265 im_pos = ch; ioh_irq_type()
267 im_reg = &chip->reg->regs[chip->ch].im_1; ioh_irq_type()
268 im_pos = ch - 8; ioh_irq_type()
270 dev_dbg(chip->dev, "%s:irq=%d type=%d ch=%d pos=%d type=%d\n", ioh_irq_type()
271 __func__, irq, type, ch, im_pos, type); ioh_irq_type()
304 iowrite32(BIT(ch), &chip->reg->regs[chip->ch].iclr); ioh_irq_type()
307 iowrite32(BIT(ch), &chip->reg->regs[chip->ch].imaskclr); ioh_irq_type()
310 ien = ioread32(&chip->reg->regs[chip->ch].ien); ioh_irq_type()
311 iowrite32(ien | BIT(ch), &chip->reg->regs[chip->ch].ien); ioh_irq_type()
324 &chip->reg->regs[chip->ch].imaskclr); ioh_irq_unmask()
333 &chip->reg->regs[chip->ch].imask); ioh_irq_mask()
344 ien = ioread32(&chip->reg->regs[chip->ch].ien); ioh_irq_disable()
346 iowrite32(ien, &chip->reg->regs[chip->ch].ien); ioh_irq_disable()
358 ien = ioread32(&chip->reg->regs[chip->ch].ien); ioh_irq_enable()
360 iowrite32(ien, &chip->reg->regs[chip->ch].ien); ioh_irq_enable()
379 &chip->reg->regs[chip->ch].iclr); ioh_gpio_handler()
450 chip->ch = i; ioh_gpio_probe()
/linux-4.1.27/drivers/staging/speakup/
H A Dkeyhelp.c64 u_char *kp, counters[MAXFUNCS], ch, ch1; build_key_data() local
95 while ((ch = *kp++)) { build_key_data()
102 key = (state_tbl[i] << 8) + ch; build_key_data()
145 int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key) spk_handle_help() argument
155 if (ch == SPACE) { spk_handle_help()
160 ch |= 32; /* lower case */ spk_handle_help()
161 if (ch < 'a' || ch > 'z') spk_handle_help()
163 if (letter_offsets[ch-'a'] == -1) { spk_handle_help()
164 synth_printf(spk_msg_get(MSG_NO_COMMAND), ch); spk_handle_help() local
168 cur_item = letter_offsets[ch-'a']; spk_handle_help()
170 if (ch == 0 spk_handle_help()
174 else if (ch == 3 && cur_item > 0) spk_handle_help()
179 && ch == SPEAKUP_HELP spk_handle_help()
193 if (ch == funcvals[i]) spk_handle_help()
200 if (ch == kp[i]) spk_handle_help()
H A Dbuffers.c62 void synth_buffer_add(char ch) synth_buffer_add() argument
75 *buff_in++ = ch; synth_buffer_add()
82 char ch; synth_buffer_getc() local
86 ch = *buff_out++; synth_buffer_getc()
89 return ch; synth_buffer_getc()
H A Dmain.c430 static void speak_char(u_char ch) speak_char() argument
432 char *cp = spk_characters[ch]; speak_char()
436 if (IS_CHAR(ch, B_CAP)) { speak_char()
440 synth_printf("%c", ch); speak_char()
441 if (IS_CHAR(ch, B_CAP)) speak_char()
450 if (IS_CHAR(ch, B_CAP)) { speak_char()
467 u16 ch = ' '; get_char() local
476 ch = inverse_translate(vc, c, 0); get_char()
479 return ch; get_char()
484 u_short ch; say_char() local
487 ch = get_char(vc, (u_short *) spk_pos, &spk_attr); say_char()
494 speak_char(ch & 0xff); say_char()
499 u_short ch; say_phonetic_char() local
502 ch = get_char(vc, (u_short *) spk_pos, &spk_attr); say_phonetic_char()
503 if (isascii(ch) && isalpha(ch)) { say_phonetic_char()
504 ch &= 0x1f; say_phonetic_char()
505 synth_printf("%s\n", phonetic[--ch]); say_phonetic_char()
507 if (IS_CHAR(ch, B_NUM)) say_phonetic_char()
509 speak_char(ch); say_phonetic_char()
548 char ch; get_word() local
553 ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); get_word()
556 if (spk_say_word_ctl && ch == SPACE) { get_word()
561 && (ch == SPACE || ch == 0 || IS_WDLM(ch)) get_word()
568 ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp); get_word()
569 if ((ch == SPACE || ch == 0 || IS_WDLM(ch)) get_word()
581 ch = (char)get_char(vc, (u_short *) tmp_pos, &temp); get_word()
582 if ((ch == SPACE) || ch == 0 get_word()
583 || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE))) get_word()
585 buf[cnt++] = ch; get_word()
607 char ch; say_prev_word() local
636 ch = (char)get_char(vc, (u_short *) spk_pos, &temp); say_prev_word()
637 if (ch == SPACE || ch == 0) say_prev_word()
639 else if (IS_WDLM(ch)) say_prev_word()
660 char ch; say_next_word() local
669 ch = (char)get_char(vc, (u_short *) spk_pos, &temp); say_next_word()
670 if (ch == SPACE || ch == 0) say_next_word()
672 else if (IS_WDLM(ch)) say_next_word()
702 u_char ch; spell_word() local
706 while ((ch = (u_char) *cp)) { spell_word()
709 if (IS_CHAR(ch, B_CAP)) { spell_word()
722 && (isascii(ch) && isalpha(ch))) { spell_word()
723 ch &= 31; spell_word()
724 cp1 = phonetic[--ch]; spell_word()
726 cp1 = spk_characters[ch]; spell_word()
992 u_char ch; say_first_char() local
1002 ch = buf[i]; say_first_char()
1006 speak_char(ch); say_first_char()
1012 u_char ch; say_last_char() local
1019 ch = buf[--len]; say_last_char()
1023 speak_char(ch); say_last_char()
1037 u_short ch = get_char(vc, (u_short *) spk_pos, &tmp); say_char_num() local
1039 ch &= 0xff; say_char_num()
1040 synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch); say_char_num() local
1070 static u_char ch = '\0', old_ch = '\0'; spkup_write() local
1082 ch = (u_char) *in_buf++; spkup_write()
1083 char_type = spk_chartab[ch]; spkup_write()
1084 if (ch == old_ch && !(char_type & B_NUM)) { spkup_write()
1096 if (ch == spk_lastkey) { spkup_write()
1098 if (spk_key_echo == 1 && ch >= MINECHOCHAR) spkup_write()
1099 speak_char(ch); spkup_write()
1103 synth_printf("%c", ch); spkup_write()
1106 synth_printf("%c", ch); spkup_write()
1108 speak_char(ch); spkup_write()
1117 if (ch != old_ch) spkup_write()
1118 synth_printf("%c", ch); spkup_write()
1123 if (old_ch != ch) spkup_write()
1128 old_ch = ch; spkup_write()
1208 u_char ch, version, num_keys; spk_set_key_info() local
1227 ch = *cp1++; spk_set_key_info()
1228 if (ch >= SHIFT_TBL_SIZE) spk_set_key_info()
1230 spk_shift_table[ch] = i; spk_set_key_info()
1233 while ((ch = *cp1)) { spk_set_key_info()
1234 if (ch >= MAX_KEY) spk_set_key_info()
1236 spk_our_keys[ch] = cp1; spk_set_key_info()
1289 static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key) edit_bits() argument
1291 short mask = pb_edit->mask, ch_type = spk_chartab[ch]; edit_bits()
1293 if (type != KT_LATIN || (ch_type & B_NUM) || ch < SPACE) edit_bits()
1295 if (ch == SPACE) { edit_bits()
1302 spk_chartab[ch] ^= mask; edit_bits()
1303 speak_char(ch); edit_bits()
1305 (spk_chartab[ch] & mask) ? spk_msg_get(MSG_ON) : edit_bits()
1572 u16 ch; count_highlight_color() local
1583 ch = get_attributes(ptr); count_highlight_color()
1584 bg = (ch & 0x70) >> 4; count_highlight_color()
1894 static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key) handle_goto() argument
1901 if (type == KT_SPKUP && ch == SPEAKUP_GOTO) handle_goto()
1903 if (type == KT_LATIN && ch == '\n') handle_goto()
1907 if (ch == 8) { handle_goto()
1910 ch = goto_buf[--num]; handle_goto()
1912 spkup_write(&ch, 1); handle_goto()
1915 if (ch < '+' || ch > 'y') handle_goto()
1917 goto_buf[num++] = ch; handle_goto()
1919 spkup_write(&ch, 1); handle_goto()
1921 if ((ch == '+' || ch == '-') && num == 1) handle_goto()
1923 if (ch >= '0' && ch <= '9' && num < maxlen) handle_goto()
1927 if (ch < 'x' || ch > 'y') { handle_goto()
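Note: the say_phonetic_char() and spell_word() hits above index the phonetic[] table with the same trick: mask the letter with 0x1f so 'a' and 'A' both become 1, then pre-decrement to land on 0..25. A small stand-alone sketch of that lookup follows; the table contents here are the ordinary NATO alphabet and are only an assumption about what speakup ships, not copied from the driver.

/* phonetic_sketch.c - illustrative only, not part of the kernel tree. */
#include <ctype.h>
#include <stdio.h>

static const char *phonetic[26] = {
	"alpha", "bravo", "charlie", "delta", "echo", "foxtrot", "golf",
	"hotel", "india", "juliett", "kilo", "lima", "mike", "november",
	"oscar", "papa", "quebec", "romeo", "sierra", "tango", "uniform",
	"victor", "whiskey", "x ray", "yankee", "zulu"
};

static const char *phonetic_for(unsigned char ch)
{
	if (!isascii(ch) || !isalpha(ch))
		return NULL;
	ch &= 0x1f;		/* 'a'/'A' -> 1 ... 'z'/'Z' -> 26 */
	return phonetic[--ch];	/* pre-decrement gives index 0..25 */
}

int main(void)
{
	printf("%s %s\n", phonetic_for('K'), phonetic_for('q'));	/* kilo quebec */
	return 0;
}
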
H A Dspeakup_dectlk.c158 static int is_indnum(u_char *ch) is_indnum() argument
160 if ((*ch >= '0') && (*ch <= '9')) { is_indnum()
161 *ch = *ch - '0'; is_indnum()
208 static u_char ch; do_catch_up() local
250 ch = synth_buffer_peek(); do_catch_up()
255 if (ch == '\n') do_catch_up()
256 ch = 0x0D; do_catch_up()
257 if (synth_full_val || !spk_serial_out(ch)) { do_catch_up()
265 if (ch == '[') do_catch_up()
267 else if (ch == ']') do_catch_up()
269 else if (ch <= SPACE) { do_catch_up()
286 last = ch; do_catch_up()
H A Dspeakup_dtlk.c174 static void spk_out(const char ch) spk_out() argument
183 outb_p(ch, speakup_info.port_tts); spk_out()
194 u_char ch; do_catch_up() local
229 ch = synth_buffer_getc(); do_catch_up()
231 if (ch == '\n') do_catch_up()
232 ch = PROCSPEECH; do_catch_up()
233 spk_out(ch); do_catch_up()
234 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { do_catch_up()
249 u_char ch; synth_immediate() local
251 while ((ch = (u_char)*buf)) { synth_immediate()
254 if (ch == '\n') synth_immediate()
255 ch = PROCSPEECH; synth_immediate()
256 spk_out(ch); synth_immediate()
271 u_char ch; synth_read_tts() local
275 ch = synth_status & 0x7f; synth_read_tts()
276 outb_p(ch, speakup_info.port_tts); synth_read_tts()
279 return (char) ch; synth_read_tts()
H A Dspeakup_acntpc.c154 u_char ch; synth_immediate() local
156 while ((ch = *buf)) { synth_immediate()
159 if (ch == '\n') synth_immediate()
160 ch = PROCSPEECH; synth_immediate()
168 outb_p(ch, speakup_info.port_tts); synth_immediate()
176 u_char ch; do_catch_up() local
223 ch = synth_buffer_getc(); do_catch_up()
225 if (ch == '\n') do_catch_up()
226 ch = PROCSPEECH; do_catch_up()
227 outb_p(ch, speakup_info.port_tts); do_catch_up()
228 if (time_after_eq(jiffies, jiff_max) && ch == SPACE) { do_catch_up()
H A Dspeakup_keypc.c157 u_char ch; synth_immediate() local
160 while ((ch = *buf)) { synth_immediate()
161 if (ch == '\n') synth_immediate()
162 ch = PROCSPEECH; synth_immediate()
169 outb_p(ch, synth_port); synth_immediate()
178 u_char ch; do_catch_up() local
226 ch = synth_buffer_getc(); do_catch_up()
228 if (ch == '\n') do_catch_up()
229 ch = PROCSPEECH; do_catch_up()
230 outb_p(ch, synth_port); do_catch_up()
232 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { do_catch_up()
H A Dspeakup_decpc.c338 static int dt_sendchar(char ch) dt_sendchar() argument
345 outb_p(ch, speakup_info.port_tts+4); dt_sendchar()
374 u_char ch; do_catch_up() local
402 ch = synth_buffer_peek(); do_catch_up()
406 if (ch == '\n') do_catch_up()
407 ch = 0x0D; do_catch_up()
408 if (dt_sendchar(ch)) { do_catch_up()
416 if (ch == '[') do_catch_up()
418 else if (ch == ']') do_catch_up()
420 else if (ch <= SPACE) { do_catch_up()
437 last = ch; do_catch_up()
438 ch = 0; do_catch_up()
446 u_char ch; synth_immediate() local
448 while ((ch = *buf)) { synth_immediate()
449 if (ch == '\n') synth_immediate()
450 ch = PROCSPEECH; synth_immediate()
451 if (dt_sendchar(ch)) synth_immediate()
H A Dspeakup_decext.c157 u_char ch; do_catch_up() local
186 ch = synth_buffer_peek(); do_catch_up()
190 if (ch == '\n') do_catch_up()
191 ch = 0x0D; do_catch_up()
192 if (synth_full() || !spk_serial_out(ch)) { do_catch_up()
200 if (ch == '[') do_catch_up()
202 else if (ch == ']') do_catch_up()
204 else if (ch <= SPACE) { do_catch_up()
221 last = ch; do_catch_up()
H A Dsynth.c84 u_char ch; spk_do_catch_up() local
115 ch = synth_buffer_peek(); spk_do_catch_up()
119 if (ch == '\n') spk_do_catch_up()
120 ch = synth->procspeech; spk_do_catch_up()
121 if (!spk_serial_out(ch)) { spk_do_catch_up()
125 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { spk_do_catch_up()
150 u_char ch; spk_synth_immediate() local
152 while ((ch = *buff)) { spk_synth_immediate()
153 if (ch == '\n') spk_synth_immediate()
154 ch = synth->procspeech; spk_synth_immediate()
156 outb(ch, speakup_info.port_tts); spk_synth_immediate()
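Note: across the speakup synths listed above, the do_catch_up()/spk_do_catch_up() loops share one shape: take a byte from the synth buffer, turn '\n' into the synth's "process speech" code, and push it at the device, backing off while the device reports busy. A minimal user-space sketch of that loop follows; the buffer and output routines are stand-ins, not the drivers' own helpers.

/* catchup_sketch.c - illustrative only, not part of the kernel tree. */
#include <stdio.h>

#define PROCSPEECH 0x0d	/* "process speech" byte used by several synths above */

static const char *buf = "hello\nworld\n";	/* stands in for the synth buffer */
static unsigned int pos;

static int buffer_empty(void) { return buf[pos] == '\0'; }
static unsigned char buffer_getc(void) { return (unsigned char)buf[pos++]; }

/* Stands in for spk_serial_out()/outb_p(); always "succeeds" here. */
static int device_putc(unsigned char ch)
{
	printf("-> 0x%02x\n", ch);
	return 1;
}

int main(void)
{
	while (!buffer_empty()) {
		unsigned char ch = buffer_getc();

		if (ch == '\n')
			ch = PROCSPEECH;	/* newline becomes the flush command */
		while (!device_putc(ch))
			;			/* the real drivers sleep and retry */
	}
	return 0;
}
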
/linux-4.1.27/arch/um/drivers/
H A Dssl.h10 extern void ssl_receive_char(int line, char ch);
/linux-4.1.27/arch/mips/ath25/
H A Dearly_printk.c18 unsigned char ch) prom_uart_wr()
20 __raw_writel(ch, base + 4 * reg); prom_uart_wr()
28 void prom_putchar(unsigned char ch) prom_putchar() argument
41 prom_uart_wr(base, UART_TX, ch); prom_putchar()
17 prom_uart_wr(void __iomem *base, unsigned reg, unsigned char ch) prom_uart_wr() argument
/linux-4.1.27/drivers/isdn/isdnloop/
H A Disdnloop.c51 * ch = channel number (0-based)
54 isdnloop_bchan_send(isdnloop_card *card, int ch) isdnloop_bchan_send() argument
56 isdnloop_card *rcard = card->rcard[ch]; isdnloop_bchan_send()
57 int rch = card->rch[ch], len, ack; isdnloop_bchan_send()
61 while (card->sndcount[ch]) { isdnloop_bchan_send()
62 skb = skb_dequeue(&card->bqueue[ch]); isdnloop_bchan_send()
65 card->sndcount[ch] -= len; isdnloop_bchan_send()
68 cmd.arg = ch; isdnloop_bchan_send()
80 card->sndcount[ch] = 0; isdnloop_bchan_send()
315 int ch; isdnloop_polldchan() local
339 ch = (card->imsg[1] - '0') - 1; isdnloop_polldchan()
341 isdnloop_parse_status(p, ch, card); isdnloop_polldchan()
466 * ch = channel: 0 = generic messages, 1 and 2 = D-channel messages.
471 isdnloop_fake(isdnloop_card *card, char *s, int ch) isdnloop_fake() argument
474 int len = strlen(s) + ((ch >= 0) ? 3 : 0); isdnloop_fake()
480 if (ch >= 0) isdnloop_fake()
481 sprintf(skb_put(skb, 3), "%02d;", ch); isdnloop_fake()
566 * ch = channel (0-based)
569 isdnloop_atimeout(isdnloop_card *card, int ch) isdnloop_atimeout() argument
576 isdnloop_fake(card->rcard[ch], "DDIS_I", card->rch[ch] + 1); isdnloop_atimeout()
577 card->rcard[ch]->rcard[card->rch[ch]] = NULL; isdnloop_atimeout()
578 card->rcard[ch] = NULL; isdnloop_atimeout()
580 isdnloop_fake(card, "DDIS_I", ch + 1); isdnloop_atimeout()
583 isdnloop_fake(card, buf, ch + 1); isdnloop_atimeout()
612 * ch = channel to watch for.
615 isdnloop_start_ctimer(isdnloop_card *card, int ch) isdnloop_start_ctimer() argument
620 init_timer(&card->c_timer[ch]); isdnloop_start_ctimer()
621 card->c_timer[ch].expires = jiffies + ISDNLOOP_TIMER_ALERTWAIT; isdnloop_start_ctimer()
622 if (ch) isdnloop_start_ctimer()
623 card->c_timer[ch].function = isdnloop_atimeout1; isdnloop_start_ctimer()
625 card->c_timer[ch].function = isdnloop_atimeout0; isdnloop_start_ctimer()
626 card->c_timer[ch].data = (unsigned long) card; isdnloop_start_ctimer()
627 add_timer(&card->c_timer[ch]); isdnloop_start_ctimer()
636 * ch = channel (0-based).
639 isdnloop_kill_ctimer(isdnloop_card *card, int ch) isdnloop_kill_ctimer() argument
644 del_timer(&card->c_timer[ch]); isdnloop_kill_ctimer()
670 int ch; isdnloop_try_call() local
678 for (ch = 0; ch < 2; ch++) { isdnloop_try_call()
680 if ((cc == card) && (ch == lch)) isdnloop_try_call()
690 e = cc->eazlist[ch]; isdnloop_try_call()
701 if (!(cc->rcard[ch])) { isdnloop_try_call()
703 if (!(si2bit[cmd->parm.setup.si1] & cc->sil[ch])) { isdnloop_try_call()
707 /* ch is idle, si and number matches */ isdnloop_try_call()
708 cc->rcard[ch] = card; isdnloop_try_call()
709 cc->rch[ch] = lch; isdnloop_try_call()
711 card->rch[lch] = ch; isdnloop_try_call()
717 if (ch == 1) isdnloop_try_call()
785 int ch; isdnloop_parse_cmd() local
791 ch = card->omsg[1] - '0'; isdnloop_parse_cmd()
792 if ((ch < 0) || (ch > 2)) { isdnloop_parse_cmd()
800 if (s->command && (ch != 0)) { isdnloop_parse_cmd()
813 if (card->rcard[ch - 1]) { isdnloop_parse_cmd()
814 isdnloop_fake(card->rcard[ch - 1], "BCON_I", isdnloop_parse_cmd()
815 card->rch[ch - 1] + 1); isdnloop_parse_cmd()
816 isdnloop_fake(card, "BCON_C", ch); isdnloop_parse_cmd()
821 if (card->rcard[ch - 1]) { isdnloop_parse_cmd()
822 isdnloop_fake(card->rcard[ch - 1], "BCON_C", isdnloop_parse_cmd()
823 card->rch[ch - 1] + 1); isdnloop_parse_cmd()
828 isdnloop_fake(card, "BDIS_C", ch); isdnloop_parse_cmd()
829 if (card->rcard[ch - 1]) { isdnloop_parse_cmd()
830 isdnloop_fake(card->rcard[ch - 1], "BDIS_I", isdnloop_parse_cmd()
831 card->rch[ch - 1] + 1); isdnloop_parse_cmd()
836 isdnloop_kill_ctimer(card, ch - 1); isdnloop_parse_cmd()
837 if (card->rcard[ch - 1]) { isdnloop_parse_cmd()
838 isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]); isdnloop_parse_cmd()
839 isdnloop_fake(card->rcard[ch - 1], "DCON_C", isdnloop_parse_cmd()
840 card->rch[ch - 1] + 1); isdnloop_parse_cmd()
841 isdnloop_fake(card, "DCON_C", ch); isdnloop_parse_cmd()
846 isdnloop_kill_ctimer(card, ch - 1); isdnloop_parse_cmd()
847 if (card->rcard[ch - 1]) { isdnloop_parse_cmd()
848 isdnloop_kill_ctimer(card->rcard[ch - 1], card->rch[ch - 1]); isdnloop_parse_cmd()
849 isdnloop_fake(card->rcard[ch - 1], "DDIS_I", isdnloop_parse_cmd()
850 card->rch[ch - 1] + 1); isdnloop_parse_cmd()
851 card->rcard[ch - 1] = NULL; isdnloop_parse_cmd()
853 isdnloop_fake(card, "DDIS_C", ch); isdnloop_parse_cmd()
865 switch (isdnloop_try_call(card, p, ch - 1, &cmd)) { isdnloop_parse_cmd()
873 isdnloop_vstphone(card->rcard[ch - 1], isdnloop_parse_cmd()
875 isdnloop_fake(card->rcard[ch - 1], buf, card->rch[ch - 1] + 1); isdnloop_parse_cmd()
879 isdnloop_start_ctimer(card, ch - 1); isdnloop_parse_cmd()
883 isdnloop_fake(card, "DDIS_I", ch); isdnloop_parse_cmd()
885 isdnloop_fake(card, buf, ch); isdnloop_parse_cmd()
889 isdnloop_fake(card, "DDIS_I", ch); isdnloop_parse_cmd()
891 isdnloop_fake(card, buf, ch); isdnloop_parse_cmd()
897 card->eazlist[ch - 1][0] = '\0'; isdnloop_parse_cmd()
904 strcpy(card->eazlist[ch - 1], p); isdnloop_parse_cmd()
908 sprintf(buf, "EAZ-LIST: %s", card->eazlist[ch - 1]); isdnloop_parse_cmd()
909 isdnloop_fake(card, buf, ch + 1); isdnloop_parse_cmd()
923 card->sil[ch - 1] |= si2bit[*p - '0']; isdnloop_parse_cmd()
934 if (card->sil[ch - 1] & (1 << i)) isdnloop_parse_cmd()
936 isdnloop_fake(card, buf, ch + 1); isdnloop_parse_cmd()
940 card->sil[ch - 1] = 0; isdnloop_parse_cmd()
/linux-4.1.27/drivers/usb/musb/
H A Dtusb6010_omap.c42 int ch; member in struct:tusb_omap_dma_ch
62 int ch; member in struct:tusb_omap_dma
118 int ch; tusb_omap_dma_cb() local
123 ch = chdat->ch; tusb_omap_dma_cb()
125 ch = tusb_dma->ch; tusb_omap_dma_cb()
130 dev_dbg(musb->controller, "ep%i %s dma callback ch: %i status: %x\n", tusb_omap_dma_cb()
132 ch, ch_status); tusb_omap_dma_cb()
143 dev_dbg(musb->controller, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n", tusb_omap_dma_cb()
144 chdat->tx ? "tx" : "rx", chdat->ch, tusb_omap_dma_cb()
223 int ch; tusb_omap_dma_program() local
251 dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n", tusb_omap_dma_program()
252 chdat->tx ? "tx" : "rx", chdat->ch, tusb_omap_dma_program()
265 ch = chdat->ch; tusb_omap_dma_program()
273 if (tusb_dma->ch < 0) { tusb_omap_dma_program()
281 ch = tusb_dma->ch; tusb_omap_dma_program()
284 omap_set_dma_callback(ch, tusb_omap_dma_cb, channel); tusb_omap_dma_program()
313 dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n", tusb_omap_dma_program()
315 ch, dma_addr, chdat->transfer_len, len, tusb_omap_dma_program()
363 omap_set_dma_params(ch, &dma_params); tusb_omap_dma_program()
364 omap_set_dma_src_burst_mode(ch, src_burst); tusb_omap_dma_program()
365 omap_set_dma_dest_burst_mode(ch, dst_burst); tusb_omap_dma_program()
366 omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED); tusb_omap_dma_program()
390 omap_start_dma(ch); tusb_omap_dma_program()
417 if (tusb_dma->ch >= 0) { tusb_omap_dma_abort()
418 omap_stop_dma(tusb_dma->ch); tusb_omap_dma_abort()
419 omap_free_dma(tusb_dma->ch); tusb_omap_dma_abort()
420 tusb_dma->ch = -1; tusb_omap_dma_abort()
517 struct dma_channel *ch = dma_channel_pool[i]; tusb_omap_dma_allocate() local
518 if (ch->status == MUSB_DMA_STATUS_UNKNOWN) { tusb_omap_dma_allocate()
519 ch->status = MUSB_DMA_STATUS_FREE; tusb_omap_dma_allocate()
520 channel = ch; tusb_omap_dma_allocate()
521 chdat = ch->private_data; tusb_omap_dma_allocate()
555 tusb_omap_dma_cb, channel, &chdat->ch); tusb_omap_dma_allocate()
558 } else if (tusb_dma->ch == -1) { tusb_omap_dma_allocate()
564 tusb_omap_dma_cb, NULL, &tusb_dma->ch); tusb_omap_dma_allocate()
569 chdat->ch = -1; tusb_omap_dma_allocate()
575 chdat->ch >= 0 ? "dedicated" : "shared", tusb_omap_dma_allocate()
576 chdat->ch >= 0 ? chdat->ch : tusb_dma->ch, tusb_omap_dma_allocate()
598 dev_dbg(musb->controller, "ep%i ch%i\n", chdat->epnum, chdat->ch); tusb_omap_dma_release()
616 if (chdat->ch >= 0) { tusb_omap_dma_release()
617 omap_stop_dma(chdat->ch); tusb_omap_dma_release()
618 omap_free_dma(chdat->ch); tusb_omap_dma_release()
619 chdat->ch = -1; tusb_omap_dma_release()
635 struct dma_channel *ch = dma_channel_pool[i]; dma_controller_destroy() local
636 if (ch) { dma_controller_destroy()
637 kfree(ch->private_data); dma_controller_destroy()
638 kfree(ch); dma_controller_destroy()
642 if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0) dma_controller_destroy()
643 omap_free_dma(tusb_dma->ch); dma_controller_destroy()
671 tusb_dma->ch = -1; dma_controller_create()
684 struct dma_channel *ch; dma_controller_create() local
687 ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL); dma_controller_create()
688 if (!ch) dma_controller_create()
691 dma_channel_pool[i] = ch; dma_controller_create()
697 ch->status = MUSB_DMA_STATUS_UNKNOWN; dma_controller_create()
698 ch->private_data = chdat; dma_controller_create()
/linux-4.1.27/arch/arm/mach-shmobile/
H A Dsmp-r8a7779.c69 struct rcar_sysc_ch *ch = NULL; r8a7779_platform_cpu_kill() local
75 ch = r8a7779_ch_cpu[cpu]; r8a7779_platform_cpu_kill()
77 if (ch) r8a7779_platform_cpu_kill()
78 ret = rcar_sysc_power_down(ch); r8a7779_platform_cpu_kill()
85 struct rcar_sysc_ch *ch = NULL; r8a7779_boot_secondary() local
90 ch = r8a7779_ch_cpu[lcpu]; r8a7779_boot_secondary()
92 if (ch) r8a7779_boot_secondary()
93 ret = rcar_sysc_power_up(ch); r8a7779_boot_secondary()
H A Dsetup-r8a7778.c434 HPB_DMAE_CHANNEL(0x7c, HPBDMA_SLAVE_USBFUNC_TX), /* ch. 14 */
435 HPB_DMAE_CHANNEL(0x7c, HPBDMA_SLAVE_USBFUNC_RX), /* ch. 15 */
436 HPB_DMAE_CHANNEL(0x7e, HPBDMA_SLAVE_SDHI0_TX), /* ch. 21 */
437 HPB_DMAE_CHANNEL(0x7e, HPBDMA_SLAVE_SDHI0_RX), /* ch. 22 */
438 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI0_TX), /* ch. 28 */
439 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI0_RX), /* ch. 28 */
440 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF0_TX), /* ch. 28 */
441 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF0_RX), /* ch. 28 */
442 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI1_TX), /* ch. 29 */
443 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI1_RX), /* ch. 29 */
444 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF1_TX), /* ch. 29 */
445 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF1_RX), /* ch. 29 */
446 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI2_TX), /* ch. 30 */
447 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI2_RX), /* ch. 30 */
448 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF2_TX), /* ch. 30 */
449 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF2_RX), /* ch. 30 */
450 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI3_TX), /* ch. 31 */
451 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI3_RX), /* ch. 31 */
452 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF3_TX), /* ch. 31 */
453 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF3_RX), /* ch. 31 */
454 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI4_TX), /* ch. 32 */
455 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI4_RX), /* ch. 32 */
456 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF4_TX), /* ch. 32 */
457 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF4_RX), /* ch. 32 */
458 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI5_TX), /* ch. 33 */
459 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI5_RX), /* ch. 33 */
460 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF5_TX), /* ch. 33 */
461 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF5_RX), /* ch. 33 */
462 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI6_TX), /* ch. 34 */
463 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI6_RX), /* ch. 34 */
464 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF6_TX), /* ch. 34 */
465 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF6_RX), /* ch. 34 */
466 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI7_TX), /* ch. 35 */
467 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI7_RX), /* ch. 35 */
468 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF7_TX), /* ch. 35 */
469 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF7_RX), /* ch. 35 */
470 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI8_TX), /* ch. 36 */
471 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_SSI8_RX), /* ch. 36 */
472 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF8_TX), /* ch. 36 */
473 HPB_DMAE_CHANNEL(0x7f, HPBDMA_SLAVE_HPBIF8_RX), /* ch. 36 */
H A Dpm-r8a7779.c35 struct rcar_sysc_ch ch; member in struct:r8a7779_pm_domain
40 return &container_of(d, struct r8a7779_pm_domain, genpd)->ch; to_r8a7779_ch()
99 .ch = {
106 .ch = {
113 .ch = {
120 .ch = {
/linux-4.1.27/drivers/media/pci/solo6x10/
H A Dsolo6x10-tw28.h52 int tw28_set_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 val);
53 int tw28_get_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 *val);
54 bool tw28_has_sharpness(struct solo_dev *solo_dev, u8 ch);
56 u8 tw28_get_audio_gain(struct solo_dev *solo_dev, u8 ch);
57 void tw28_set_audio_gain(struct solo_dev *solo_dev, u8 ch, u8 val);
58 int tw28_get_video_status(struct solo_dev *solo_dev, u8 ch);
H A Dsolo6x10-regs.h208 #define SOLO_VI_MOSAIC(ch) (0x0140 + ((ch)*4))
214 #define SOLO_VI_WIN_CTRL0(ch) (0x0180 + ((ch)*4))
215 #define SOLO_VI_WIN_CTRL1(ch) (0x01C0 + ((ch)*4))
228 #define SOLO_VI_WIN_ON(ch) (0x0200 + ((ch)*4))
379 #define SOLO_CAP_CH_SCALE(ch) (0x0440+((ch)*4))
380 #define SOLO_CAP_CH_COMP_ENA_E(ch) (0x0480+((ch)*4))
381 #define SOLO_CAP_CH_INTV(ch) (0x04C0+((ch)*4))
382 #define SOLO_CAP_CH_INTV_E(ch) (0x0500+((ch)*4))
434 #define SOLO_VE_CH_INTL(ch) (0x0700+((ch)*4))
435 #define SOLO_VE_CH_MOT(ch) (0x0740+((ch)*4))
436 #define SOLO_VE_CH_QP(ch) (0x0780+((ch)*4))
437 #define SOLO_VE_CH_QP_E(ch) (0x07C0+((ch)*4))
438 #define SOLO_VE_CH_GOP(ch) (0x0800+((ch)*4))
439 #define SOLO_VE_CH_GOP_E(ch) (0x0840+((ch)*4))
440 #define SOLO_VE_CH_REF_BASE(ch) (0x0880+((ch)*4))
441 #define SOLO_VE_CH_REF_BASE_E(ch) (0x08C0+((ch)*4))
617 #define SOLO_AUDIO_EE_ENC_CH(ch) ((ch)<<25)
626 #define SOLO_AUDIO_EVOL(ch, value) ((value)<<((ch)%10))
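Note: nearly all of the per-channel macros in solo6x10-regs.h above follow one scheme: a fixed register base plus one 32-bit word (4 bytes) per channel. A tiny sketch that reuses two of those macros verbatim to print the resulting addresses:

/* solo_reg_sketch.c - illustrative only, not part of the kernel tree. */
#include <stdio.h>

#define SOLO_VI_WIN_CTRL0(ch)	(0x0180 + ((ch)*4))
#define SOLO_CAP_CH_SCALE(ch)	(0x0440 + ((ch)*4))

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 4; ch++)
		printf("ch %u: WIN_CTRL0 @ 0x%04x, CAP_SCALE @ 0x%04x\n",
		       ch, SOLO_VI_WIN_CTRL0(ch), SOLO_CAP_CH_SCALE(ch));
	return 0;
}
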
H A Dsolo6x10-enc.c152 reg &= ~(1 << solo_enc->ch); solo_osd_print()
167 SOLO_EOSD_EXT_ADDR_CHAN(solo_dev, solo_enc->ch), solo_osd_print()
171 reg |= (1 << solo_enc->ch); solo_osd_print()
181 void solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, solo_s_jpeg_qp() argument
187 if ((ch > 31) || (qp > 3)) solo_s_jpeg_qp()
193 if (ch < 16) { solo_s_jpeg_qp()
197 ch -= 16; solo_s_jpeg_qp()
201 ch *= 2; solo_s_jpeg_qp()
205 solo_dev->jpeg_qp[idx] &= ~(3 << ch); solo_s_jpeg_qp()
206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch; solo_s_jpeg_qp()
213 int solo_g_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch) solo_g_jpeg_qp() argument
220 if (WARN_ON_ONCE(ch > 31)) solo_g_jpeg_qp()
223 if (ch < 16) { solo_g_jpeg_qp()
226 ch -= 16; solo_g_jpeg_qp()
229 ch *= 2; solo_g_jpeg_qp()
231 return (solo_dev->jpeg_qp[idx] >> ch) & 3; solo_g_jpeg_qp()
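Note: solo_s_jpeg_qp() and solo_g_jpeg_qp() above pack one 2-bit JPEG quantisation value per channel into two 32-bit words (channels 0-15 in word 0, channels 16-31 in word 1). A user-space sketch of just that packing, without the register writes and locking the driver also does:

/* solo_jpeg_qp_sketch.c - illustrative only, not part of the kernel tree. */
#include <stdio.h>

static unsigned int jpeg_qp[2];

static int set_jpeg_qp(unsigned int ch, unsigned int qp)
{
	unsigned int idx;

	if (ch > 31 || qp > 3)
		return -1;
	if (ch < 16) {
		idx = 0;
	} else {
		idx = 1;
		ch -= 16;
	}
	ch *= 2;				/* 2 bits per channel */
	jpeg_qp[idx] &= ~(3u << ch);
	jpeg_qp[idx] |= (qp & 3) << ch;
	return 0;
}

static unsigned int get_jpeg_qp(unsigned int ch)
{
	unsigned int idx = (ch < 16) ? 0 : 1;

	if (ch >= 16)
		ch -= 16;
	return (jpeg_qp[idx] >> (ch * 2)) & 3;
}

int main(void)
{
	set_jpeg_qp(3, 2);
	set_jpeg_qp(20, 1);
	printf("%u %u\n", get_jpeg_qp(3), get_jpeg_qp(20));	/* 2 1 */
	return 0;
}
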
H A Dsolo6x10-tw28.c375 int ch; tw2815_setup() local
471 for (ch = 0; ch < 4; ch++) { tw2815_setup()
473 switch (ch) { tw2815_setup()
492 dev_addr, (ch * 0x10) + i, tw2815_setup()
649 int tw28_get_video_status(struct solo_dev *solo_dev, u8 ch) tw28_get_video_status() argument
654 chip_num = ch / 4; tw28_get_video_status()
655 ch %= 4; tw28_get_video_status()
660 return val & (1 << ch) ? 1 : 0; tw28_get_video_status()
682 bool tw28_has_sharpness(struct solo_dev *solo_dev, u8 ch) tw28_has_sharpness() argument
684 return is_tw286x(solo_dev, ch / 4); tw28_has_sharpness()
687 int tw28_set_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, tw28_set_ctrl_val() argument
694 chip_num = ch / 4; tw28_set_ctrl_val()
695 ch %= 4; tw28_set_ctrl_val()
722 tw_writebyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), tw28_set_ctrl_val()
723 TW_HUE_ADDR(ch), sval); tw28_set_ctrl_val()
732 TW286x_SATURATIONU_ADDR(ch), val); tw28_set_ctrl_val()
734 tw_writebyte(solo_dev, chip_num, TW286x_SATURATIONV_ADDR(ch), tw28_set_ctrl_val()
735 TW_SATURATION_ADDR(ch), val); tw28_set_ctrl_val()
740 tw_writebyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), tw28_set_ctrl_val()
741 TW_CONTRAST_ADDR(ch), val); tw28_set_ctrl_val()
749 tw_writebyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), tw28_set_ctrl_val()
750 TW_BRIGHTNESS_ADDR(ch), sval); tw28_set_ctrl_val()
760 int tw28_get_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, tw28_get_ctrl_val() argument
766 chip_num = ch / 4; tw28_get_ctrl_val()
767 ch %= 4; tw28_get_ctrl_val()
781 rval = tw_readbyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), tw28_get_ctrl_val()
782 TW_HUE_ADDR(ch)); tw28_get_ctrl_val()
790 TW286x_SATURATIONU_ADDR(ch), tw28_get_ctrl_val()
791 TW_SATURATION_ADDR(ch)); tw28_get_ctrl_val()
795 TW286x_CONTRAST_ADDR(ch), tw28_get_ctrl_val()
796 TW_CONTRAST_ADDR(ch)); tw28_get_ctrl_val()
800 TW286x_BRIGHTNESS_ADDR(ch), tw28_get_ctrl_val()
801 TW_BRIGHTNESS_ADDR(ch)); tw28_get_ctrl_val()
837 u8 tw28_get_audio_gain(struct solo_dev *solo_dev, u8 ch) tw28_get_audio_gain() argument
843 chip_num = ch / 4; tw28_get_audio_gain()
844 ch %= 4; tw28_get_audio_gain()
847 TW286x_AUDIO_INPUT_GAIN_ADDR(ch), tw28_get_audio_gain()
848 TW_AUDIO_INPUT_GAIN_ADDR(ch)); tw28_get_audio_gain()
850 return (ch % 2) ? (val >> 4) : (val & 0x0f); tw28_get_audio_gain()
853 void tw28_set_audio_gain(struct solo_dev *solo_dev, u8 ch, u8 val) tw28_set_audio_gain() argument
859 chip_num = ch / 4; tw28_set_audio_gain()
860 ch %= 4; tw28_set_audio_gain()
863 TW286x_AUDIO_INPUT_GAIN_ADDR(ch), tw28_set_audio_gain()
864 TW_AUDIO_INPUT_GAIN_ADDR(ch)); tw28_set_audio_gain()
866 val = (old_val & ((ch % 2) ? 0x0f : 0xf0)) | tw28_set_audio_gain()
867 ((ch % 2) ? (val << 4) : val); tw28_set_audio_gain()
869 tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), tw28_set_audio_gain()
870 TW_AUDIO_INPUT_GAIN_ADDR(ch), val); tw28_set_audio_gain()
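Note: the tw28 helpers above consistently decode a board-wide channel number as chip_num = ch / 4 with a per-chip input of ch % 4, and the audio-gain register packs two channels per byte (low nibble for even inputs, high nibble for odd ones). A small sketch of that arithmetic; the register value below is made up for illustration.

/* tw28_chan_sketch.c - illustrative only, not part of the kernel tree. */
#include <stdio.h>

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 8; ch++) {
		unsigned int chip_num = ch / 4;	/* which TW28xx chip */
		unsigned int local    = ch % 4;	/* input on that chip */
		unsigned int reg      = 0x5A;	/* made-up gain register value */
		unsigned int gain     = (local % 2) ? (reg >> 4) : (reg & 0x0f);

		printf("ch %u -> chip %u input %u gain %u\n",
		       ch, chip_num, local, gain);
	}
	return 0;
}
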
H A Dsolo6x10-v4l2.c70 static void solo_win_setup(struct solo_dev *solo_dev, u8 ch, solo_win_setup() argument
73 if (ch >= solo_dev->nr_chans) solo_win_setup()
77 solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(ch), solo_win_setup()
78 SOLO_VI_WIN_CHANNEL(ch) | solo_win_setup()
83 solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch), solo_win_setup()
90 u8 ch = idx * 4; solo_v4l2_ch_ext_4up() local
92 if (ch >= solo_dev->nr_chans) solo_v4l2_ch_ext_4up()
98 for (i = ch; i < ch + 4; i++) solo_v4l2_ch_ext_4up()
107 solo_win_setup(solo_dev, ch, 0, 0, solo_dev->video_hsize / 2, solo_v4l2_ch_ext_4up()
109 solo_win_setup(solo_dev, ch + 1, solo_dev->video_hsize / 2, 0, solo_v4l2_ch_ext_4up()
112 solo_win_setup(solo_dev, ch + 2, 0, solo_vlines(solo_dev) / 2, solo_v4l2_ch_ext_4up()
114 solo_win_setup(solo_dev, ch + 3, solo_dev->video_hsize / 2, solo_v4l2_ch_ext_4up()
151 static int solo_v4l2_ch(struct solo_dev *solo_dev, u8 ch, int on) solo_v4l2_ch() argument
155 if (ch < solo_dev->nr_chans) { solo_v4l2_ch()
156 solo_win_setup(solo_dev, ch, on ? 0 : solo_dev->video_hsize, solo_v4l2_ch()
163 if (ch >= solo_dev->nr_chans + solo_dev->nr_ext) solo_v4l2_ch()
166 ext_ch = ch - solo_dev->nr_chans; solo_v4l2_ch()
176 static int solo_v4l2_set_ch(struct solo_dev *solo_dev, u8 ch) solo_v4l2_set_ch() argument
178 if (ch >= solo_dev->nr_chans + solo_dev->nr_ext) solo_v4l2_set_ch()
184 solo_v4l2_ch(solo_dev, ch, 1); solo_v4l2_set_ch()
186 solo_dev->cur_disp_ch = ch; solo_v4l2_set_ch()
H A Dsolo6x10-v4l2-enc.c106 return (solo_dev->motion_mask >> solo_enc->ch) & 1; solo_is_motion_on()
113 u32 ch_mask = 1 << solo_enc->ch; solo_motion_detected()
129 u32 mask = 1 << solo_enc->ch; solo_motion_toggle()
227 jpeg_dqt[solo_g_jpeg_qp(solo_dev, solo_enc->ch)], DQT_LEN); solo_update_mode()
232 u8 ch = solo_enc->ch; solo_enc_on() local
245 solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(ch), 1); solo_enc_on()
248 solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), 0); solo_enc_on()
251 solo_reg_write(solo_dev, SOLO_VE_CH_INTL(ch), solo_enc_on()
260 solo_reg_write(solo_dev, SOLO_VE_CH_GOP(ch), solo_enc->gop); solo_enc_on()
261 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); solo_enc_on()
262 solo_reg_write(solo_dev, SOLO_CAP_CH_INTV(ch), interval); solo_enc_on()
265 solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(ch), solo_enc->gop); solo_enc_on()
266 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); solo_enc_on()
267 solo_reg_write(solo_dev, SOLO_CAP_CH_INTV_E(ch), interval); solo_enc_on()
270 solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(ch), solo_enc->mode); solo_enc_on()
281 solo_reg_write(solo_dev, SOLO_CAP_CH_SCALE(solo_enc->ch), 0); solo_enc_off()
282 solo_reg_write(solo_dev, SOLO_CAP_CH_COMP_ENA_E(solo_enc->ch), 0); solo_enc_off()
590 u8 ch; solo_handle_ring() local
602 ch = (mpeg_current >> 24) & 0x1f; solo_handle_ring()
605 if (ch >= SOLO_MAX_CHANNELS) { solo_handle_ring()
606 ch -= SOLO_MAX_CHANNELS; solo_handle_ring()
611 solo_enc = solo_dev->v4l2_enc[ch]; solo_handle_ring()
614 "Got spurious packet for channel %d\n", ch); solo_handle_ring()
779 solo_enc->ch); solo_enc_querycap()
798 solo_enc->ch + 1); solo_enc_enum_input()
802 if (!tw28_get_video_status(solo_dev, solo_enc->ch)) solo_enc_enum_input()
1089 return tw28_set_ctrl_val(solo_dev, ctrl->id, solo_enc->ch, solo_s_ctrl()
1093 solo_reg_write(solo_dev, SOLO_VE_CH_GOP(solo_enc->ch), solo_enc->gop); solo_s_ctrl()
1094 solo_reg_write(solo_dev, SOLO_VE_CH_GOP_E(solo_enc->ch), solo_enc->gop); solo_s_ctrl()
1098 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp); solo_s_ctrl()
1099 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp); solo_s_ctrl()
1105 return solo_set_motion_threshold(solo_dev, solo_enc->ch, solo_s_ctrl()
1112 err = solo_set_motion_threshold(solo_dev, solo_enc->ch, solo_s_ctrl()
1115 err = solo_set_motion_block(solo_dev, solo_enc->ch, solo_s_ctrl()
1124 return solo_set_motion_block(solo_dev, solo_enc->ch, solo_s_ctrl()
1227 u8 ch, unsigned nr) solo_enc_alloc()
1252 if (tw28_has_sharpness(solo_dev, ch)) solo_enc_alloc()
1275 solo_enc->ch = ch; solo_enc_alloc()
1226 solo_enc_alloc(struct solo_dev *solo_dev, u8 ch, unsigned nr) solo_enc_alloc() argument
/linux-4.1.27/drivers/net/wireless/rtlwifi/
H A Dregd.c148 struct ieee80211_channel *ch; _rtl_reg_apply_beaconing_flags() local
159 ch = &sband->channels[i]; _rtl_reg_apply_beaconing_flags()
160 if (_rtl_is_radar_freq(ch->center_freq) || _rtl_reg_apply_beaconing_flags()
161 (ch->flags & IEEE80211_CHAN_RADAR)) _rtl_reg_apply_beaconing_flags()
165 ch->center_freq); _rtl_reg_apply_beaconing_flags()
178 ch->flags &= ~IEEE80211_CHAN_NO_IBSS; _rtl_reg_apply_beaconing_flags()
181 ch->flags &= _rtl_reg_apply_beaconing_flags()
184 if (ch->beacon_found) _rtl_reg_apply_beaconing_flags()
185 ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | _rtl_reg_apply_beaconing_flags()
198 struct ieee80211_channel *ch; _rtl_reg_apply_active_scan_flags() local
210 ch = &sband->channels[11]; /* CH 12 */ _rtl_reg_apply_active_scan_flags()
211 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) _rtl_reg_apply_active_scan_flags()
212 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; _rtl_reg_apply_active_scan_flags()
213 ch = &sband->channels[12]; /* CH 13 */ _rtl_reg_apply_active_scan_flags()
214 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) _rtl_reg_apply_active_scan_flags()
215 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; _rtl_reg_apply_active_scan_flags()
226 ch = &sband->channels[11]; /* CH 12 */ _rtl_reg_apply_active_scan_flags()
227 reg_rule = freq_reg_info(wiphy, ch->center_freq); _rtl_reg_apply_active_scan_flags()
230 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) _rtl_reg_apply_active_scan_flags()
231 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; _rtl_reg_apply_active_scan_flags()
234 ch = &sband->channels[12]; /* CH 13 */ _rtl_reg_apply_active_scan_flags()
235 reg_rule = freq_reg_info(wiphy, ch->center_freq); _rtl_reg_apply_active_scan_flags()
238 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) _rtl_reg_apply_active_scan_flags()
239 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; _rtl_reg_apply_active_scan_flags()
250 struct ieee80211_channel *ch; _rtl_reg_apply_radar_flags() local
259 ch = &sband->channels[i]; _rtl_reg_apply_radar_flags()
260 if (!_rtl_is_radar_freq(ch->center_freq)) _rtl_reg_apply_radar_flags()
274 if (!(ch->flags & IEEE80211_CHAN_DISABLED)) _rtl_reg_apply_radar_flags()
275 ch->flags |= IEEE80211_CHAN_RADAR | _rtl_reg_apply_radar_flags()
294 struct ieee80211_channel *ch; _rtl_dump_channel_map() local
302 ch = &sband->channels[i]; _rtl_dump_channel_map()
/linux-4.1.27/drivers/gpu/host1x/
H A Ddebug.c45 static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo) show_channels() argument
47 struct host1x *m = dev_get_drvdata(ch->dev->parent); show_channels()
50 mutex_lock(&ch->reflock); show_channels()
51 if (ch->refcount) { show_channels()
52 mutex_lock(&ch->cdma.lock); show_channels()
54 host1x_hw_show_channel_fifo(m, ch, o); show_channels()
55 host1x_hw_show_channel_cdma(m, ch, o); show_channels()
56 mutex_unlock(&ch->cdma.lock); show_channels()
58 mutex_unlock(&ch->reflock); show_channels()
89 struct host1x_channel *ch; show_all() local
95 host1x_for_each_channel(m, ch) show_all()
96 show_channels(ch, o, true); show_all()
101 struct host1x_channel *ch; show_all_no_fifo() local
107 host1x_for_each_channel(host1x, ch) show_all_no_fifo()
108 show_channels(ch, o, false); show_all_no_fifo()
/linux-4.1.27/arch/mn10300/boot/compressed/
H A Dmisc.c128 unsigned char ch = inptr < insize ? inbuf[inptr++] : fill_inbuf(); get_byte() local
132 hex[0] = ((ch & 0x0f) > 9) ? get_byte()
133 ((ch & 0x0f) + 'A' - 0xa) : ((ch & 0x0f) + '0'); get_byte()
134 hex[1] = ((ch >> 4) > 9) ? get_byte()
135 ((ch >> 4) + 'A' - 0xa) : ((ch >> 4) + '0'); get_byte()
139 return ch; get_byte()
182 static inline void kputchar(unsigned char ch) kputchar() argument
188 if (ch == 0x0a) { kputchar()
194 SC0TXB = ch; kputchar()
200 if (ch == 0x0a) { kputchar()
206 SC1TXB = ch; kputchar()
215 char ch; kputs() local
222 ch = *s++; kputs()
223 if (ch == 0x0a) { kputs()
227 CYG_DEV_THR = ch; kputs()
263 uch *in, *out, ch; flush_window_low() local
268 ch = *out++ = *in++; flush_window_low()
269 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); flush_window_low()
281 uch *in, ch; flush_window_high() local
284 ch = *output_data++ = *in++; flush_window_high()
287 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); flush_window_high()
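Note: flush_window_low()/flush_window_high() above update the running checksum one byte at a time with the classic table-driven step, c = crc_32_tab[(c ^ ch) & 0xff] ^ (c >> 8). A self-contained sketch follows, assuming the decompressor's crc_32_tab is the standard gzip CRC-32 table (polynomial 0xedb88320); it builds that table and checksums a short string.

/* crc_sketch.c - illustrative only, not part of the kernel tree. */
#include <stdio.h>
#include <string.h>

static unsigned long crc_32_tab[256];

static void make_crc_table(void)
{
	unsigned long c;
	int n, k;

	for (n = 0; n < 256; n++) {
		c = (unsigned long)n;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? (0xedb88320UL ^ (c >> 1)) : (c >> 1);
		crc_32_tab[n] = c;
	}
}

int main(void)
{
	const unsigned char *p = (const unsigned char *)"hello";
	unsigned long c = 0xffffffffUL;	/* the decompressor keeps the crc in 'c' */
	size_t i;

	make_crc_table();
	for (i = 0; i < strlen((const char *)p); i++)
		c = crc_32_tab[((int)c ^ p[i]) & 0xff] ^ (c >> 8);
	printf("crc32 = 0x%08lx\n", c ^ 0xffffffffUL);
	return 0;
}
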
/linux-4.1.27/drivers/infiniband/ulp/srpt/
H A Dib_srpt.c97 static void srpt_release_channel(struct srpt_rdma_ch *ch);
123 static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch) srpt_get_ch_state() argument
128 spin_lock_irqsave(&ch->spinlock, flags); srpt_get_ch_state()
129 state = ch->state; srpt_get_ch_state()
130 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_get_ch_state()
135 srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state) srpt_set_ch_state() argument
140 spin_lock_irqsave(&ch->spinlock, flags); srpt_set_ch_state()
141 prev = ch->state; srpt_set_ch_state()
142 ch->state = new_state; srpt_set_ch_state()
143 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_set_ch_state()
153 srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old, srpt_test_and_set_ch_state() argument
159 spin_lock_irqsave(&ch->spinlock, flags); srpt_test_and_set_ch_state()
160 prev = ch->state; srpt_test_and_set_ch_state()
162 ch->state = new; srpt_test_and_set_ch_state()
163 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_test_and_set_ch_state()
227 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) srpt_qp_event() argument
230 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch)); srpt_qp_event()
234 ib_cm_notify(ch->cm_id, event->event); srpt_qp_event()
237 if (srpt_test_and_set_ch_state(ch, CH_DRAINING, srpt_qp_event()
239 srpt_release_channel(ch); srpt_qp_event()
242 ch->sess_name, srpt_get_ch_state(ch)); srpt_qp_event()
800 static int srpt_post_send(struct srpt_rdma_ch *ch, srpt_post_send() argument
805 struct srpt_device *sdev = ch->sport->sdev; srpt_post_send()
808 atomic_inc(&ch->req_lim); srpt_post_send()
811 if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) { srpt_post_send()
830 ret = ib_post_send(ch->qp, &wr, &bad_wr); srpt_post_send()
834 atomic_inc(&ch->sq_wr_avail); srpt_post_send()
835 atomic_dec(&ch->req_lim); srpt_post_send()
951 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_init_ch_qp() argument
963 attr->port_num = ch->sport->port; srpt_init_ch_qp()
976 * @ch: channel of the queue pair.
985 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rtr() argument
992 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); srpt_ch_qp_rtr()
1006 * @ch: channel of the queue pair.
1015 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rts() argument
1022 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); srpt_ch_qp_rts()
1037 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch) srpt_ch_qp_err() argument
1042 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); srpt_ch_qp_err()
1048 static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch, srpt_unmap_sg_to_ib_sge() argument
1054 BUG_ON(!ch); srpt_unmap_sg_to_ib_sge()
1069 ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt, srpt_unmap_sg_to_ib_sge()
1078 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, srpt_map_sg_to_ib_sge() argument
1081 struct ib_device *dev = ch->sport->sdev->device; srpt_map_sg_to_ib_sge()
1097 BUG_ON(!ch); srpt_map_sg_to_ib_sge()
1106 count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt, srpt_map_sg_to_ib_sge()
1209 sge->lkey = ch->sport->sdev->mr->lkey; srpt_map_sg_to_ib_sge()
1248 srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_map_sg_to_ib_sge()
1256 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) srpt_get_send_ioctx() argument
1261 BUG_ON(!ch); srpt_get_send_ioctx()
1264 spin_lock_irqsave(&ch->spinlock, flags); srpt_get_send_ioctx()
1265 if (!list_empty(&ch->free_list)) { srpt_get_send_ioctx()
1266 ioctx = list_first_entry(&ch->free_list, srpt_get_send_ioctx()
1270 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_get_send_ioctx()
1275 BUG_ON(ioctx->ch != ch); srpt_get_send_ioctx()
1333 struct srpt_rdma_ch *ch = ioctx->ch; srpt_abort_cmd() local
1335 BUG_ON(ch->sess == NULL); srpt_abort_cmd()
1367 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); srpt_abort_cmd()
1386 static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id) srpt_handle_send_err_comp() argument
1393 atomic_inc(&ch->sq_wr_avail); srpt_handle_send_err_comp()
1396 ioctx = ch->ioctx_ring[index]; srpt_handle_send_err_comp()
1405 /* If SRP_RSP sending failed, undo the ch->req_lim change. */ srpt_handle_send_err_comp()
1408 atomic_dec(&ch->req_lim); srpt_handle_send_err_comp()
1416 static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, srpt_handle_send_comp() argument
1421 atomic_inc(&ch->sq_wr_avail); srpt_handle_send_comp()
1431 srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_handle_send_comp()
1447 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, srpt_handle_rdma_comp() argument
1452 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); srpt_handle_rdma_comp()
1471 static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch, srpt_handle_rdma_err_comp() argument
1488 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); srpt_handle_rdma_err_comp()
1505 * @ch: RDMA channel through which the request has been received.
1518 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, srpt_build_cmd_rsp() argument
1542 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); srpt_build_cmd_rsp()
1548 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); srpt_build_cmd_rsp()
1565 * @ch: RDMA channel through which the request has been received.
1576 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch, srpt_build_tskmgmt_rsp() argument
1593 + atomic_xchg(&ch->req_lim_delta, 0)); srpt_build_tskmgmt_rsp()
1688 static int srpt_handle_cmd(struct srpt_rdma_ch *ch, srpt_handle_cmd() argument
1731 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb, srpt_handle_cmd()
1771 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch, srpt_handle_tsk_mgmt() argument
1777 struct se_session *sess = ch->sess; srpt_handle_tsk_mgmt()
1789 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); srpt_handle_tsk_mgmt()
1810 * @ch: RDMA channel through which the information unit has been received.
1813 static void srpt_handle_new_iu(struct srpt_rdma_ch *ch, srpt_handle_new_iu() argument
1820 BUG_ON(!ch); srpt_handle_new_iu()
1823 ib_dma_sync_single_for_cpu(ch->sport->sdev->device, srpt_handle_new_iu()
1827 ch_state = srpt_get_ch_state(ch); srpt_handle_new_iu()
1829 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list); srpt_handle_new_iu()
1839 send_ioctx = srpt_get_send_ioctx(ch); srpt_handle_new_iu()
1842 &ch->cmd_wait_list); srpt_handle_new_iu()
1849 srpt_handle_cmd(ch, recv_ioctx, send_ioctx); srpt_handle_new_iu()
1852 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx); srpt_handle_new_iu()
1872 srpt_post_recv(ch->sport->sdev, recv_ioctx); srpt_handle_new_iu()
1878 struct srpt_rdma_ch *ch, srpt_process_rcv_completion()
1881 struct srpt_device *sdev = ch->sport->sdev; srpt_process_rcv_completion()
1889 req_lim = atomic_dec_return(&ch->req_lim); srpt_process_rcv_completion()
1893 srpt_handle_new_iu(ch, ioctx, NULL); srpt_process_rcv_completion()
1916 struct srpt_rdma_ch *ch, srpt_process_send_completion()
1925 send_ioctx = ch->ioctx_ring[index]; srpt_process_send_completion()
1928 srpt_handle_send_comp(ch, send_ioctx); srpt_process_send_completion()
1932 srpt_handle_rdma_comp(ch, send_ioctx, opcode); srpt_process_send_completion()
1938 srpt_handle_send_err_comp(ch, wc->wr_id); srpt_process_send_completion()
1942 srpt_handle_rdma_err_comp(ch, send_ioctx, opcode); srpt_process_send_completion()
1947 && !list_empty(&ch->cmd_wait_list) srpt_process_send_completion()
1948 && srpt_get_ch_state(ch) == CH_LIVE srpt_process_send_completion()
1949 && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) { srpt_process_send_completion()
1952 recv_ioctx = list_first_entry(&ch->cmd_wait_list, srpt_process_send_completion()
1956 srpt_handle_new_iu(ch, recv_ioctx, send_ioctx); srpt_process_send_completion()
1960 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch) srpt_process_completion() argument
1962 struct ib_wc *const wc = ch->wc; srpt_process_completion()
1965 WARN_ON(cq != ch->cq); srpt_process_completion()
1968 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) { srpt_process_completion()
1971 srpt_process_rcv_completion(cq, ch, &wc[i]); srpt_process_completion()
1973 srpt_process_send_completion(cq, ch, &wc[i]); srpt_process_completion()
1991 struct srpt_rdma_ch *ch = ctx; srpt_completion() local
1993 wake_up_interruptible(&ch->wait_queue); srpt_completion()
1998 struct srpt_rdma_ch *ch; srpt_compl_thread() local
2003 ch = arg; srpt_compl_thread()
2004 BUG_ON(!ch); srpt_compl_thread()
2006 ch->sess_name, ch->thread->comm, current->pid); srpt_compl_thread()
2008 wait_event_interruptible(ch->wait_queue, srpt_compl_thread()
2009 (srpt_process_completion(ch->cq, ch), srpt_compl_thread()
2013 ch->sess_name, ch->thread->comm, current->pid); srpt_compl_thread()
2020 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) srpt_create_ch_ib() argument
2023 struct srpt_port *sport = ch->sport; srpt_create_ch_ib()
2028 WARN_ON(ch->rq_size < 1); srpt_create_ch_ib()
2036 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, srpt_create_ch_ib()
2037 ch->rq_size + srp_sq_size, 0); srpt_create_ch_ib()
2038 if (IS_ERR(ch->cq)) { srpt_create_ch_ib()
2039 ret = PTR_ERR(ch->cq); srpt_create_ch_ib()
2041 ch->rq_size + srp_sq_size, ret); srpt_create_ch_ib()
2045 qp_init->qp_context = (void *)ch; srpt_create_ch_ib()
2048 qp_init->send_cq = ch->cq; srpt_create_ch_ib()
2049 qp_init->recv_cq = ch->cq; srpt_create_ch_ib()
2056 ch->qp = ib_create_qp(sdev->pd, qp_init); srpt_create_ch_ib()
2057 if (IS_ERR(ch->qp)) { srpt_create_ch_ib()
2058 ret = PTR_ERR(ch->qp); srpt_create_ch_ib()
2062 ib_destroy_cq(ch->cq); srpt_create_ch_ib()
2070 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr); srpt_create_ch_ib()
2073 __func__, ch->cq->cqe, qp_init->cap.max_send_sge, srpt_create_ch_ib()
2074 qp_init->cap.max_send_wr, ch->cm_id); srpt_create_ch_ib()
2076 ret = srpt_init_ch_qp(ch, ch->qp); srpt_create_ch_ib()
2080 init_waitqueue_head(&ch->wait_queue); srpt_create_ch_ib()
2082 pr_debug("creating thread for session %s\n", ch->sess_name); srpt_create_ch_ib()
2084 ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl"); srpt_create_ch_ib()
2085 if (IS_ERR(ch->thread)) { srpt_create_ch_ib()
2087 PTR_ERR(ch->thread)); srpt_create_ch_ib()
2088 ch->thread = NULL; srpt_create_ch_ib()
2097 ib_destroy_qp(ch->qp); srpt_create_ch_ib()
2099 ib_destroy_cq(ch->cq); srpt_create_ch_ib()
2103 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch) srpt_destroy_ch_ib() argument
2105 if (ch->thread) srpt_destroy_ch_ib()
2106 kthread_stop(ch->thread); srpt_destroy_ch_ib()
2108 ib_destroy_qp(ch->qp); srpt_destroy_ch_ib()
2109 ib_destroy_cq(ch->cq); srpt_destroy_ch_ib()
2118 * Note: The caller must hold ch->sport->sdev->spinlock.
2120 static void __srpt_close_ch(struct srpt_rdma_ch *ch) __srpt_close_ch() argument
2126 sdev = ch->sport->sdev; __srpt_close_ch()
2128 spin_lock_irqsave(&ch->spinlock, flags); __srpt_close_ch()
2129 prev_state = ch->state; __srpt_close_ch()
2133 ch->state = CH_DISCONNECTING; __srpt_close_ch()
2138 spin_unlock_irqrestore(&ch->spinlock, flags); __srpt_close_ch()
2142 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, __srpt_close_ch()
2146 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) __srpt_close_ch()
2160 static void srpt_close_ch(struct srpt_rdma_ch *ch) srpt_close_ch() argument
2164 sdev = ch->sport->sdev; srpt_close_ch()
2166 __srpt_close_ch(ch); srpt_close_ch()
2175 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; srpt_shutdown_session() local
2178 spin_lock_irqsave(&ch->spinlock, flags); srpt_shutdown_session()
2179 if (ch->in_shutdown) { srpt_shutdown_session()
2180 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_shutdown_session()
2184 ch->in_shutdown = true; srpt_shutdown_session()
2186 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_shutdown_session()
2206 struct srpt_rdma_ch *ch; srpt_drain_channel() local
2215 list_for_each_entry(ch, &sdev->rch_list, list) { srpt_drain_channel()
2216 if (ch->cm_id == cm_id) { srpt_drain_channel()
2217 do_reset = srpt_test_and_set_ch_state(ch, srpt_drain_channel()
2219 srpt_test_and_set_ch_state(ch, srpt_drain_channel()
2221 srpt_test_and_set_ch_state(ch, srpt_drain_channel()
2229 if (ch->sess) srpt_drain_channel()
2230 srpt_shutdown_session(ch->sess); srpt_drain_channel()
2232 ret = srpt_ch_qp_err(ch); srpt_drain_channel()
2248 struct srpt_rdma_ch *ch; srpt_find_channel() local
2256 list_for_each_entry(ch, &sdev->rch_list, list) { srpt_find_channel()
2257 if (ch->cm_id == cm_id) { srpt_find_channel()
2264 return found ? ch : NULL; srpt_find_channel()
2275 static void srpt_release_channel(struct srpt_rdma_ch *ch) srpt_release_channel() argument
2277 schedule_work(&ch->release_work); srpt_release_channel()
2282 struct srpt_rdma_ch *ch; srpt_release_channel_work() local
2286 ch = container_of(w, struct srpt_rdma_ch, release_work); srpt_release_channel_work()
2287 pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess, srpt_release_channel_work()
2288 ch->release_done); srpt_release_channel_work()
2290 sdev = ch->sport->sdev; srpt_release_channel_work()
2293 se_sess = ch->sess; srpt_release_channel_work()
2300 ch->sess = NULL; srpt_release_channel_work()
2302 ib_destroy_cm_id(ch->cm_id); srpt_release_channel_work()
2304 srpt_destroy_ch_ib(ch); srpt_release_channel_work()
2306 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, srpt_release_channel_work()
2307 ch->sport->sdev, ch->rq_size, srpt_release_channel_work()
2308 ch->rsp_size, DMA_TO_DEVICE); srpt_release_channel_work()
2311 list_del(&ch->list); srpt_release_channel_work()
2314 if (ch->release_done) srpt_release_channel_work()
2315 complete(ch->release_done); srpt_release_channel_work()
2319 kfree(ch); srpt_release_channel_work()
2363 struct srpt_rdma_ch *ch, *tmp_ch; srpt_cm_req_recv() local
2423 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) { srpt_cm_req_recv()
2424 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16) srpt_cm_req_recv()
2425 && !memcmp(ch->t_port_id, req->target_port_id, 16) srpt_cm_req_recv()
2426 && param->port == ch->sport->port srpt_cm_req_recv()
2427 && param->listen_id == ch->sport->sdev->cm_id srpt_cm_req_recv()
2428 && ch->cm_id) { srpt_cm_req_recv()
2431 ch_state = srpt_get_ch_state(ch); srpt_cm_req_recv()
2439 ch->sess_name, ch->cm_id, ch_state); srpt_cm_req_recv()
2441 __srpt_close_ch(ch); srpt_cm_req_recv()
2464 ch = kzalloc(sizeof *ch, GFP_KERNEL); srpt_cm_req_recv()
2465 if (!ch) { srpt_cm_req_recv()
2473 INIT_WORK(&ch->release_work, srpt_release_channel_work); srpt_cm_req_recv()
2474 memcpy(ch->i_port_id, req->initiator_port_id, 16); srpt_cm_req_recv()
2475 memcpy(ch->t_port_id, req->target_port_id, 16); srpt_cm_req_recv()
2476 ch->sport = &sdev->port[param->port - 1]; srpt_cm_req_recv()
2477 ch->cm_id = cm_id; srpt_cm_req_recv()
2482 ch->rq_size = SRPT_RQ_SIZE; srpt_cm_req_recv()
2483 spin_lock_init(&ch->spinlock); srpt_cm_req_recv()
2484 ch->state = CH_CONNECTING; srpt_cm_req_recv()
2485 INIT_LIST_HEAD(&ch->cmd_wait_list); srpt_cm_req_recv()
2486 ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size; srpt_cm_req_recv()
2488 ch->ioctx_ring = (struct srpt_send_ioctx **) srpt_cm_req_recv()
2489 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, srpt_cm_req_recv()
2490 sizeof(*ch->ioctx_ring[0]), srpt_cm_req_recv()
2491 ch->rsp_size, DMA_TO_DEVICE); srpt_cm_req_recv()
2492 if (!ch->ioctx_ring) srpt_cm_req_recv()
2495 INIT_LIST_HEAD(&ch->free_list); srpt_cm_req_recv()
2496 for (i = 0; i < ch->rq_size; i++) { srpt_cm_req_recv()
2497 ch->ioctx_ring[i]->ch = ch; srpt_cm_req_recv()
2498 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); srpt_cm_req_recv()
2501 ret = srpt_create_ch_ib(ch); srpt_cm_req_recv()
2510 ret = srpt_ch_qp_rtr(ch, ch->qp); srpt_cm_req_recv()
2521 snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx", srpt_cm_req_recv()
2522 be64_to_cpu(*(__be64 *)ch->i_port_id), srpt_cm_req_recv()
2523 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); srpt_cm_req_recv()
2525 pr_debug("registering session %s\n", ch->sess_name); srpt_cm_req_recv()
2527 nacl = srpt_lookup_acl(sport, ch->i_port_id); srpt_cm_req_recv()
2530 " configured yet for initiator %s.\n", ch->sess_name); srpt_cm_req_recv()
2536 ch->sess = transport_init_session(TARGET_PROT_NORMAL); srpt_cm_req_recv()
2537 if (IS_ERR(ch->sess)) { srpt_cm_req_recv()
2543 ch->sess->se_node_acl = &nacl->nacl; srpt_cm_req_recv()
2544 transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch); srpt_cm_req_recv()
2546 pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess, srpt_cm_req_recv()
2547 ch->sess_name, ch->cm_id); srpt_cm_req_recv()
2554 ch->max_ti_iu_len = it_iu_len; srpt_cm_req_recv()
2557 rsp->req_lim_delta = cpu_to_be32(ch->rq_size); srpt_cm_req_recv()
2558 atomic_set(&ch->req_lim, ch->rq_size); srpt_cm_req_recv()
2559 atomic_set(&ch->req_lim_delta, 0); srpt_cm_req_recv()
2562 rep_param->qp_num = ch->qp->qp_num; srpt_cm_req_recv()
2580 list_add_tail(&ch->list, &sdev->rch_list); srpt_cm_req_recv()
2586 srpt_set_ch_state(ch, CH_RELEASING); srpt_cm_req_recv()
2587 transport_deregister_session_configfs(ch->sess); srpt_cm_req_recv()
2590 transport_deregister_session(ch->sess); srpt_cm_req_recv()
2591 ch->sess = NULL; srpt_cm_req_recv()
2594 srpt_destroy_ch_ib(ch); srpt_cm_req_recv()
2597 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, srpt_cm_req_recv()
2598 ch->sport->sdev, ch->rq_size, srpt_cm_req_recv()
2599 ch->rsp_size, DMA_TO_DEVICE); srpt_cm_req_recv()
2601 kfree(ch); srpt_cm_req_recv()
2634 struct srpt_rdma_ch *ch; srpt_cm_rtu_recv() local
2637 ch = srpt_find_channel(cm_id->context, cm_id); srpt_cm_rtu_recv()
2638 BUG_ON(!ch); srpt_cm_rtu_recv()
2640 if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) { srpt_cm_rtu_recv()
2643 ret = srpt_ch_qp_rts(ch, ch->qp); srpt_cm_rtu_recv()
2645 list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list, srpt_cm_rtu_recv()
2648 srpt_handle_new_iu(ch, ioctx, NULL); srpt_cm_rtu_recv()
2651 srpt_close_ch(ch); srpt_cm_rtu_recv()
2672 struct srpt_rdma_ch *ch; srpt_cm_dreq_recv() local
2676 ch = srpt_find_channel(cm_id->context, cm_id); srpt_cm_dreq_recv()
2677 BUG_ON(!ch); srpt_cm_dreq_recv()
2679 pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch)); srpt_cm_dreq_recv()
2681 spin_lock_irqsave(&ch->spinlock, flags); srpt_cm_dreq_recv()
2682 switch (ch->state) { srpt_cm_dreq_recv()
2686 ch->state = CH_DISCONNECTING; srpt_cm_dreq_recv()
2691 WARN(true, "unexpected channel state %d\n", ch->state); srpt_cm_dreq_recv()
2694 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_cm_dreq_recv()
2697 if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0) srpt_cm_dreq_recv()
2700 ch->sess_name); srpt_cm_dreq_recv()
2771 static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, srpt_perform_rdmas() argument
2787 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); srpt_perform_rdmas()
2824 ret = ib_post_send(ch->qp, &wr, &bad_wr); srpt_perform_rdmas()
2836 while (ch->state == CH_LIVE && srpt_perform_rdmas()
2837 ib_post_send(ch->qp, &wr, &bad_wr) != 0) { srpt_perform_rdmas()
2842 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { srpt_perform_rdmas()
2850 atomic_add(n_rdma, &ch->sq_wr_avail); srpt_perform_rdmas()
2857 static int srpt_xfer_data(struct srpt_rdma_ch *ch, srpt_xfer_data() argument
2862 ret = srpt_map_sg_to_ib_sge(ch, ioctx); srpt_xfer_data()
2868 ret = srpt_perform_rdmas(ch, ioctx); srpt_xfer_data()
2882 srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_xfer_data()
2899 struct srpt_rdma_ch *ch; srpt_write_pending() local
2910 ch = ioctx->ch; srpt_write_pending()
2911 BUG_ON(!ch); srpt_write_pending()
2913 ch_state = srpt_get_ch_state(ch); srpt_write_pending()
2930 ret = srpt_xfer_data(ch, ioctx); srpt_write_pending()
2955 struct srpt_rdma_ch *ch; srpt_queue_response() local
2965 ch = ioctx->ch; srpt_queue_response()
2966 BUG_ON(!ch); srpt_queue_response()
2979 WARN(true, "ch %p; cmd %d: unexpected command state %d\n", srpt_queue_response()
2980 ch, ioctx->ioctx.index, ioctx->state); srpt_queue_response()
2987 atomic_inc(&ch->req_lim_delta); srpt_queue_response()
2997 ret = srpt_xfer_data(ch, ioctx); srpt_queue_response()
3006 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag, srpt_queue_response()
3011 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, srpt_queue_response()
3014 ret = srpt_post_send(ch, ioctx, resp_len); srpt_queue_response()
3018 srpt_unmap_sg_to_ib_sge(ch, ioctx); srpt_queue_response()
3040 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); srpt_aborted_task()
3080 struct srpt_rdma_ch *ch, *tmp_ch; srpt_release_sdev() local
3088 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) srpt_release_sdev()
3089 __srpt_close_ch(ch); srpt_release_sdev()
3429 struct srpt_rdma_ch *ch = ioctx->ch; srpt_release_cmd() local
3441 spin_lock_irqsave(&ch->spinlock, flags); srpt_release_cmd()
3442 list_add(&ioctx->free_list, &ch->free_list); srpt_release_cmd()
3443 spin_unlock_irqrestore(&ch->spinlock, flags); srpt_release_cmd()
3456 struct srpt_rdma_ch *ch; srpt_close_session() local
3460 ch = se_sess->fabric_sess_ptr; srpt_close_session()
3461 WARN_ON(ch->sess != se_sess); srpt_close_session()
3463 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch)); srpt_close_session()
3465 sdev = ch->sport->sdev; srpt_close_session()
3467 BUG_ON(ch->release_done); srpt_close_session()
3468 ch->release_done = &release_done; srpt_close_session()
3469 __srpt_close_ch(ch); srpt_close_session()
1877 srpt_process_rcv_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch, struct ib_wc *wc) srpt_process_rcv_completion() argument
1915 srpt_process_send_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch, struct ib_wc *wc) srpt_process_send_completion() argument
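Note: ib_srpt drives the per-channel lifecycle through srpt_set_ch_state()/srpt_test_and_set_ch_state() above: read the state under ch->spinlock and only move to the new state if the channel is still where the caller expects. A user-space sketch of that compare-and-set transition, with a pthread mutex standing in for the spinlock and a reduced state set:

/* srpt_state_sketch.c - illustrative only, not part of the kernel tree. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ch_state { CH_CONNECTING, CH_LIVE, CH_DISCONNECTING, CH_DRAINING, CH_RELEASING };

struct channel {
	pthread_mutex_t lock;
	enum ch_state state;
};

/* Move to 'new' only if the channel is still in 'old'; report whether we did. */
static bool test_and_set_state(struct channel *ch, enum ch_state old, enum ch_state new)
{
	bool changed;

	pthread_mutex_lock(&ch->lock);
	changed = (ch->state == old);
	if (changed)
		ch->state = new;
	pthread_mutex_unlock(&ch->lock);
	return changed;
}

int main(void)
{
	struct channel ch = { .state = CH_CONNECTING };

	pthread_mutex_init(&ch.lock, NULL);
	printf("%d\n", test_and_set_state(&ch, CH_CONNECTING, CH_LIVE));	/* 1 */
	printf("%d\n", test_and_set_state(&ch, CH_CONNECTING, CH_LIVE));	/* 0 */
	return 0;
}
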
/linux-4.1.27/drivers/media/pci/cx25821/
H A Dcx25821-core.c439 const struct sram_channel *ch, cx25821_sram_channel_setup()
445 if (ch->cmds_start == 0) { cx25821_sram_channel_setup()
446 cx_write(ch->ptr1_reg, 0); cx25821_sram_channel_setup()
447 cx_write(ch->ptr2_reg, 0); cx25821_sram_channel_setup()
448 cx_write(ch->cnt2_reg, 0); cx25821_sram_channel_setup()
449 cx_write(ch->cnt1_reg, 0); cx25821_sram_channel_setup()
454 cdt = ch->cdt; cx25821_sram_channel_setup()
455 lines = ch->fifo_size / bpl; cx25821_sram_channel_setup()
468 cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx25821_sram_channel_setup()
476 cx_write(ch->fifo_start + 4 * i, i); cx25821_sram_channel_setup()
479 if (ch->jumponly) cx25821_sram_channel_setup()
480 cx_write(ch->cmds_start + 0, 8); cx25821_sram_channel_setup()
482 cx_write(ch->cmds_start + 0, risc); cx25821_sram_channel_setup()
484 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */ cx25821_sram_channel_setup()
485 cx_write(ch->cmds_start + 8, cdt); cx25821_sram_channel_setup()
486 cx_write(ch->cmds_start + 12, (lines * 16) >> 3); cx25821_sram_channel_setup()
487 cx_write(ch->cmds_start + 16, ch->ctrl_start); cx25821_sram_channel_setup()
489 if (ch->jumponly) cx25821_sram_channel_setup()
490 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2)); cx25821_sram_channel_setup()
492 cx_write(ch->cmds_start + 20, 64 >> 2); cx25821_sram_channel_setup()
495 cx_write(ch->cmds_start + i, 0); cx25821_sram_channel_setup()
498 cx_write(ch->ptr1_reg, ch->fifo_start); cx25821_sram_channel_setup()
499 cx_write(ch->ptr2_reg, cdt); cx25821_sram_channel_setup()
500 cx_write(ch->cnt2_reg, (lines * 16) >> 3); cx25821_sram_channel_setup()
501 cx_write(ch->cnt1_reg, (bpl >> 3) - 1); cx25821_sram_channel_setup()
507 const struct sram_channel *ch, cx25821_sram_channel_setup_audio()
513 if (ch->cmds_start == 0) { cx25821_sram_channel_setup_audio()
514 cx_write(ch->ptr1_reg, 0); cx25821_sram_channel_setup_audio()
515 cx_write(ch->ptr2_reg, 0); cx25821_sram_channel_setup_audio()
516 cx_write(ch->cnt2_reg, 0); cx25821_sram_channel_setup_audio()
517 cx_write(ch->cnt1_reg, 0); cx25821_sram_channel_setup_audio()
522 cdt = ch->cdt; cx25821_sram_channel_setup_audio()
523 lines = ch->fifo_size / bpl; cx25821_sram_channel_setup_audio()
536 cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx25821_sram_channel_setup_audio()
543 if (ch->jumponly) cx25821_sram_channel_setup_audio()
544 cx_write(ch->cmds_start + 0, 8); cx25821_sram_channel_setup_audio()
546 cx_write(ch->cmds_start + 0, risc); cx25821_sram_channel_setup_audio()
548 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */ cx25821_sram_channel_setup_audio()
549 cx_write(ch->cmds_start + 8, cdt); cx25821_sram_channel_setup_audio()
550 cx_write(ch->cmds_start + 12, (lines * 16) >> 3); cx25821_sram_channel_setup_audio()
551 cx_write(ch->cmds_start + 16, ch->ctrl_start); cx25821_sram_channel_setup_audio()
554 if (ch->jumponly) cx25821_sram_channel_setup_audio()
555 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2)); cx25821_sram_channel_setup_audio()
557 cx_write(ch->cmds_start + 20, 64 >> 2); cx25821_sram_channel_setup_audio()
561 cx_write(ch->cmds_start + i, 0); cx25821_sram_channel_setup_audio()
564 cx_write(ch->ptr1_reg, ch->fifo_start); cx25821_sram_channel_setup_audio()
565 cx_write(ch->ptr2_reg, cdt); cx25821_sram_channel_setup_audio()
566 cx_write(ch->cnt2_reg, (lines * 16) >> 3); cx25821_sram_channel_setup_audio()
567 cx_write(ch->cnt1_reg, (bpl >> 3) - 1); cx25821_sram_channel_setup_audio()
573 void cx25821_sram_channel_dump(struct cx25821_dev *dev, const struct sram_channel *ch) cx25821_sram_channel_dump() argument
594 pr_warn("%s: %s - dma channel status dump\n", dev->name, ch->name); cx25821_sram_channel_dump()
597 i * 4, name[i], cx_read(ch->cmds_start + 4 * i)); cx25821_sram_channel_dump()
601 risc = cx_read(ch->cmds_start + 4 * (i + 14)); cx25821_sram_channel_dump()
607 risc = cx_read(ch->ctrl_start + 4 * i); cx25821_sram_channel_dump()
611 i * 4, ch->ctrl_start + 4 * i, i); cx25821_sram_channel_dump()
614 risc = cx_read(ch->ctrl_start + 4 * (i + j)); cx25821_sram_channel_dump()
621 ch->fifo_start, ch->fifo_start + ch->fifo_size); cx25821_sram_channel_dump()
623 ch->ctrl_start, ch->ctrl_start + 6 * 16); cx25821_sram_channel_dump()
625 cx_read(ch->ptr1_reg)); cx25821_sram_channel_dump()
627 cx_read(ch->ptr2_reg)); cx25821_sram_channel_dump()
629 cx_read(ch->cnt1_reg)); cx25821_sram_channel_dump()
631 cx_read(ch->cnt2_reg)); cx25821_sram_channel_dump()
635 const struct sram_channel *ch) cx25821_sram_channel_dump_audio()
658 dev->name, ch->name); cx25821_sram_channel_dump_audio()
663 cx_read(ch->cmds_start + 4 * i)); cx25821_sram_channel_dump_audio()
667 risc = cx_read(ch->cmds_start + 4 * (i + 14)); cx25821_sram_channel_dump_audio()
673 risc = cx_read(ch->ctrl_start + 4 * i); cx25821_sram_channel_dump_audio()
677 i * 4, ch->ctrl_start + 4 * i, i); cx25821_sram_channel_dump_audio()
681 risc = cx_read(ch->ctrl_start + 4 * (i + j)); cx25821_sram_channel_dump_audio()
688 ch->fifo_start, ch->fifo_start + ch->fifo_size); cx25821_sram_channel_dump_audio()
690 ch->ctrl_start, ch->ctrl_start + 6 * 16); cx25821_sram_channel_dump_audio()
692 cx_read(ch->ptr1_reg)); cx25821_sram_channel_dump_audio()
694 cx_read(ch->ptr2_reg)); cx25821_sram_channel_dump_audio()
696 cx_read(ch->cnt1_reg)); cx25821_sram_channel_dump_audio()
698 cx_read(ch->cnt2_reg)); cx25821_sram_channel_dump_audio()
701 risc = cx_read(ch->cmds_start + 56 + (i * 4)); cx25821_sram_channel_dump_audio()
783 const struct sram_channel *ch) cx25821_set_vip_mode()
785 cx_write(ch->pix_frmt, PIXEL_FRMT_422); cx25821_set_vip_mode()
786 cx_write(ch->vip_ctl, PIXEL_ENGINE_VIP1); cx25821_set_vip_mode()
438 cx25821_sram_channel_setup(struct cx25821_dev *dev, const struct sram_channel *ch, unsigned int bpl, u32 risc) cx25821_sram_channel_setup() argument
506 cx25821_sram_channel_setup_audio(struct cx25821_dev *dev, const struct sram_channel *ch, unsigned int bpl, u32 risc) cx25821_sram_channel_setup_audio() argument
634 cx25821_sram_channel_dump_audio(struct cx25821_dev *dev, const struct sram_channel *ch) cx25821_sram_channel_dump_audio() argument
782 cx25821_set_vip_mode(struct cx25821_dev *dev, const struct sram_channel *ch) cx25821_set_vip_mode() argument
H A Dcx25821-video-upstream.c42 const struct sram_channel *ch, cx25821_sram_channel_setup_upstream()
48 if (ch->cmds_start == 0) { cx25821_sram_channel_setup_upstream()
49 cx_write(ch->ptr1_reg, 0); cx25821_sram_channel_setup_upstream()
50 cx_write(ch->ptr2_reg, 0); cx25821_sram_channel_setup_upstream()
51 cx_write(ch->cnt2_reg, 0); cx25821_sram_channel_setup_upstream()
52 cx_write(ch->cnt1_reg, 0); cx25821_sram_channel_setup_upstream()
57 cdt = ch->cdt; cx25821_sram_channel_setup_upstream()
58 lines = ch->fifo_size / bpl; cx25821_sram_channel_setup_upstream()
67 cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx25821_sram_channel_setup_upstream()
74 cx_write(ch->cmds_start + 0, risc); cx25821_sram_channel_setup_upstream()
76 cx_write(ch->cmds_start + 4, 0); cx25821_sram_channel_setup_upstream()
77 cx_write(ch->cmds_start + 8, cdt); cx25821_sram_channel_setup_upstream()
78 cx_write(ch->cmds_start + 12, (lines * 16) >> 3); cx25821_sram_channel_setup_upstream()
79 cx_write(ch->cmds_start + 16, ch->ctrl_start); cx25821_sram_channel_setup_upstream()
81 cx_write(ch->cmds_start + 20, VID_IQ_SIZE_DW); cx25821_sram_channel_setup_upstream()
84 cx_write(ch->cmds_start + i, 0); cx25821_sram_channel_setup_upstream()
87 cx_write(ch->ptr1_reg, ch->fifo_start); cx25821_sram_channel_setup_upstream()
88 cx_write(ch->ptr2_reg, cdt); cx25821_sram_channel_setup_upstream()
89 cx_write(ch->cnt2_reg, (lines * 16) >> 3); cx25821_sram_channel_setup_upstream()
90 cx_write(ch->cnt1_reg, (bpl >> 3) - 1); cx25821_sram_channel_setup_upstream()
517 const struct sram_channel *ch, cx25821_set_pixelengine()
531 cx_write(ch->vid_fmt_ctl, value); cx25821_set_pixelengine()
535 cx_write(ch->vid_active_ctl1, width); cx25821_set_pixelengine()
546 cx_write(ch->vid_active_ctl2, value); cx25821_set_pixelengine()
548 cx_write(ch->vid_cdt_size, VID_CDT_SIZE >> 3); cx25821_set_pixelengine()
41 cx25821_sram_channel_setup_upstream(struct cx25821_dev *dev, const struct sram_channel *ch, unsigned int bpl, u32 risc) cx25821_sram_channel_setup_upstream() argument
516 cx25821_set_pixelengine(struct cx25821_channel *chan, const struct sram_channel *ch, int pix_format) cx25821_set_pixelengine() argument
H A Dcx25821-audio-upstream.c48 const struct sram_channel *ch, cx25821_sram_channel_setup_upstream_audio()
54 if (ch->cmds_start == 0) { cx25821_sram_channel_setup_upstream_audio()
55 cx_write(ch->ptr1_reg, 0); cx25821_sram_channel_setup_upstream_audio()
56 cx_write(ch->ptr2_reg, 0); cx25821_sram_channel_setup_upstream_audio()
57 cx_write(ch->cnt2_reg, 0); cx25821_sram_channel_setup_upstream_audio()
58 cx_write(ch->cnt1_reg, 0); cx25821_sram_channel_setup_upstream_audio()
63 cdt = ch->cdt; cx25821_sram_channel_setup_upstream_audio()
64 lines = ch->fifo_size / bpl; cx25821_sram_channel_setup_upstream_audio()
73 cx_write(cdt + 16 * i, ch->fifo_start + bpl * i); cx25821_sram_channel_setup_upstream_audio()
80 cx_write(ch->cmds_start + 0, risc); cx25821_sram_channel_setup_upstream_audio()
82 cx_write(ch->cmds_start + 4, 0); cx25821_sram_channel_setup_upstream_audio()
83 cx_write(ch->cmds_start + 8, cdt); cx25821_sram_channel_setup_upstream_audio()
84 cx_write(ch->cmds_start + 12, AUDIO_CDT_SIZE_QW); cx25821_sram_channel_setup_upstream_audio()
85 cx_write(ch->cmds_start + 16, ch->ctrl_start); cx25821_sram_channel_setup_upstream_audio()
88 cx_write(ch->cmds_start + 20, AUDIO_IQ_SIZE_DW); cx25821_sram_channel_setup_upstream_audio()
91 cx_write(ch->cmds_start + i, 0); cx25821_sram_channel_setup_upstream_audio()
94 cx_write(ch->ptr1_reg, ch->fifo_start); cx25821_sram_channel_setup_upstream_audio()
95 cx_write(ch->ptr2_reg, cdt); cx25821_sram_channel_setup_upstream_audio()
96 cx_write(ch->cnt2_reg, AUDIO_CDT_SIZE_QW); cx25821_sram_channel_setup_upstream_audio()
97 cx_write(ch->cnt1_reg, AUDIO_CLUSTER_SIZE_QW - 1); cx25821_sram_channel_setup_upstream_audio()
47 cx25821_sram_channel_setup_upstream_audio(struct cx25821_dev *dev, const struct sram_channel *ch, unsigned int bpl, u32 risc) cx25821_sram_channel_setup_upstream_audio() argument
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/
H A Dg94.c56 #define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
57 #define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
60 auxch_fini(struct nvkm_i2c *aux, int ch) auxch_fini() argument
62 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000); auxch_fini()
66 auxch_init(struct nvkm_i2c *aux, int ch) auxch_init() argument
76 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); auxch_init()
85 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq); auxch_init()
88 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); auxch_init()
92 auxch_fini(aux, ch); auxch_init()
108 int ch = port->addr; g94_aux() local
113 ret = auxch_init(aux, ch); g94_aux()
117 stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50)); g94_aux()
128 nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]); g94_aux()
132 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); g94_aux()
136 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr); g94_aux()
141 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl); g94_aux()
142 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl); g94_aux()
147 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl); g94_aux()
151 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); g94_aux()
162 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0); g94_aux()
176 xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i); g94_aux()
183 auxch_fini(aux, ch); g94_aux()
H A Dgm204.c26 #define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
27 #define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
30 auxch_fini(struct nvkm_i2c *aux, int ch) auxch_fini() argument
32 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000); auxch_fini()
36 auxch_init(struct nvkm_i2c *aux, int ch) auxch_init() argument
46 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50)); auxch_init()
55 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq); auxch_init()
58 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50)); auxch_init()
62 auxch_fini(aux, ch); auxch_init()
78 int ch = port->addr; gm204_aux() local
83 ret = auxch_init(aux, ch); gm204_aux()
87 stat = nv_rd32(aux, 0x00d958 + (ch * 0x50)); gm204_aux()
98 nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]); gm204_aux()
102 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50)); gm204_aux()
106 nv_wr32(aux, 0x00d950 + (ch * 0x50), addr); gm204_aux()
111 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl); gm204_aux()
112 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl); gm204_aux()
117 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl); gm204_aux()
121 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50)); gm204_aux()
132 stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0); gm204_aux()
146 xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i); gm204_aux()
153 auxch_fini(aux, ch); gm204_aux()
/linux-4.1.27/drivers/tty/serial/
H A Dmsm_smd_tty.c33 smd_channel_t *ch; member in struct:smd_tty_info
69 avail = smd_read_avail(info->ch); smd_tty_notify()
75 if (smd_read(info->ch, ptr, avail) != avail) { smd_tty_notify()
107 if (info->ch) smd_tty_port_activate()
108 smd_kick(info->ch); smd_tty_port_activate()
110 res = smd_open(name, &info->ch, info, smd_tty_notify); smd_tty_port_activate()
123 if (info->ch) { smd_tty_port_shutdown()
124 smd_close(info->ch); smd_tty_port_shutdown()
125 info->ch = 0; smd_tty_port_shutdown()
153 avail = smd_write_avail(info->ch); smd_tty_write()
157 return smd_write(info->ch, buf, len); smd_tty_write()
163 return smd_write_avail(info->ch); smd_tty_write_room()
169 return smd_read_avail(info->ch); smd_tty_chars_in_buffer()
175 smd_kick(info->ch); smd_tty_unthrottle()
/linux-4.1.27/drivers/devfreq/exynos/
H A Dexynos_ppmu.c29 void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch, exynos_ppmu_setevent() argument
32 __raw_writel(evt, ppmu_base + PPMU_BEVTSEL(ch)); exynos_ppmu_setevent()
45 unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch) exynos_ppmu_read() argument
49 if (ch == PPMU_PMNCNT3) exynos_ppmu_read()
50 total = ((__raw_readl(ppmu_base + PMCNT_OFFSET(ch)) << 8) | exynos_ppmu_read()
51 __raw_readl(ppmu_base + PMCNT_OFFSET(ch + 1))); exynos_ppmu_read()
53 total = __raw_readl(ppmu_base + PMCNT_OFFSET(ch)); exynos_ppmu_read()
H A Dexynos_ppmu.h40 #define PPMU_BEVTSEL(x) (PPMU_BEVT0SEL + (ch * PPMU_BEVTSEL_OFFSET))
78 void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
82 unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch);
/linux-4.1.27/drivers/staging/vt6655/
H A Dchannel.c132 struct ieee80211_channel *ch; vnt_init_bands() local
140 ch = vnt_channels_5ghz; vnt_init_bands()
143 ch[i].max_power = 0x3f; vnt_init_bands()
144 ch[i].flags = IEEE80211_CHAN_NO_HT40; vnt_init_bands()
155 ch = vnt_channels_2ghz; vnt_init_bands()
158 ch[i].max_power = 0x3f; vnt_init_bands()
159 ch[i].flags = IEEE80211_CHAN_NO_HT40; vnt_init_bands()
177 bool set_channel(void *pDeviceHandler, struct ieee80211_channel *ch) set_channel() argument
182 if (pDevice->byCurrentCh == ch->hw_value) set_channel()
201 ch->hw_value); set_channel()
203 pDevice->byCurrentCh = ch->hw_value; set_channel()
205 ch->hw_value); set_channel()
209 RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, ch->hw_value); set_channel()
/linux-4.1.27/arch/mips/txx9/generic/
H A Dsetup_tx3927.c80 for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) { tx3927_setup()
82 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; tx3927_setup()
83 tx3927_dmaptr->ch[i].ccr = 0; tx3927_setup()
126 void __init tx3927_mtd_init(int ch) tx3927_mtd_init() argument
129 .width = TX3927_ROMC_WIDTH(ch) / 8, tx3927_mtd_init()
131 unsigned long start = txx9_ce_res[ch].start; tx3927_mtd_init()
132 unsigned long size = txx9_ce_res[ch].end - start + 1; tx3927_mtd_init()
134 if (!(tx3927_romcptr->cr[ch] & 0x8)) tx3927_mtd_init()
136 txx9_physmap_flash_init(ch, start, size, &pdata); tx3927_mtd_init()
H A D7segled.c44 unsigned int ch = dev->id; ascii_store() local
45 txx9_7segled_putc(ch, buf[0]); ascii_store()
53 unsigned int ch = dev->id; raw_store() local
54 tx_7segled_putc(ch, buf[0]); raw_store()
/linux-4.1.27/arch/powerpc/boot/
H A Dserial.c39 char ch, *cp; serial_edit_cmdline() local
49 while (((ch = scdp->getc()) != '\n') && (ch != '\r')) { serial_edit_cmdline()
51 if ((ch == '\b') || (ch == '\177')) { serial_edit_cmdline()
58 } else if ((ch == '\030') || (ch == '\025')) { serial_edit_cmdline()
65 *cp++ = ch; serial_edit_cmdline()
67 scdp->putc(ch); serial_edit_cmdline()
H A Dugecon.c75 static void ug_raw_putc(char ch) ug_raw_putc() argument
77 ug_io_transaction(0xb0000000 | (ch << 20)); ug_raw_putc()
80 static void ug_putc(char ch) ug_putc() argument
90 ug_raw_putc(ch); ug_putc()
H A Dugecon.h20 extern void ug_putc(char ch);
/linux-4.1.27/drivers/gpu/drm/i915/
H A Ddvo_sil164.c68 static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) sil164_readb() argument
94 *ch = in_buf[0]; sil164_readb()
105 static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) sil164_writeb() argument
118 out_buf[1] = ch; sil164_writeb()
137 unsigned char ch; sil164_init() local
147 if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) sil164_init()
150 if (ch != (SIL164_VID & 0xff)) { sil164_init()
152 ch, adapter->name, dvo->slave_addr); sil164_init()
156 if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) sil164_init()
159 if (ch != (SIL164_DID & 0xff)) { sil164_init()
161 ch, adapter->name, dvo->slave_addr); sil164_init()
214 unsigned char ch; sil164_dpms() local
216 ret = sil164_readb(dvo, SIL164_REG8, &ch); sil164_dpms()
221 ch |= SIL164_8_PD; sil164_dpms()
223 ch &= ~SIL164_8_PD; sil164_dpms()
225 sil164_writeb(dvo, SIL164_REG8, ch); sil164_dpms()
232 unsigned char ch; sil164_get_hw_state() local
234 ret = sil164_readb(dvo, SIL164_REG8, &ch); sil164_get_hw_state()
238 if (ch & SIL164_8_PD) sil164_get_hw_state()
H A Dintel_hdmi.c1365 enum dpio_channel ch = vlv_dport_to_channel(dport); chv_hdmi_pre_pll_enable() local
1377 if (ch == DPIO_CH0) chv_hdmi_pre_pll_enable()
1379 if (ch == DPIO_CH1) chv_hdmi_pre_pll_enable()
1385 if (ch == DPIO_CH0) chv_hdmi_pre_pll_enable()
1387 if (ch == DPIO_CH1) chv_hdmi_pre_pll_enable()
1393 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); chv_hdmi_pre_pll_enable()
1399 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); chv_hdmi_pre_pll_enable()
1401 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); chv_hdmi_pre_pll_enable()
1407 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); chv_hdmi_pre_pll_enable()
1414 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); chv_hdmi_pre_pll_enable()
1419 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); chv_hdmi_pre_pll_enable()
1447 enum dpio_channel ch = vlv_dport_to_channel(dport); chv_hdmi_post_disable() local
1454 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); chv_hdmi_post_disable()
1456 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); chv_hdmi_post_disable()
1458 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); chv_hdmi_post_disable()
1460 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); chv_hdmi_post_disable()
1462 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); chv_hdmi_post_disable()
1464 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); chv_hdmi_post_disable()
1466 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); chv_hdmi_post_disable()
1468 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); chv_hdmi_post_disable()
1483 enum dpio_channel ch = vlv_dport_to_channel(dport); chv_hdmi_pre_enable() local
1491 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); chv_hdmi_pre_enable()
1493 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); chv_hdmi_pre_enable()
1495 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); chv_hdmi_pre_enable()
1497 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); chv_hdmi_pre_enable()
1500 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); chv_hdmi_pre_enable()
1502 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); chv_hdmi_pre_enable()
1504 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); chv_hdmi_pre_enable()
1506 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); chv_hdmi_pre_enable()
1508 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); chv_hdmi_pre_enable()
1510 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); chv_hdmi_pre_enable()
1512 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); chv_hdmi_pre_enable()
1514 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); chv_hdmi_pre_enable()
1520 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), chv_hdmi_pre_enable()
1528 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); chv_hdmi_pre_enable()
1532 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); chv_hdmi_pre_enable()
1534 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); chv_hdmi_pre_enable()
1538 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); chv_hdmi_pre_enable()
1540 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); chv_hdmi_pre_enable()
1543 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); chv_hdmi_pre_enable()
1545 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); chv_hdmi_pre_enable()
1548 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); chv_hdmi_pre_enable()
1553 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); chv_hdmi_pre_enable()
1556 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); chv_hdmi_pre_enable()
1560 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); chv_hdmi_pre_enable()
1563 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); chv_hdmi_pre_enable()
1568 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); chv_hdmi_pre_enable()
1570 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); chv_hdmi_pre_enable()
1575 val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch)); chv_hdmi_pre_enable()
1576 if (ch) chv_hdmi_pre_enable()
1580 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val); chv_hdmi_pre_enable()
1582 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch), chv_hdmi_pre_enable()
1583 vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) | chv_hdmi_pre_enable()
1587 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); chv_hdmi_pre_enable()
1589 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); chv_hdmi_pre_enable()
1591 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); chv_hdmi_pre_enable()
1593 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); chv_hdmi_pre_enable()
/linux-4.1.27/drivers/char/
H A Dmsm_smd_pkt.c44 struct smd_channel *ch; member in struct:smd_pkt_dev
94 if (!smd_pkt_devp || !smd_pkt_devp->ch) check_and_wakeup_reader()
97 sz = smd_cur_packet_size(smd_pkt_devp->ch); check_and_wakeup_reader()
102 if (sz > smd_read_avail(smd_pkt_devp->ch)) { check_and_wakeup_reader()
123 if (!smd_pkt_devp || !smd_pkt_devp->ch) smd_pkt_read()
126 chl = smd_pkt_devp->ch; smd_pkt_read()
141 bytes_read = smd_cur_packet_size(smd_pkt_devp->ch); smd_pkt_read()
143 bytes_read < smd_read_avail(smd_pkt_devp->ch)) { smd_pkt_read()
155 r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read); smd_pkt_read()
188 if (!smd_pkt_devp || !smd_pkt_devp->ch) smd_pkt_write()
192 if (smd_write_avail(smd_pkt_devp->ch) < count) { smd_pkt_write()
206 r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count); smd_pkt_write()
229 if (smd_read_avail(smd_pkt_devp->ch)) smd_pkt_poll()
240 if (smd_pkt_devp->ch == 0) smd_pkt_ch_notify()
304 &smd_pkt_devp->ch, smd_pkt_devp, smd_pkt_open()
321 smd_close(smd_pkt_devp->ch); smd_pkt_open()
322 smd_pkt_devp->ch = 0; smd_pkt_open()
343 r = smd_close(smd_pkt_devp->ch); smd_pkt_release()
344 smd_pkt_devp->ch = 0; smd_pkt_release()
H A Ddtlk.c129 char ch; dtlk_read() local
140 ch = dtlk_read_lpc(); dtlk_read()
141 /* printk("dtlk_read() reads 0x%02x\n", ch); */ dtlk_read()
142 if (put_user(ch, buf++)) dtlk_read()
161 int i = 0, retries = 0, ch; dtlk_write() local
167 int i, ch; dtlk_write() local
169 if (get_user(ch, buf + i)) dtlk_write()
171 if (' ' <= ch && ch <= '~') dtlk_write()
172 printk("%c", ch); dtlk_write()
174 printk("\\%03o", ch); dtlk_write()
184 while (i < count && !get_user(ch, buf) && dtlk_write()
185 (ch == DTLK_CLEAR || dtlk_writeable())) { dtlk_write()
186 dtlk_write_tts(ch); dtlk_write()
569 char ch; dtlk_read_tts() local
580 ch = inb_p(dtlk_port_tts); /* input from TTS port */ dtlk_read_tts()
581 ch &= 0x7f; dtlk_read_tts()
582 outb_p(ch, dtlk_port_tts); dtlk_read_tts()
593 return ch; dtlk_read_tts()
599 char ch; dtlk_read_lpc() local
604 ch = inb_p(dtlk_port_lpc); /* input from LPC port */ dtlk_read_lpc()
617 return ch; dtlk_read_lpc()
632 static char dtlk_write_tts(char ch) dtlk_write_tts() argument
637 if (' ' <= ch && ch <= '~') dtlk_write_tts()
638 printk("'%c'", ch); dtlk_write_tts()
640 printk("0x%02x", ch); dtlk_write_tts()
642 if (ch != DTLK_CLEAR) /* no flow control for CLEAR command */ dtlk_write_tts()
649 outb_p(ch, dtlk_port_tts); /* output to TTS port */ dtlk_write_tts()
/linux-4.1.27/arch/arm/plat-omap/
H A Ddma.c611 int ch, free_ch = -1; omap_request_dma() local
618 for (ch = 0; ch < dma_chan_count; ch++) { omap_request_dma()
619 if (free_ch == -1 && dma_chan[ch].dev_id == -1) { omap_request_dma()
620 free_ch = ch; omap_request_dma()
1087 static int omap1_dma_handle_ch(int ch) omap1_dma_handle_ch() argument
1091 if (enable_1510_mode && ch >= 6) { omap1_dma_handle_ch()
1092 csr = dma_chan[ch].saved_csr; omap1_dma_handle_ch()
1093 dma_chan[ch].saved_csr = 0; omap1_dma_handle_ch()
1095 csr = p->dma_read(CSR, ch); omap1_dma_handle_ch()
1096 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { omap1_dma_handle_ch()
1097 dma_chan[ch + 6].saved_csr = csr >> 7; omap1_dma_handle_ch()
1102 if (unlikely(dma_chan[ch].dev_id == -1)) { omap1_dma_handle_ch()
1104 ch, csr); omap1_dma_handle_ch()
1108 pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id); omap1_dma_handle_ch()
1111 dma_chan[ch].dev_id); omap1_dma_handle_ch()
1113 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; omap1_dma_handle_ch()
1114 if (likely(dma_chan[ch].callback != NULL)) omap1_dma_handle_ch()
1115 dma_chan[ch].callback(ch, csr, dma_chan[ch].data); omap1_dma_handle_ch()
1122 int ch = ((int) dev_id) - 1; omap1_dma_irq_handler() local
1128 handled_now += omap1_dma_handle_ch(ch); omap1_dma_irq_handler()
1129 if (enable_1510_mode && dma_chan[ch + 6].saved_csr) omap1_dma_irq_handler()
1130 handled_now += omap1_dma_handle_ch(ch + 6); omap1_dma_irq_handler()
1145 static int omap2_dma_handle_ch(int ch) omap2_dma_handle_ch() argument
1147 u32 status = p->dma_read(CSR, ch); omap2_dma_handle_ch()
1151 pr_warn("Spurious DMA IRQ for lch %d\n", ch); omap2_dma_handle_ch()
1152 p->dma_write(1 << ch, IRQSTATUS_L0, ch); omap2_dma_handle_ch()
1155 if (unlikely(dma_chan[ch].dev_id == -1)) { omap2_dma_handle_ch()
1158 status, ch); omap2_dma_handle_ch()
1163 dma_chan[ch].dev_id); omap2_dma_handle_ch()
1166 dma_chan[ch].dev_id); omap2_dma_handle_ch()
1170 ccr = p->dma_read(CCR, ch); omap2_dma_handle_ch()
1172 p->dma_write(ccr, CCR, ch); omap2_dma_handle_ch()
1173 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; omap2_dma_handle_ch()
1178 dma_chan[ch].dev_id); omap2_dma_handle_ch()
1181 dma_chan[ch].dev_id); omap2_dma_handle_ch()
1183 p->dma_write(status, CSR, ch); omap2_dma_handle_ch()
1184 p->dma_write(1 << ch, IRQSTATUS_L0, ch); omap2_dma_handle_ch()
1186 p->dma_read(IRQSTATUS_L0, ch); omap2_dma_handle_ch()
1188 /* If the ch is not chained then chain_id will be -1 */ omap2_dma_handle_ch()
1189 if (dma_chan[ch].chain_id != -1) { omap2_dma_handle_ch()
1190 int chain_id = dma_chan[ch].chain_id; omap2_dma_handle_ch()
1191 dma_chan[ch].state = DMA_CH_NOTSTARTED; omap2_dma_handle_ch()
1192 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15)) omap2_dma_handle_ch()
1193 dma_chan[dma_chan[ch].next_linked_ch].state = omap2_dma_handle_ch()
1197 disable_lnk(ch); omap2_dma_handle_ch()
1202 status = p->dma_read(CSR, ch); omap2_dma_handle_ch()
1203 p->dma_write(status, CSR, ch); omap2_dma_handle_ch()
1206 if (likely(dma_chan[ch].callback != NULL)) omap2_dma_handle_ch()
1207 dma_chan[ch].callback(ch, status, dma_chan[ch].data); omap2_dma_handle_ch()
1264 int ch; omap_dma_global_context_restore() local
1277 for (ch = 0; ch < dma_chan_count; ch++) omap_dma_global_context_restore()
1278 if (dma_chan[ch].dev_id != -1) omap_dma_global_context_restore()
1279 omap_clear_dma(ch); omap_dma_global_context_restore()
1290 int ch, ret = 0; omap_system_dma_probe() local
1332 for (ch = 0; ch < dma_chan_count; ch++) { omap_system_dma_probe()
1333 omap_clear_dma(ch); omap_system_dma_probe()
1335 omap2_disable_irq_lch(ch); omap_system_dma_probe()
1337 dma_chan[ch].dev_id = -1; omap_system_dma_probe()
1338 dma_chan[ch].next_lch = -1; omap_system_dma_probe()
1340 if (ch >= 6 && enable_1510_mode) omap_system_dma_probe()
1345 * request_irq() doesn't like dev_id (ie. ch) being omap_system_dma_probe()
1348 sprintf(&irq_name[0], "%d", ch); omap_system_dma_probe()
1362 (void *) (ch + 1)); omap_system_dma_probe()
1400 for (irq_rel = 0; irq_rel < ch; irq_rel++) { omap_system_dma_probe()
/linux-4.1.27/fs/isofs/
H A Djoliet.c19 __be16 *ip, ch; uni16_to_x8() local
25 while ((ch = get_unaligned(ip)) && len) { uni16_to_x8()
27 llen = nls->uni2char(be16_to_cpu(ch), op, NLS_MAX_CHARSET_SIZE); uni16_to_x8()
/linux-4.1.27/include/linux/
H A Dtty_flip.h17 unsigned char ch, char flag) tty_insert_flip_char()
26 *char_buf_ptr(tb, tb->used++) = ch; tty_insert_flip_char()
29 return tty_insert_flip_string_flags(port, &ch, &flag, 1); tty_insert_flip_char()
16 tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag) tty_insert_flip_char() argument
/linux-4.1.27/arch/powerpc/platforms/powermac/
H A Dudbg_scc.c74 struct device_node *ch, *ch_def = NULL, *ch_a = NULL; udbg_scc_init() local
87 for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) { udbg_scc_init()
88 if (ch == stdout) udbg_scc_init()
89 ch_def = of_node_get(ch); udbg_scc_init()
90 if (strcmp(ch->name, "ch-a") == 0) udbg_scc_init()
91 ch_a = of_node_get(ch); udbg_scc_init()
96 ch = ch_def ? ch_def : ch_a; udbg_scc_init()
111 pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch, udbg_scc_init()
114 if (ch == ch_a) udbg_scc_init()
/linux-4.1.27/arch/mips/ralink/
H A Dearly_printk.c50 void prom_putchar(unsigned char ch) prom_putchar() argument
53 uart_w32(ch, UART_TX); prom_putchar()
59 uart_w32(ch, UART_REG_TX); prom_putchar()
/linux-4.1.27/drivers/scsi/esas2r/
H A Desas2r_flash.c245 struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS]; fix_bios() local
249 pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset); fix_bios()
272 esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0); fix_bios()
277 struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI]; fix_efi() local
278 u32 len = ch->length; fix_efi()
279 u32 offset = ch->image_offset; fix_efi()
329 struct esas2r_component_header *ch; fw_download_proc() local
344 ch = &fi->cmp_hdr[fc->comp_typ]; fw_download_proc()
348 + ch->image_offset /* start of the current image */ fw_download_proc()
349 + ch->length /* end of the current image */ fw_download_proc()
379 ch = &fi->cmp_hdr[fc->comp_typ]; fw_download_proc()
384 ch = &fi->cmp_hdr[CH_IT_BIOS]; fw_download_proc()
385 if (ch->length == 0) fw_download_proc()
392 fc->sgc.length = ch->length; fw_download_proc()
394 ch->image_offset; fw_download_proc()
405 fc->cmp_len = ch->length; fw_download_proc()
418 ch->status = CH_STAT_SUCCESS; fw_download_proc()
421 ch = &fi->cmp_hdr[CH_IT_MAC]; fw_download_proc()
422 if (ch->length == 0) fw_download_proc()
430 fc->sgc.length = ch->length; fw_download_proc()
432 ch->image_offset; fw_download_proc()
439 fc->flsh_addr -= ch->length; fw_download_proc()
440 fc->cmp_len = ch->length; fw_download_proc()
453 ch->status = CH_STAT_SUCCESS; fw_download_proc()
456 ch = &fi->cmp_hdr[CH_IT_EFI]; fw_download_proc()
457 if (ch->length == 0) fw_download_proc()
466 fc->sgc.length = ch->length; fw_download_proc()
468 ch->image_offset; fw_download_proc()
475 fc->flsh_addr -= ch->length; fw_download_proc()
476 fc->cmp_len = ch->length; fw_download_proc()
489 ch->status = CH_STAT_SUCCESS; fw_download_proc()
492 ch = &fi->cmp_hdr[CH_IT_CFG]; fw_download_proc()
494 if (ch->length == 0) fw_download_proc()
499 fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; fw_download_proc()
500 fc->sgc.length = ch->length; fw_download_proc()
502 ch->image_offset; fw_download_proc()
509 fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; fw_download_proc()
510 fc->cmp_len = ch->length; fw_download_proc()
523 ch->status = CH_STAT_SUCCESS; fw_download_proc()
552 ch = &fi->cmp_hdr[fc->comp_typ]; fw_download_proc()
553 ch->status = CH_STAT_FAILED; fw_download_proc()
700 struct esas2r_component_header *ch; verify_fi() local
725 for (i = 0, len = 0, ch = fi->cmp_hdr; verify_fi()
727 i++, ch++) { verify_fi()
734 if (i != ch->img_type) { verify_fi()
736 ch->status = CH_STAT_INVALID; verify_fi()
740 switch (ch->img_type) { verify_fi()
754 switch (ch->img_type) { verify_fi()
762 if (ch->length & 0x1ff) verify_fi()
766 if (ch->length == 0) verify_fi()
770 if (chk_boot((u8 *)fi + ch->image_offset, ch->length) verify_fi()
779 if (ch->length == 0) { verify_fi()
785 if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length, verify_fi()
786 ch->length, NULL)) verify_fi()
799 ch->status = CH_STAT_INVALID; verify_fi()
801 ch->status = CH_STAT_PENDING; verify_fi()
802 len += ch->length; verify_fi()
811 /* Compare fi->length to the sum of ch->length fields */ verify_fi()
1394 struct esas2r_component_header *ch; esas2r_fm_api() local
1430 ch = &fi->cmp_hdr[CH_IT_BIOS]; esas2r_fm_api()
1432 if (ch->length) esas2r_fm_api()
1436 ch = &fi->cmp_hdr[CH_IT_EFI]; esas2r_fm_api()
1438 if (ch->length) esas2r_fm_api()
1474 for (j = 0, ch = fi->cmp_hdr; esas2r_fm_api()
1476 j++, ch++) { esas2r_fm_api()
1477 ch->img_type = j; esas2r_fm_api()
1478 ch->status = CH_STAT_PENDING; esas2r_fm_api()
1479 ch->length = 0; esas2r_fm_api()
1480 ch->version = 0xffffffff; esas2r_fm_api()
1481 ch->image_offset = 0; esas2r_fm_api()
1482 ch->pad[0] = 0; esas2r_fm_api()
1483 ch->pad[1] = 0; esas2r_fm_api()
/linux-4.1.27/arch/arm/boot/compressed/
H A Dmisc.c34 static void icedcc_putc(int ch) icedcc_putc() argument
45 asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch)); icedcc_putc()
51 static void icedcc_putc(int ch) icedcc_putc() argument
62 asm("mcr p14, 0, %0, c8, c0, 0" : : "r" (ch)); icedcc_putc()
67 static void icedcc_putc(int ch) icedcc_putc() argument
78 asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch)); icedcc_putc()
83 #define putc(ch) icedcc_putc(ch)
/linux-4.1.27/lib/
H A Dhexdump.c22 * @ch: ascii character represents hex digit
27 int hex_to_bin(char ch) hex_to_bin() argument
29 if ((ch >= '0') && (ch <= '9')) hex_to_bin()
30 return ch - '0'; hex_to_bin()
31 ch = tolower(ch); hex_to_bin()
32 if ((ch >= 'a') && (ch <= 'f')) hex_to_bin()
33 return ch - 'a' + 10; hex_to_bin()
112 u8 ch; hex_dump_to_buffer() local
173 ch = ptr[j]; hex_dump_to_buffer()
174 linebuf[lx++] = hex_asc_hi(ch); hex_dump_to_buffer()
175 linebuf[lx++] = hex_asc_lo(ch); hex_dump_to_buffer()
192 ch = ptr[j]; hex_dump_to_buffer()
193 linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; hex_dump_to_buffer()
H A Dearlycpio.c73 unsigned int ch[C_NFIELDS], *chp, v; find_cpio_data() local
89 chp = ch; find_cpio_data()
114 if ((ch[C_MAGIC] - 0x070701) > 1) find_cpio_data()
119 dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4); find_cpio_data()
120 nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4); find_cpio_data()
125 if ((ch[C_MODE] & 0170000) == 0100000 && find_cpio_data()
126 ch[C_NAMESIZE] >= mypathsize && find_cpio_data()
129 if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) { find_cpio_data()
137 cd.size = ch[C_FILESIZE]; find_cpio_data()
/linux-4.1.27/sound/mips/
H A Dsgio2audio.c359 unsigned int ch, unsigned int count) snd_sgio2audio_dma_pull_frag()
369 struct snd_pcm_runtime *runtime = chip->channel[ch].substream->runtime; snd_sgio2audio_dma_pull_frag()
371 spin_lock_irqsave(&chip->channel[ch].lock, flags); snd_sgio2audio_dma_pull_frag()
373 src_base = (unsigned long) chip->ring_base | (ch << CHANNEL_RING_SHIFT); snd_sgio2audio_dma_pull_frag()
374 src_pos = readq(&mace->perif.audio.chan[ch].read_ptr); snd_sgio2audio_dma_pull_frag()
376 dst_pos = chip->channel[ch].pos; snd_sgio2audio_dma_pull_frag()
380 chip->channel[ch].size += (count >> 3); /* in frames */ snd_sgio2audio_dma_pull_frag()
381 ret = chip->channel[ch].size >= runtime->period_size; snd_sgio2audio_dma_pull_frag()
382 chip->channel[ch].size %= runtime->period_size; snd_sgio2audio_dma_pull_frag()
397 writeq(src_pos, &mace->perif.audio.chan[ch].read_ptr); /* in bytes */ snd_sgio2audio_dma_pull_frag()
398 chip->channel[ch].pos = dst_pos; snd_sgio2audio_dma_pull_frag()
400 spin_unlock_irqrestore(&chip->channel[ch].lock, flags); snd_sgio2audio_dma_pull_frag()
407 unsigned int ch, unsigned int count) snd_sgio2audio_dma_push_frag()
417 struct snd_pcm_runtime *runtime = chip->channel[ch].substream->runtime; snd_sgio2audio_dma_push_frag()
419 spin_lock_irqsave(&chip->channel[ch].lock, flags); snd_sgio2audio_dma_push_frag()
421 dst_base = (unsigned long)chip->ring_base | (ch << CHANNEL_RING_SHIFT); snd_sgio2audio_dma_push_frag()
422 dst_pos = readq(&mace->perif.audio.chan[ch].write_ptr); snd_sgio2audio_dma_push_frag()
424 src_pos = chip->channel[ch].pos; snd_sgio2audio_dma_push_frag()
428 chip->channel[ch].size += (count >> 3); /* in frames */ snd_sgio2audio_dma_push_frag()
429 ret = chip->channel[ch].size >= runtime->period_size; snd_sgio2audio_dma_push_frag()
430 chip->channel[ch].size %= runtime->period_size; snd_sgio2audio_dma_push_frag()
447 writeq(dst_pos, &mace->perif.audio.chan[ch].write_ptr); /* in bytes */ snd_sgio2audio_dma_push_frag()
448 chip->channel[ch].pos = src_pos; snd_sgio2audio_dma_push_frag()
450 spin_unlock_irqrestore(&chip->channel[ch].lock, flags); snd_sgio2audio_dma_push_frag()
458 int ch = chan->idx; snd_sgio2audio_dma_start() local
461 writeq(CHANNEL_CONTROL_RESET, &mace->perif.audio.chan[ch].control); snd_sgio2audio_dma_start()
463 writeq(0, &mace->perif.audio.chan[ch].control); snd_sgio2audio_dma_start()
467 snd_sgio2audio_dma_push_frag(chip, ch, CHANNEL_RING_SIZE - 32); snd_sgio2audio_dma_start()
471 &mace->perif.audio.chan[ch].control); snd_sgio2audio_dma_start()
488 int count, ch; snd_sgio2audio_dma_in_isr() local
492 ch = chan->idx; snd_sgio2audio_dma_in_isr()
496 readq(&mace->perif.audio.chan[ch].depth) - 32; snd_sgio2audio_dma_in_isr()
497 if (snd_sgio2audio_dma_pull_frag(chip, ch, count)) snd_sgio2audio_dma_in_isr()
508 int count, ch; snd_sgio2audio_dma_out_isr() local
512 ch = chan->idx; snd_sgio2audio_dma_out_isr()
515 readq(&mace->perif.audio.chan[ch].depth) - 32; snd_sgio2audio_dma_out_isr()
516 if (snd_sgio2audio_dma_push_frag(chip, ch, count)) snd_sgio2audio_dma_out_isr()
615 int ch = chan->idx; snd_sgio2audio_pcm_prepare() local
618 spin_lock_irqsave(&chip->channel[ch].lock, flags); snd_sgio2audio_pcm_prepare()
621 chip->channel[ch].pos = 0; snd_sgio2audio_pcm_prepare()
622 chip->channel[ch].size = 0; snd_sgio2audio_pcm_prepare()
623 chip->channel[ch].substream = substream; snd_sgio2audio_pcm_prepare()
630 ch - 1, snd_sgio2audio_pcm_prepare()
642 spin_unlock_irqrestore(&chip->channel[ch].lock, flags); snd_sgio2audio_pcm_prepare()
358 snd_sgio2audio_dma_pull_frag(struct snd_sgio2audio *chip, unsigned int ch, unsigned int count) snd_sgio2audio_dma_pull_frag() argument
406 snd_sgio2audio_dma_push_frag(struct snd_sgio2audio *chip, unsigned int ch, unsigned int count) snd_sgio2audio_dma_push_frag() argument
/linux-4.1.27/sound/core/
H A Dvmaster.c64 int err, ch; slave_update() local
71 for (ch = 0; ch < slave->info.count; ch++) slave_update()
72 slave->vals[ch] = uctl->value.integer.value[ch]; slave_update()
141 int err, ch; slave_get_val() local
146 for (ch = 0; ch < slave->info.count; ch++) slave_get_val()
147 ucontrol->value.integer.value[ch] = slave->vals[ch]; slave_get_val()
154 int err, ch, vol; slave_put_val() local
162 for (ch = 0; ch < slave->info.count; ch++) slave_put_val()
163 ucontrol->value.integer.value[ch] &= slave_put_val()
167 for (ch = 0; ch < slave->info.count; ch++) { slave_put_val()
169 vol = ucontrol->value.integer.value[ch]; slave_put_val()
175 ucontrol->value.integer.value[ch] = vol; slave_put_val()
203 int err, ch, changed = 0; slave_put() local
208 for (ch = 0; ch < slave->info.count; ch++) { slave_put()
209 if (slave->vals[ch] != ucontrol->value.integer.value[ch]) { slave_put()
211 slave->vals[ch] = ucontrol->value.integer.value[ch]; slave_put()
/linux-4.1.27/drivers/isdn/i4l/
H A Disdn_common.h30 extern void isdn_free_channel(int di, int ch, int usage);
31 extern void isdn_all_eaz(int di, int ch);
33 extern int isdn_dc2minor(int di, int ch);
37 extern void isdn_unexclusive_channel(int di, int ch);
/linux-4.1.27/drivers/staging/rtl8188eu/core/
H A Drtw_rf.c77 u32 ch = 0; rtw_freq2ch() local
81 ch = ch_freq_map[i].channel; rtw_freq2ch()
86 ch = 1; rtw_freq2ch()
88 return ch; rtw_freq2ch()
/linux-4.1.27/arch/mips/mti-sead3/
H A Dsead3-display.c44 char ch; mips_display_message() local
53 ch = *str++; mips_display_message()
55 ch = ' '; mips_display_message()
60 __raw_writel(ch, display + DISPLAY_LCDDATA); mips_display_message()
/linux-4.1.27/arch/nios2/include/asm/
H A Dmmu.h2 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
H A Dtraps.h2 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
H A Dpgtable-bits.h2 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
/linux-4.1.27/arch/ia64/hp/sim/
H A Dhpsim_console.c50 unsigned long ch; simcons_write() local
53 ch = *buf++; simcons_write()
54 ia64_ssc(ch, 0, 0, 0, SSC_PUTCHAR); simcons_write()
55 if (ch == '\n') simcons_write()
H A Dsimserial.c58 unsigned char ch; receive_chars() local
61 while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { receive_chars()
62 if (ch == 27 && seen_esc == 0) { receive_chars()
65 } else if (seen_esc == 1 && ch == 'O') { receive_chars()
69 if (ch == 'P') /* F1 */ receive_chars()
72 if (ch == 'S') { /* F4 */ receive_chars()
74 ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR); receive_chars()
75 } while (!ch); receive_chars()
76 handle_sysrq(ch); receive_chars()
84 if (tty_insert_flip_char(port, ch, TTY_NORMAL) == 0) receive_chars()
108 static int rs_put_char(struct tty_struct *tty, unsigned char ch) rs_put_char() argument
121 info->xmit.buf[info->xmit.head] = ch; rs_put_char()
255 static void rs_send_xchar(struct tty_struct *tty, char ch) rs_send_xchar() argument
259 info->x_char = ch; rs_send_xchar()
260 if (ch) { rs_send_xchar()
/linux-4.1.27/arch/arm/mach-mvebu/
H A Dboard.h6 * Andrew Lunn <andrew@lunn.ch>
/linux-4.1.27/drivers/hwmon/
H A Dmax6639.c41 #define MAX6639_REG_TEMP(ch) (0x00 + (ch))
45 #define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
46 #define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
47 #define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
48 #define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
49 #define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
50 #define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
51 #define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
52 #define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
53 #define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
54 #define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
55 #define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
56 #define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
57 #define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
/linux-4.1.27/drivers/staging/vt6656/
H A Dchannel.c146 struct ieee80211_channel *ch; vnt_init_bands() local
153 ch = vnt_channels_5ghz; vnt_init_bands()
156 ch[i].max_power = VNT_RF_MAX_POWER; vnt_init_bands()
157 ch[i].flags = IEEE80211_CHAN_NO_HT40; vnt_init_bands()
167 ch = vnt_channels_2ghz; vnt_init_bands()
170 ch[i].max_power = VNT_RF_MAX_POWER; vnt_init_bands()
171 ch[i].flags = IEEE80211_CHAN_NO_HT40; vnt_init_bands()
/linux-4.1.27/sound/pci/au88x0/
H A Dau8830.h79 #define OFFSET_SRCOUT 0x20 /* ch 0x11 */
80 #define OFFSET_MIXIN 0x50 /* ch 0x11 */
81 #define OFFSET_MIXOUT 0x30 /* ch 0x11 */
82 #define OFFSET_CODECIN 0x70 /* ch 0x11 */ /* adb source */
83 #define OFFSET_CODECOUT 0x88 /* ch 0x11 */ /* adb target */
84 #define OFFSET_SPORTIN 0x78 /* ch 0x13 ADB source. 2 routes. */
85 #define OFFSET_SPORTOUT 0x90 /* ch 0x13 ADB sink. 2 routes. */
86 #define OFFSET_SPDIFIN 0x7A /* ch 0x14 ADB source. */
87 #define OFFSET_SPDIFOUT 0x92 /* ch 0x14 ADB sink. */
88 #define OFFSET_AC98IN 0x7c /* ch 0x14 ADB source. */
89 #define OFFSET_AC98OUT 0x94 /* ch 0x14 ADB sink. */
90 #define OFFSET_EQIN 0xa0 /* ch 0x11 */
91 #define OFFSET_EQOUT 0x7e /* ch 0x11 */ /* 2 routes on ch 0x11 */
/linux-4.1.27/crypto/
H A Dgf128mul.c187 u8 ch = ((u8 *)b)[15 - i]; gf128mul_lle() local
189 if (ch & 0x80) gf128mul_lle()
191 if (ch & 0x40) gf128mul_lle()
193 if (ch & 0x20) gf128mul_lle()
195 if (ch & 0x10) gf128mul_lle()
197 if (ch & 0x08) gf128mul_lle()
199 if (ch & 0x04) gf128mul_lle()
201 if (ch & 0x02) gf128mul_lle()
203 if (ch & 0x01) gf128mul_lle()
225 u8 ch = ((u8 *)b)[i]; gf128mul_bbe() local
227 if (ch & 0x80) gf128mul_bbe()
229 if (ch & 0x40) gf128mul_bbe()
231 if (ch & 0x20) gf128mul_bbe()
233 if (ch & 0x10) gf128mul_bbe()
235 if (ch & 0x08) gf128mul_bbe()
237 if (ch & 0x04) gf128mul_bbe()
239 if (ch & 0x02) gf128mul_bbe()
241 if (ch & 0x01) gf128mul_bbe()
/linux-4.1.27/include/video/
H A Dimx-ipu-v3.h181 void ipu_cpmem_zero(struct ipuv3_channel *ch);
182 void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
183 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
184 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
185 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);
186 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride);
187 void ipu_cpmem_set_axi_id(struct ipuv3_channel *ch, u32 id);
188 void ipu_cpmem_set_burstsize(struct ipuv3_channel *ch, int burstsize);
189 void ipu_cpmem_set_block_mode(struct ipuv3_channel *ch);
190 void ipu_cpmem_set_rotation(struct ipuv3_channel *ch,
192 int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch,
194 int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width);
195 void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format);
196 void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
199 void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
201 int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc);
202 int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image);
203 void ipu_cpmem_dump(struct ipuv3_channel *ch);
/linux-4.1.27/arch/powerpc/crypto/
H A Dsha256-spe-asm.S112 and rT3,e,f; /* 1: ch = e and f */ \
114 andc rT1,g,e; /* 1: ch' = ~e and g */ \
116 xor rT3,rT3,rT1; /* 1: ch = ch xor ch' */ \
118 add rT3,rT3,w; /* 1: temp1' = ch + w */ \
139 and rT3,d,e; /* 2: ch = e and f */ \
141 andc rT1,f,d; /* 2: ch' = ~e and g */ \
143 xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
145 add rT3,rT3,w; /* 2: temp1' = ch + w */ \
174 and rT2,e,f; /* 1: ch = e and f */ \
176 andc rT3,g,e; /* 1: ch' = ~e and g */ \
178 xor rT2,rT2,rT3; /* 1: ch = ch xor ch' */ \
180 add h,h,rT2; /* 1: temp1 = temp1 + ch */ \
209 and rT3,d,e; /* 2: ch = e and f */ \
211 andc rT1,f,d; /* 2: ch' = ~e and g */ \
213 xor rT3,rT3,rT1; /* 2: ch = ch xor ch' */ \
215 add g,g,rT3; /* 2: temp1 = temp1 + ch */ \
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/
H A Ddxe.c31 struct wcn36xx_dxe_ch *ch = is_low ? wcn36xx_dxe_get_next_bd() local
35 return ch->head_blk_ctl->bd_cpu_addr; wcn36xx_dxe_get_next_bd()
64 static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch) wcn36xx_dxe_free_ctl_block() argument
66 struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next; wcn36xx_dxe_free_ctl_block()
69 for (i = 0; i < ch->desc_num && ctl; i++) { wcn36xx_dxe_free_ctl_block()
76 static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch) wcn36xx_dxe_allocate_ctl_block() argument
82 for (i = 0; i < ch->desc_num; i++) { wcn36xx_dxe_allocate_ctl_block()
90 ch->head_blk_ctl = cur_ctl; wcn36xx_dxe_allocate_ctl_block()
91 ch->tail_blk_ctl = cur_ctl; wcn36xx_dxe_allocate_ctl_block()
92 } else if (ch->desc_num - 1 == i) { wcn36xx_dxe_allocate_ctl_block()
94 cur_ctl->next = ch->head_blk_ctl; wcn36xx_dxe_allocate_ctl_block()
104 wcn36xx_dxe_free_ctl_block(ch); wcn36xx_dxe_allocate_ctl_block()
233 static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch, wcn36xx_dxe_init_tx_bd() argument
239 struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl; wcn36xx_dxe_init_tx_bd()
241 for (i = 0; i < ch->desc_num; i++) { wcn36xx_dxe_init_tx_bd()
346 static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) reap_tx_dxes() argument
348 struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl; reap_tx_dxes()
378 } while (ctl != ch->head_blk_ctl && reap_tx_dxes()
381 ch->tail_blk_ctl = ctl; reap_tx_dxes()
467 struct wcn36xx_dxe_ch *ch) wcn36xx_rx_handle_packets()
469 struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl; wcn36xx_rx_handle_packets()
479 switch (ch->ch_type) { wcn36xx_rx_handle_packets()
501 ch->head_blk_ctl = ctl; wcn36xx_rx_handle_packets()
597 struct wcn36xx_dxe_ch *ch = NULL; wcn36xx_dxe_tx_frame() local
600 ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch; wcn36xx_dxe_tx_frame()
602 ctl = ch->head_blk_ctl; wcn36xx_dxe_tx_frame()
625 desc->dst_addr_l = ch->dxe_wq; wcn36xx_dxe_tx_frame()
627 desc->ctrl = ch->ctrl_bd; wcn36xx_dxe_tx_frame()
651 desc->dst_addr_l = ch->dxe_wq; wcn36xx_dxe_tx_frame()
655 desc->ctrl = ch->ctrl_skb; wcn36xx_dxe_tx_frame()
663 ch->head_blk_ctl = ctl->next; wcn36xx_dxe_tx_frame()
679 ch->reg_ctrl, ch->def_ctrl); wcn36xx_dxe_tx_frame()
466 wcn36xx_rx_handle_packets(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) wcn36xx_rx_handle_packets() argument
/linux-4.1.27/drivers/s390/char/
H A Dkeyboard.h44 kbd_put_queue(struct tty_port *port, int ch) kbd_put_queue() argument
46 tty_insert_flip_char(port, ch, 0); kbd_put_queue()
/linux-4.1.27/arch/arm/mach-w90x900/include/mach/
H A Duncompress.h30 static void putc(int ch) putc() argument
37 *uart_base = ch; putc()
/linux-4.1.27/drivers/dma/sh/
H A Drcar-hpbdma.c143 static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch) hsrstr_write() argument
145 iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch)); hsrstr_write()
148 static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch) dintsr_read() argument
152 if (ch < 32) dintsr_read()
153 v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch; dintsr_read()
155 v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32); dintsr_read()
159 static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch) dintcr_write() argument
161 if (ch < 32) dintcr_write()
162 iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0); dintcr_write()
164 iowrite32((0x1 << (ch - 32)), dintcr_write()
178 static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch) hpb_dmae_enable_int() argument
183 if (ch < 32) { hpb_dmae_enable_int()
185 iowrite32(BIT(ch) | intreg, hpb_dmae_enable_int()
189 iowrite32(BIT(ch - 32) | intreg, hpb_dmae_enable_int()
240 u32 ch; hpb_dmae_reset() local
242 for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++) hpb_dmae_reset()
243 hsrstr_write(hpbdev, ch); hpb_dmae_reset()
350 int ch = chan->cfg->dma_ch; hpb_dmae_chan_irq() local
353 if (dintsr_read(hpbdev, ch)) { hpb_dmae_chan_irq()
355 dintcr_write(hpbdev, ch); hpb_dmae_chan_irq()
/linux-4.1.27/arch/mn10300/unit-asb2305/include/unit/
H A Dserial.h101 char ch; __debug_to_serial() local
109 ch = *p++; __debug_to_serial()
110 if (ch == 0x0a) { __debug_to_serial()
115 TTYS0_TX = ch; __debug_to_serial()
/linux-4.1.27/arch/nios2/mm/
H A Dextable.c2 * Copyright (C) 2010, Tobias Klauser <tklauser@distanz.ch>
/linux-4.1.27/drivers/iio/adc/
H A Dtwl6030-gpadc.c153 [0] = { /* ch 0, external, battery type, resistor value */
160 [1] = { /* ch 1, external, battery temperature, NTC resistor value */
167 [2] = { /* ch 2, external, audio accessory/general purpose */
174 [3] = { /* ch 3, external, general purpose */
181 [4] = { /* ch 4, external, temperature measurement/general purpose */
188 [5] = { /* ch 5, external, general purpose */
195 [6] = { /* ch 6, external, general purpose */
202 [7] = { /* ch 7, internal, main battery */
209 [8] = { /* ch 8, internal, backup battery */
216 [9] = { /* ch 9, internal, external charger input */
223 [10] = { /* ch 10, internal, VBUS */
230 [11] = { /* ch 11, internal, VBUS charging current */
233 /* ch 12, internal, Die temperature */
234 /* ch 13, internal, Die temperature */
235 [12] = { /* ch 14, internal, USB ID line */
246 [0] = { /* ch 0, external, battery type, resistor value */
253 [1] = { /* ch 1, external, battery temperature, NTC resistor value */
260 [2] = { /* ch 2, external, audio accessory/general purpose */
267 [3] = { /* ch 3, external, temperature with external diode/general
275 [4] = { /* ch 4, external, temperature measurement/general purpose */
282 [5] = { /* ch 5, external, general purpose */
289 [6] = { /* ch 6, external, general purpose */
310 [9] = { /* ch 9, internal, external charger input */
324 [11] = { /* ch 11, internal, VBUS DC-DC output current */
331 /* ch 12, internal, Die temperature */
332 /* ch 13, internal, Die temperature */
333 [12] = { /* ch 14, internal, USB ID line */
340 /* ch 15, internal, test network */
341 /* ch 16, internal, test network */
342 [13] = { /* ch 17, internal, battery charging current */
345 [14] = { /* ch 18, internal, battery voltage */
/linux-4.1.27/drivers/rapidio/devices/
H A Dtsi721.c41 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
42 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
504 int ch; tsi721_irqhandler() local
519 for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { tsi721_irqhandler()
520 if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) tsi721_irqhandler()
522 tsi721_imsg_handler(priv, ch); tsi721_irqhandler()
536 for (ch = 0; ch < RIO_MAX_MBOX; ch++) { tsi721_irqhandler()
537 if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) tsi721_irqhandler()
539 tsi721_omsg_handler(priv, ch); tsi721_irqhandler()
553 int ch; tsi721_irqhandler() local
559 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { tsi721_irqhandler()
560 if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) tsi721_irqhandler()
562 tsi721_bdma_handler(&priv->bdma[ch]); tsi721_irqhandler()
1172 tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, tsi721_imsg_interrupt_enable() argument
1181 iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); tsi721_imsg_interrupt_enable()
1184 rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); tsi721_imsg_interrupt_enable()
1185 iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); tsi721_imsg_interrupt_enable()
1196 iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), tsi721_imsg_interrupt_enable()
1202 tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch, tsi721_imsg_interrupt_disable() argument
1211 iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); tsi721_imsg_interrupt_disable()
1214 rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); tsi721_imsg_interrupt_disable()
1216 iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch)); tsi721_imsg_interrupt_disable()
1227 rval &= ~TSI721_INT_IMSG_CHAN(ch); tsi721_imsg_interrupt_disable()
1233 tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, tsi721_omsg_interrupt_enable() argument
1242 iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); tsi721_omsg_interrupt_enable()
1245 rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); tsi721_omsg_interrupt_enable()
1246 iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); tsi721_omsg_interrupt_enable()
1257 iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), tsi721_omsg_interrupt_enable()
1263 tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, tsi721_omsg_interrupt_disable() argument
1272 iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); tsi721_omsg_interrupt_disable()
1275 rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); tsi721_omsg_interrupt_disable()
1277 iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch)); tsi721_omsg_interrupt_disable()
1288 rval &= ~TSI721_INT_OMSG_CHAN(ch); tsi721_omsg_interrupt_disable()
1357 * @ch: number of OB MSG channel to service
1361 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) tsi721_omsg_handler() argument
1365 spin_lock(&priv->omsg_ring[ch].lock); tsi721_omsg_handler()
1367 omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch)); tsi721_omsg_handler()
1371 "OB MBOX%d: Status FIFO is full\n", ch); tsi721_omsg_handler()
1384 srd_ptr = priv->omsg_ring[ch].sts_rdptr; tsi721_omsg_handler()
1385 sts_ptr = priv->omsg_ring[ch].sts_base; tsi721_omsg_handler()
1395 srd_ptr %= priv->omsg_ring[ch].sts_size; tsi721_omsg_handler()
1402 priv->omsg_ring[ch].sts_rdptr = srd_ptr; tsi721_omsg_handler()
1403 iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch)); tsi721_omsg_handler()
1405 if (!priv->mport->outb_msg[ch].mcback) tsi721_omsg_handler()
1410 tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/ tsi721_omsg_handler()
1418 if (tx_slot == priv->omsg_ring[ch].size) { tsi721_omsg_handler()
1421 (u64)priv->omsg_ring[ch].omd_phys)/ tsi721_omsg_handler()
1429 if (tx_slot == priv->omsg_ring[ch].size) tsi721_omsg_handler()
1431 BUG_ON(tx_slot >= priv->omsg_ring[ch].size); tsi721_omsg_handler()
1432 priv->mport->outb_msg[ch].mcback(priv->mport, tsi721_omsg_handler()
1433 priv->omsg_ring[ch].dev_id, ch, tsi721_omsg_handler()
1446 ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); tsi721_omsg_handler()
1449 priv->regs + TSI721_OBDMAC_INT(ch)); tsi721_omsg_handler()
1451 priv->regs + TSI721_OBDMAC_CTL(ch)); tsi721_omsg_handler()
1452 ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); tsi721_omsg_handler()
1455 if (priv->mport->outb_msg[ch].mcback) tsi721_omsg_handler()
1456 priv->mport->outb_msg[ch].mcback(priv->mport, tsi721_omsg_handler()
1457 priv->omsg_ring[ch].dev_id, ch, tsi721_omsg_handler()
1458 priv->omsg_ring[ch].tx_slot); tsi721_omsg_handler()
1460 iowrite32(priv->omsg_ring[ch].tx_slot, tsi721_omsg_handler()
1461 priv->regs + TSI721_OBDMAC_DRDCNT(ch)); tsi721_omsg_handler()
1462 ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch)); tsi721_omsg_handler()
1463 priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot; tsi721_omsg_handler()
1464 priv->omsg_ring[ch].sts_rdptr = 0; tsi721_omsg_handler()
1468 iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch)); tsi721_omsg_handler()
1475 ch_inte |= TSI721_INT_OMSG_CHAN(ch); tsi721_omsg_handler()
1479 spin_unlock(&priv->omsg_ring[ch].lock); tsi721_omsg_handler()
1726 * @ch: inbound message channel number to service
1730 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) tsi721_imsg_handler() argument
1732 u32 mbox = ch - 4; tsi721_imsg_handler()
1737 imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); tsi721_imsg_handler()
1752 iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); tsi721_imsg_handler()
1765 ch_inte |= TSI721_INT_IMSG_CHAN(ch); tsi721_imsg_handler()
1783 int ch = mbox + 4; tsi721_open_inb_mbox() local
1868 * Configure Inbound Messaging channel (ch = mbox + 4) tsi721_open_inb_mbox()
1873 priv->regs + TSI721_IBDMAC_FQBH(ch)); tsi721_open_inb_mbox()
1876 priv->regs+TSI721_IBDMAC_FQBL(ch)); tsi721_open_inb_mbox()
1878 priv->regs + TSI721_IBDMAC_FQSZ(ch)); tsi721_open_inb_mbox()
1882 priv->regs + TSI721_IBDMAC_DQBH(ch)); tsi721_open_inb_mbox()
1885 priv->regs+TSI721_IBDMAC_DQBL(ch)); tsi721_open_inb_mbox()
1887 priv->regs + TSI721_IBDMAC_DQSZ(ch)); tsi721_open_inb_mbox()
1923 tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL); tsi721_open_inb_mbox()
1926 iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch)); tsi721_open_inb_mbox()
1927 ioread32(priv->regs + TSI721_IBDMAC_CTL(ch)); tsi721_open_inb_mbox()
1930 iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); tsi721_open_inb_mbox()
1974 int ch = mbox + 4; tsi721_close_inb_mbox() local
1983 tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK); tsi721_close_inb_mbox()
2069 int ch = mbox + 4; tsi721_get_inb_message() local
2106 priv->regs + TSI721_IBDMAC_DQRP(ch)); tsi721_get_inb_message()
2116 priv->regs + TSI721_IBDMAC_FQWP(ch)); tsi721_get_inb_message()
2129 int ch; tsi721_messages_init() local
2139 for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) { tsi721_messages_init()
2142 priv->regs + TSI721_IBDMAC_INT(ch)); tsi721_messages_init()
2144 iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch)); tsi721_messages_init()
2147 priv->regs + TSI721_SMSG_ECC_COR_LOG(ch)); tsi721_messages_init()
2149 priv->regs + TSI721_SMSG_ECC_NCOR(ch)); tsi721_messages_init()
2161 int ch; tsi721_disable_ints() local
2170 for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) tsi721_disable_ints()
2171 iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch)); tsi721_disable_ints()
2174 for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++) tsi721_disable_ints()
2175 iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch)); tsi721_disable_ints()
2181 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) tsi721_disable_ints()
2183 priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); tsi721_disable_ints()
2189 for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++) tsi721_disable_ints()
2190 iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch)); tsi721_disable_ints()
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duisqueue.c53 unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue, spar_signal_insert() argument
61 ((char __iomem *)ch + readq(&ch->ch_space_offset)) spar_signal_insert()
112 spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig) spar_signal_remove() argument
117 (struct signal_queue_header __iomem *)((char __iomem *)ch + spar_signal_remove()
118 readq(&ch->ch_space_offset)) + queue; spar_signal_remove()
166 unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue, spar_signal_remove_all() argument
172 (struct signal_queue_header *)((char *)ch + spar_signal_remove_all()
173 ch->ch_space_offset) + queue; spar_signal_remove_all()
217 unsigned char spar_signalqueue_empty(struct channel_header __iomem *ch, spar_signalqueue_empty() argument
221 (struct signal_queue_header __iomem *)((char __iomem *)ch + spar_signalqueue_empty()
222 readq(&ch->ch_space_offset)) + queue; spar_signalqueue_empty()
/linux-4.1.27/arch/sparc/prom/
H A Dprintf.c40 char ch = *buf++; prom_write() local
41 if (ch == '\n') { prom_write()
45 *dest++ = ch; prom_write()
/linux-4.1.27/fs/affs/
H A Dnamei.c15 static int affs_toupper(int ch);
19 static int affs_intl_toupper(int ch);
38 affs_toupper(int ch) affs_toupper() argument
40 return ch >= 'a' && ch <= 'z' ? ch -= ('a' - 'A') : ch; affs_toupper()
46 affs_intl_toupper(int ch) affs_intl_toupper() argument
48 return (ch >= 'a' && ch <= 'z') || (ch >= 0xE0 affs_intl_toupper()
49 && ch <= 0xFE && ch != 0xF7) ? affs_intl_toupper()
50 ch - ('a' - 'A') : ch; affs_intl_toupper()
/linux-4.1.27/drivers/net/wireless/ath/
H A Dregd.c263 struct ieee80211_channel *ch) ath_force_clear_no_ir_chan()
267 reg_rule = freq_reg_info(wiphy, MHZ_TO_KHZ(ch->center_freq)); ath_force_clear_no_ir_chan()
272 if (ch->flags & IEEE80211_CHAN_NO_IR) ath_force_clear_no_ir_chan()
273 ch->flags &= ~IEEE80211_CHAN_NO_IR; ath_force_clear_no_ir_chan()
278 struct ieee80211_channel *ch; ath_force_clear_no_ir_freq() local
280 ch = ieee80211_get_channel(wiphy, center_freq); ath_force_clear_no_ir_freq()
281 if (!ch) ath_force_clear_no_ir_freq()
284 ath_force_clear_no_ir_chan(wiphy, ch); ath_force_clear_no_ir_freq()
287 static void ath_force_no_ir_chan(struct ieee80211_channel *ch) ath_force_no_ir_chan() argument
289 ch->flags |= IEEE80211_CHAN_NO_IR; ath_force_no_ir_chan()
294 struct ieee80211_channel *ch; ath_force_no_ir_freq() local
296 ch = ieee80211_get_channel(wiphy, center_freq); ath_force_no_ir_freq()
297 if (!ch) ath_force_no_ir_freq()
300 ath_force_no_ir_chan(ch); ath_force_no_ir_freq()
307 struct ieee80211_channel *ch) __ath_reg_apply_beaconing_flags()
309 if (ath_is_radar_freq(ch->center_freq) || __ath_reg_apply_beaconing_flags()
310 (ch->flags & IEEE80211_CHAN_RADAR)) __ath_reg_apply_beaconing_flags()
315 ath_force_clear_no_ir_chan(wiphy, ch); __ath_reg_apply_beaconing_flags()
319 ath_force_clear_no_ir_chan(wiphy, ch); __ath_reg_apply_beaconing_flags()
322 if (ch->beacon_found) __ath_reg_apply_beaconing_flags()
323 ch->flags &= ~IEEE80211_CHAN_NO_IR; __ath_reg_apply_beaconing_flags()
341 struct ieee80211_channel *ch; ath_reg_apply_beaconing_flags() local
349 ch = &sband->channels[i]; ath_reg_apply_beaconing_flags()
351 initiator, ch); ath_reg_apply_beaconing_flags()
402 struct ieee80211_channel *ch; ath_reg_apply_radar_flags() local
411 ch = &sband->channels[i]; ath_reg_apply_radar_flags()
412 if (!ath_is_radar_freq(ch->center_freq)) ath_reg_apply_radar_flags()
424 if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ath_reg_apply_radar_flags()
425 ch->flags |= IEEE80211_CHAN_RADAR | ath_reg_apply_radar_flags()
262 ath_force_clear_no_ir_chan(struct wiphy *wiphy, struct ieee80211_channel *ch) ath_force_clear_no_ir_chan() argument
304 __ath_reg_apply_beaconing_flags(struct wiphy *wiphy, struct ath_regulatory *reg, enum nl80211_reg_initiator initiator, struct ieee80211_channel *ch) __ath_reg_apply_beaconing_flags() argument
/linux-4.1.27/drivers/macintosh/
H A Dans-lcd.c84 char ch, __user *temp; anslcd_ioctl() local
104 __get_user(ch, temp); anslcd_ioctl()
105 for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */ anslcd_ioctl()
106 anslcd_write_byte_ctrl ( ch ); anslcd_ioctl()
107 __get_user(ch, temp); anslcd_ioctl()
/linux-4.1.27/net/ipv4/netfilter/
H A Dnf_nat_snmp_basic.c162 static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) asn1_octet_decode() argument
168 *ch = *(ctx->pointer)++; asn1_octet_decode()
174 unsigned char ch; asn1_tag_decode() local
180 if (!asn1_octet_decode(ctx, &ch)) asn1_tag_decode()
183 *tag |= ch & 0x7F; asn1_tag_decode()
184 } while ((ch & 0x80) == 0x80); asn1_tag_decode()
193 unsigned char ch; asn1_id_decode() local
195 if (!asn1_octet_decode(ctx, &ch)) asn1_id_decode()
198 *cls = (ch & 0xC0) >> 6; asn1_id_decode()
199 *con = (ch & 0x20) >> 5; asn1_id_decode()
200 *tag = (ch & 0x1F); asn1_id_decode()
213 unsigned char ch, cnt; asn1_length_decode() local
215 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
218 if (ch == 0x80) asn1_length_decode()
223 if (ch < 0x80) asn1_length_decode()
224 *len = ch; asn1_length_decode()
226 cnt = ch & 0x7F; asn1_length_decode()
230 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
233 *len |= ch; asn1_length_decode()
274 unsigned char ch; asn1_eoc_decode() local
277 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
280 if (ch != 0x00) { asn1_eoc_decode()
285 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
288 if (ch != 0x00) { asn1_eoc_decode()
312 unsigned char ch; asn1_long_decode() local
315 if (!asn1_octet_decode(ctx, &ch)) asn1_long_decode()
318 *integer = (signed char) ch; asn1_long_decode()
327 if (!asn1_octet_decode(ctx, &ch)) asn1_long_decode()
331 *integer |= ch; asn1_long_decode()
340 unsigned char ch; asn1_uint_decode() local
343 if (!asn1_octet_decode(ctx, &ch)) asn1_uint_decode()
346 *integer = ch; asn1_uint_decode()
347 if (ch == 0) len = 0; asn1_uint_decode()
356 if (!asn1_octet_decode(ctx, &ch)) asn1_uint_decode()
360 *integer |= ch; asn1_uint_decode()
369 unsigned char ch; asn1_ulong_decode() local
372 if (!asn1_octet_decode(ctx, &ch)) asn1_ulong_decode()
375 *integer = ch; asn1_ulong_decode()
376 if (ch == 0) len = 0; asn1_ulong_decode()
385 if (!asn1_octet_decode(ctx, &ch)) asn1_ulong_decode()
389 *integer |= ch; asn1_ulong_decode()
422 unsigned char ch; asn1_subid_decode() local
427 if (!asn1_octet_decode(ctx, &ch)) asn1_subid_decode()
431 *subid |= ch & 0x7F; asn1_subid_decode()
432 } while ((ch & 0x80) == 0x80); asn1_subid_decode()
/linux-4.1.27/drivers/spi/
H A Dspi-rockchip.c158 struct dma_chan *ch; member in struct:rockchip_spi_dma_data
322 dmaengine_terminate_all(rs->dma_rx.ch); rockchip_spi_handle_err()
327 dmaengine_terminate_all(rs->dma_tx.ch); rockchip_spi_handle_err()
453 dmaengine_slave_config(rs->dma_rx.ch, &rxconf); rockchip_spi_prepare_dma()
456 rs->dma_rx.ch, rockchip_spi_prepare_dma()
470 dmaengine_slave_config(rs->dma_tx.ch, &txconf); rockchip_spi_prepare_dma()
473 rs->dma_tx.ch, rockchip_spi_prepare_dma()
487 dma_async_issue_pending(rs->dma_rx.ch); rockchip_spi_prepare_dma()
495 dma_async_issue_pending(rs->dma_tx.ch); rockchip_spi_prepare_dma()
720 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); rockchip_spi_probe()
721 if (!rs->dma_tx.ch) rockchip_spi_probe()
724 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); rockchip_spi_probe()
725 if (!rs->dma_rx.ch) { rockchip_spi_probe()
726 if (rs->dma_tx.ch) { rockchip_spi_probe()
727 dma_release_channel(rs->dma_tx.ch); rockchip_spi_probe()
728 rs->dma_tx.ch = NULL; rockchip_spi_probe()
733 if (rs->dma_tx.ch && rs->dma_rx.ch) { rockchip_spi_probe()
740 master->dma_tx = rs->dma_tx.ch; rockchip_spi_probe()
741 master->dma_rx = rs->dma_rx.ch; rockchip_spi_probe()
753 if (rs->dma_tx.ch) rockchip_spi_probe()
754 dma_release_channel(rs->dma_tx.ch); rockchip_spi_probe()
755 if (rs->dma_rx.ch) rockchip_spi_probe()
756 dma_release_channel(rs->dma_rx.ch); rockchip_spi_probe()
777 if (rs->dma_tx.ch) rockchip_spi_remove()
778 dma_release_channel(rs->dma_tx.ch); rockchip_spi_remove()
779 if (rs->dma_rx.ch) rockchip_spi_remove()
780 dma_release_channel(rs->dma_rx.ch); rockchip_spi_remove()
/linux-4.1.27/sound/usb/6fire/
H A Dcontrol.c193 unsigned int ch = kcontrol->private_value; usb6fire_control_output_vol_put() local
196 if (ch > 4) { usb6fire_control_output_vol_put()
202 if (rt->output_vol[ch] != ucontrol->value.integer.value[0]) { usb6fire_control_output_vol_put()
203 rt->output_vol[ch] = ucontrol->value.integer.value[0]; usb6fire_control_output_vol_put()
204 rt->ovol_updated &= ~(1 << ch); usb6fire_control_output_vol_put()
207 if (rt->output_vol[ch + 1] != ucontrol->value.integer.value[1]) { usb6fire_control_output_vol_put()
208 rt->output_vol[ch + 1] = ucontrol->value.integer.value[1]; usb6fire_control_output_vol_put()
209 rt->ovol_updated &= ~(2 << ch); usb6fire_control_output_vol_put()
223 unsigned int ch = kcontrol->private_value; usb6fire_control_output_vol_get() local
225 if (ch > 4) { usb6fire_control_output_vol_get()
231 ucontrol->value.integer.value[0] = rt->output_vol[ch]; usb6fire_control_output_vol_get()
232 ucontrol->value.integer.value[1] = rt->output_vol[ch + 1]; usb6fire_control_output_vol_get()
240 unsigned int ch = kcontrol->private_value; usb6fire_control_output_mute_put() local
244 if (ch > 4) { usb6fire_control_output_mute_put()
250 rt->output_mute &= ~(3 << ch); usb6fire_control_output_mute_put()
255 rt->output_mute |= value << ch; usb6fire_control_output_mute_put()
267 unsigned int ch = kcontrol->private_value; usb6fire_control_output_mute_get() local
268 u8 value = rt->output_mute >> ch; usb6fire_control_output_mute_get()
270 if (ch > 4) { usb6fire_control_output_mute_get()
/linux-4.1.27/tools/thermal/tmon/
H A Dtui.c415 static void handle_input_val(int ch) handle_input_val() argument
427 if (ch == ptdata.nr_cooling_dev) { handle_input_val()
439 CDEV, ptdata.cdi[ch].instance); handle_input_val()
451 static void handle_input_choice(int ch) handle_input_choice() argument
457 if ((ch >= 'A' && ch <= 'A' + ptdata.nr_cooling_dev) || handle_input_choice()
458 (ch >= 'a' && ch <= 'a' + ptdata.nr_cooling_dev)) { handle_input_choice()
459 base = (ch < 'a') ? 'A' : 'a'; handle_input_choice()
460 cdev_id = ch - base; handle_input_choice()
470 snprintf(buf, sizeof(buf), "Invalid selection %d", ch); handle_input_choice()
477 int ch; handle_tui_events() local
480 while ((ch = wgetch(cooling_device_window)) != EOF) { handle_tui_events()
494 handle_input_choice(ch); handle_tui_events()
496 if (ch == 'q' || ch == 'Q') handle_tui_events()
497 ch = 0; handle_tui_events()
499 switch (ch) { handle_tui_events()
651 syslog(LOG_DEBUG, "draw tz %d tp %d ch:%c\n", show_sensors_w()
/linux-4.1.27/drivers/usb/serial/
H A Dquatech2.c133 static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch);
134 static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch);
500 static void qt2_process_status(struct usb_serial_port *port, unsigned char *ch) qt2_process_status() argument
502 switch (*ch) { qt2_process_status()
504 qt2_update_lsr(port, ch + 1); qt2_process_status()
507 qt2_update_msr(port, ch + 1); qt2_process_status()
514 unsigned char *ch) qt2_process_xmit_empty()
518 bytes_written = (int)(*ch) + (int)(*(ch + 1) << 4); qt2_process_xmit_empty()
522 static void qt2_process_flush(struct usb_serial_port *port, unsigned char *ch) qt2_process_flush() argument
534 unsigned char *ch; qt2_process_read_urb() local
542 ch = urb->transfer_buffer; qt2_process_read_urb()
549 ch = (unsigned char *)urb->transfer_buffer + i; qt2_process_read_urb()
551 (*ch == QT2_CONTROL_BYTE) && qt2_process_read_urb()
552 (*(ch + 1) == QT2_CONTROL_BYTE)) { qt2_process_read_urb()
554 switch (*(ch + 2)) { qt2_process_read_urb()
563 qt2_process_status(port, ch + 2); qt2_process_read_urb()
574 qt2_process_xmit_empty(port, ch + 3); qt2_process_read_urb()
587 newport = *(ch + 3); qt2_process_read_urb()
604 qt2_process_flush(port, ch + 2); qt2_process_read_urb()
610 tty_insert_flip_string(&port->port, ch, 2); qt2_process_read_urb()
617 __func__, *(ch + 2)); qt2_process_read_urb()
625 tty_insert_flip_string(&port->port, ch, 1); qt2_process_read_urb()
871 static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) qt2_update_msr() argument
874 u8 newMSR = (u8) *ch; qt2_update_msr()
898 static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) qt2_update_lsr() argument
903 u8 newLSR = (u8) *ch; qt2_update_lsr()
513 qt2_process_xmit_empty(struct usb_serial_port *port, unsigned char *ch) qt2_process_xmit_empty() argument
/linux-4.1.27/drivers/tty/
H A Dmoxa.c294 struct moxa_port *ch = tty->driver_data; moxa_ioctl() local
302 } else if (!ch) moxa_ioctl()
312 MoxaPortFlushData(ch, arg); moxa_ioctl()
336 status = MoxaPortTxQueue(ch); moxa_ioctl()
340 status = MoxaPortRxQueue(ch); moxa_ioctl()
384 mutex_lock(&ch->port.mutex); moxa_ioctl()
385 ret = moxa_get_serial_info(ch, argp); moxa_ioctl()
386 mutex_unlock(&ch->port.mutex); moxa_ioctl()
389 mutex_lock(&ch->port.mutex); moxa_ioctl()
390 ret = moxa_set_serial_info(ch, argp); moxa_ioctl()
391 mutex_unlock(&ch->port.mutex); moxa_ioctl()
1145 struct moxa_port *ch = container_of(port, struct moxa_port, port); moxa_shutdown() local
1146 MoxaPortDisable(ch); moxa_shutdown()
1147 MoxaPortFlushData(ch, 2); moxa_shutdown()
1152 struct moxa_port *ch = container_of(port, struct moxa_port, port); moxa_carrier_raised() local
1156 dcd = ch->DCDState; moxa_carrier_raised()
1163 struct moxa_port *ch = container_of(port, struct moxa_port, port); moxa_dtr_rts() local
1164 MoxaPortLineCtrl(ch, onoff, onoff); moxa_dtr_rts()
1171 struct moxa_port *ch; moxa_open() local
1191 ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; moxa_open()
1192 ch->port.count++; moxa_open()
1193 tty->driver_data = ch; moxa_open()
1194 tty_port_tty_set(&ch->port, tty); moxa_open()
1195 mutex_lock(&ch->port.mutex); moxa_open()
1196 if (!(ch->port.flags & ASYNC_INITIALIZED)) { moxa_open()
1197 ch->statusflags = 0; moxa_open()
1199 MoxaPortLineCtrl(ch, 1, 1); moxa_open()
1200 MoxaPortEnable(ch); moxa_open()
1201 MoxaSetFifo(ch, ch->type == PORT_16550A); moxa_open()
1202 ch->port.flags |= ASYNC_INITIALIZED; moxa_open()
1204 mutex_unlock(&ch->port.mutex); moxa_open()
1207 return tty_port_block_til_ready(&ch->port, tty, filp); moxa_open()
1212 struct moxa_port *ch = tty->driver_data; moxa_close() local
1213 ch->cflag = tty->termios.c_cflag; moxa_close()
1214 tty_port_close(&ch->port, tty, filp); moxa_close()
1220 struct moxa_port *ch = tty->driver_data; moxa_write() local
1224 if (ch == NULL) moxa_write()
1231 set_bit(LOWWAIT, &ch->statusflags); moxa_write()
1237 struct moxa_port *ch; moxa_write_room() local
1241 ch = tty->driver_data; moxa_write_room()
1242 if (ch == NULL) moxa_write_room()
1244 return MoxaPortTxFree(ch); moxa_write_room()
1249 struct moxa_port *ch = tty->driver_data; moxa_flush_buffer() local
1251 if (ch == NULL) moxa_flush_buffer()
1253 MoxaPortFlushData(ch, 1); moxa_flush_buffer()
1259 struct moxa_port *ch = tty->driver_data; moxa_chars_in_buffer() local
1262 chars = MoxaPortTxQueue(ch); moxa_chars_in_buffer()
1268 set_bit(EMPTYWAIT, &ch->statusflags); moxa_chars_in_buffer()
1274 struct moxa_port *ch = tty->driver_data; moxa_tiocmget() local
1277 MoxaPortGetLineOut(ch, &dtr, &rts); moxa_tiocmget()
1282 dtr = MoxaPortLineStatus(ch); moxa_tiocmget()
1295 struct moxa_port *ch; moxa_tiocmset() local
1299 ch = tty->driver_data; moxa_tiocmset()
1300 if (!ch) { moxa_tiocmset()
1305 MoxaPortGetLineOut(ch, &dtr, &rts); moxa_tiocmset()
1314 MoxaPortLineCtrl(ch, dtr, rts); moxa_tiocmset()
1322 struct moxa_port *ch = tty->driver_data; moxa_set_termios() local
1324 if (ch == NULL) moxa_set_termios()
1328 wake_up_interruptible(&ch->port.open_wait); moxa_set_termios()
1333 struct moxa_port *ch = tty->driver_data; moxa_stop() local
1335 if (ch == NULL) moxa_stop()
1337 MoxaPortTxDisable(ch); moxa_stop()
1338 set_bit(TXSTOPPED, &ch->statusflags); moxa_stop()
1344 struct moxa_port *ch = tty->driver_data; moxa_start() local
1346 if (ch == NULL) moxa_start()
1349 if (!test_bit(TXSTOPPED, &ch->statusflags)) moxa_start()
1352 MoxaPortTxEnable(ch); moxa_start()
1353 clear_bit(TXSTOPPED, &ch->statusflags); moxa_start()
1358 struct moxa_port *ch = tty->driver_data; moxa_hangup() local
1359 tty_port_hangup(&ch->port); moxa_hangup()
1482 struct moxa_port *ch = tty->driver_data; moxa_set_tty_param() local
1497 MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany); moxa_set_tty_param()
1498 baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty)); moxa_set_tty_param()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-debugifc.c36 char ch; debugifc_count_whitespace() local
39 ch = buf[scnt]; debugifc_count_whitespace()
40 if (ch == ' ') continue; debugifc_count_whitespace()
41 if (ch == '\t') continue; debugifc_count_whitespace()
42 if (ch == '\n') continue; debugifc_count_whitespace()
53 char ch; debugifc_count_nonwhitespace() local
56 ch = buf[scnt]; debugifc_count_nonwhitespace()
57 if (ch == ' ') break; debugifc_count_nonwhitespace()
58 if (ch == '\t') break; debugifc_count_nonwhitespace()
59 if (ch == '\n') break; debugifc_count_nonwhitespace()
/linux-4.1.27/include/sound/
H A Dhda_regmap.h38 * @ch: channel (left = 0, right = 1)
44 #define snd_hdac_regmap_encode_amp(nid, ch, dir, idx) \
46 ((ch) ? AC_AMP_GET_RIGHT : AC_AMP_GET_LEFT) | \
121 * @ch: channel (left=0 or right=1)
131 int ch, int dir, int idx) snd_hdac_regmap_get_amp()
133 unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); snd_hdac_regmap_get_amp()
144 * @ch: channel (left=0 or right=1)
155 int ch, int dir, int idx, int mask, int val) snd_hdac_regmap_update_amp()
157 unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); snd_hdac_regmap_update_amp()
166 * @ch: channel (left=0 or right=1)
130 snd_hdac_regmap_get_amp(struct hdac_device *codec, hda_nid_t nid, int ch, int dir, int idx) snd_hdac_regmap_get_amp() argument
154 snd_hdac_regmap_update_amp(struct hdac_device *codec, hda_nid_t nid, int ch, int dir, int idx, int mask, int val) snd_hdac_regmap_update_amp() argument
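
A minimal usage sketch of the per-channel amp accessors documented above (snd_hdac_regmap_get_amp / snd_hdac_regmap_update_amp). The wrapper function example_amp_mute and its error handling are illustrative assumptions, not part of the header; only the two accessors and their nid/ch/dir/idx parameters come from the source above, and 0x80 is used on the assumption that bit 7 of the amp value is the hardware mute flag.

#include <sound/hdaudio.h>
#include <sound/hda_regmap.h>

/* Illustrative helper: read back one channel's amp setting, then set only
 * its mute bit.  'ch' selects left (0) or right (1) as documented above. */
static int example_amp_mute(struct hdac_device *codec, hda_nid_t nid,
			    int ch, int dir, int idx)
{
	int val = snd_hdac_regmap_get_amp(codec, nid, ch, dir, idx);

	if (val < 0)
		return val;

	/* assumption: bit 7 (0x80) of the amp value is the mute flag */
	return snd_hdac_regmap_update_amp(codec, nid, ch, dir, idx,
					  0x80, 0x80);
}
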
/linux-4.1.27/arch/powerpc/platforms/embedded6xx/
H A Dusbgecko_udbg.c102 static void ug_raw_putc(char ch) ug_raw_putc() argument
104 ug_io_transaction(0xb0000000 | (ch << 20)); ug_raw_putc()
111 static void ug_putc(char ch) ug_putc() argument
118 if (ch == '\n') ug_putc()
124 ug_raw_putc(ch); ug_putc()
172 void ug_udbg_putc(char ch) ug_udbg_putc() argument
174 ug_putc(ch); ug_udbg_putc()
182 int ch; ug_udbg_getc() local
184 while ((ch = ug_getc()) == -1) ug_udbg_getc()
186 return ch; ug_udbg_getc()
/linux-4.1.27/fs/nfs/
H A Ddns_resolve.c131 struct cache_head *ch, nfs_dns_request()
134 struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); nfs_dns_request()
141 struct cache_head *ch) nfs_dns_upcall()
143 struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h); nfs_dns_upcall()
148 ret = sunrpc_cache_pipe_upcall(cd, ch); nfs_dns_upcall()
195 struct cache_head *ch; nfs_dns_lookup() local
197 ch = sunrpc_cache_lookup(cd, nfs_dns_lookup()
200 if (!ch) nfs_dns_lookup()
202 return container_of(ch, struct nfs_dns_ent, h); nfs_dns_lookup()
209 struct cache_head *ch; nfs_dns_update() local
211 ch = sunrpc_cache_update(cd, nfs_dns_update()
214 if (!ch) nfs_dns_update()
216 return container_of(ch, struct nfs_dns_ent, h); nfs_dns_update()
130 nfs_dns_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) nfs_dns_request() argument
140 nfs_dns_upcall(struct cache_detail *cd, struct cache_head *ch) nfs_dns_upcall() argument
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/
H A Dchannel.c377 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; brcms_c_channel_set_chanspec() local
387 if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_channel_set_chanspec()
393 !!(ch->flags & IEEE80211_CHAN_NO_IR), brcms_c_channel_set_chanspec()
402 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan; brcms_c_channel_reg_limits() local
417 if (WARN_ON(!ch)) brcms_c_channel_reg_limits()
433 maxpwr = QDB(ch->max_power) - delta; brcms_c_channel_reg_limits()
636 struct ieee80211_channel *ch; brcms_reg_apply_radar_flags() local
644 ch = &sband->channels[i]; brcms_reg_apply_radar_flags()
646 if (!brcms_is_radar_freq(ch->center_freq)) brcms_reg_apply_radar_flags()
653 if (!(ch->flags & IEEE80211_CHAN_DISABLED)) brcms_reg_apply_radar_flags()
654 ch->flags |= IEEE80211_CHAN_RADAR | brcms_reg_apply_radar_flags()
665 struct ieee80211_channel *ch; brcms_reg_apply_beaconing_flags() local
675 ch = &sband->channels[i]; brcms_reg_apply_beaconing_flags()
677 if (ch->flags & brcms_reg_apply_beaconing_flags()
683 MHZ_TO_KHZ(ch->center_freq)); brcms_reg_apply_beaconing_flags()
688 ch->flags &= ~IEEE80211_CHAN_NO_IR; brcms_reg_apply_beaconing_flags()
689 } else if (ch->beacon_found) { brcms_reg_apply_beaconing_flags()
690 ch->flags &= ~IEEE80211_CHAN_NO_IR; brcms_reg_apply_beaconing_flags()
703 struct ieee80211_channel *ch; brcms_reg_notifier() local
719 ch = &sband->channels[i]; brcms_reg_notifier()
721 if (!(ch->flags & IEEE80211_CHAN_DISABLED)) brcms_reg_notifier()
745 struct ieee80211_channel *ch; brcms_c_regd_init() local
763 ch = &sband->channels[i]; brcms_c_regd_init()
764 if (!isset(sup_chan.vec, ch->hw_value)) brcms_c_regd_init()
765 ch->flags |= IEEE80211_CHAN_DISABLED; brcms_c_regd_init()
/linux-4.1.27/fs/ubifs/
H A Dscan.c72 struct ubifs_ch *ch = buf; ubifs_scan_a_node() local
75 magic = le32_to_cpu(ch->magic); ubifs_scan_a_node()
89 dbg_ntype(ch->node_type), lnum, offs); ubifs_scan_a_node()
94 if (ch->node_type == UBIFS_PAD_NODE) { ubifs_scan_a_node()
97 int node_len = le32_to_cpu(ch->len); ubifs_scan_a_node()
197 struct ubifs_ch *ch = buf; ubifs_add_snod() local
205 snod->sqnum = le64_to_cpu(ch->sqnum); ubifs_add_snod()
206 snod->type = ch->node_type; ubifs_add_snod()
208 snod->len = le32_to_cpu(ch->len); ubifs_add_snod()
211 switch (ch->node_type) { ubifs_add_snod()
279 struct ubifs_ch *ch = buf; ubifs_scan() local
320 node_len = ALIGN(le32_to_cpu(ch->len), 8); ubifs_scan()
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dsvc_rdma_marshal.c60 struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va; decode_read_list() local
62 while (ch->rc_discrim != xdr_zero) { decode_read_list()
63 if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) > decode_read_list()
65 dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch); decode_read_list()
68 ch++; decode_read_list()
70 return (u32 *)&ch->rc_position; decode_read_list()
217 struct rpcrdma_read_chunk *ch; svc_rdma_xdr_decode_deferred_req() local
241 for (ch = (struct rpcrdma_read_chunk *)va; svc_rdma_xdr_decode_deferred_req()
242 ch->rc_discrim != xdr_zero; ch++); svc_rdma_xdr_decode_deferred_req()
243 va = (u32 *)&ch->rc_position; svc_rdma_xdr_decode_deferred_req()
/linux-4.1.27/drivers/iio/pressure/
H A Dst_pressure_core.c184 .ch = (struct iio_chan_spec *)st_press_1_channels,
231 .ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
266 .ch = (struct iio_chan_spec *)st_press_1_channels,
311 struct iio_chan_spec const *ch, st_press_write_raw()
332 struct iio_chan_spec const *ch, int *val, st_press_read_raw()
340 err = st_sensors_read_info_raw(indio_dev, ch, val); st_press_read_raw()
348 switch (ch->type) { st_press_read_raw()
362 switch (ch->type) { st_press_read_raw()
432 indio_dev->channels = press_data->sensor_settings->ch; st_press_common_probe()
310 st_press_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int val, int val2, long mask) st_press_write_raw() argument
331 st_press_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val, int *val2, long mask) st_press_read_raw() argument
/linux-4.1.27/arch/mn10300/unit-asb2303/include/unit/
H A Dserial.h117 char ch; __debug_to_serial() local
125 ch = *p++; __debug_to_serial()
126 if (ch == 0x0a) { __debug_to_serial()
131 GDBPORT_SERIAL_TX = ch; __debug_to_serial()
/linux-4.1.27/arch/mn10300/unit-asb2364/include/unit/
H A Dserial.h84 char ch; __debug_to_serial() local
103 ch = *p++; __debug_to_serial()
104 if (ch == 0x0a) { __debug_to_serial()
109 GDBPORT_SERIAL_TX = ch; __debug_to_serial()
/linux-4.1.27/arch/nios2/include/uapi/asm/
H A Dswab.h2 * Copyright (C) 2012 Tobias Klauser <tklauser@distanz.ch>
/linux-4.1.27/arch/ia64/hp/sim/boot/
H A Dbootloader.c43 unsigned long ch; cons_write() local
45 while ((ch = *buf++) != '\0') { cons_write()
46 ssc(ch, 0, 0, 0, SSC_PUTCHAR); cons_write()
47 if (ch == '\n') cons_write()
/linux-4.1.27/fs/cachefiles/
H A Dkey.c39 unsigned char csum, ch; cachefiles_cook_key() local
51 ch = raw[loop]; cachefiles_cook_key()
52 csum += ch; cachefiles_cook_key()
53 print &= cachefiles_filecharmap[ch]; cachefiles_cook_key()
/linux-4.1.27/net/sctp/
H A Dinqueue.c102 sctp_chunkhdr_t *ch = NULL; sctp_inq_peek() local
111 ch = (sctp_chunkhdr_t *)chunk->chunk_end; sctp_inq_peek()
113 return ch; sctp_inq_peek()
125 sctp_chunkhdr_t *ch = NULL; sctp_inq_pop() local
142 ch = (sctp_chunkhdr_t *) chunk->chunk_end; sctp_inq_pop()
164 ch = (sctp_chunkhdr_t *) chunk->skb->data; sctp_inq_pop()
168 chunk->chunk_hdr = ch; sctp_inq_pop()
169 chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); sctp_inq_pop()
/linux-4.1.27/arch/mips/include/asm/sibyte/
H A Dsb1250_mac.h440 * In the following definitions we use ch (0/1) and txrx (TX=1, RX=0, see
443 #define S_MAC_STATUS_CH_OFFSET(ch, txrx) _SB_MAKE64(((ch) + 2 * (txrx)) * S_MAC_CHANWIDTH)
445 #define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
446 #define M_MAC_STATUS_EOP_COUNT(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_COUNT, S_MAC_STATUS_CH_OFFSET(ch, txrx))
447 #define M_MAC_STATUS_EOP_TIMER(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_TIMER, S_MAC_STATUS_CH_OFFSET(ch, txrx))
448 #define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
449 #define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
450 #define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
451 #define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
452 #define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
453 #define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
454 #define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
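
The comment above notes that ch (0/1) and txrx (TX=1, RX=0) together select a per-channel field of the MAC status register; a minimal sketch of that usage follows. The helper name, the choice of testing only the error bit, and the status word having been read elsewhere are assumptions; only the M_MAC_STATUS_ERR() macro and the ch/txrx encoding come from the header above.

#include <linux/types.h>
#include <asm/sibyte/sb1250_mac.h>

/* Illustrative only: given a previously read MAC status word, report whether
 * the TX side (txrx = 1) of channel 'ch' has its error bit set. */
static bool example_mac_tx_error(u64 status, int ch)
{
	return (status & M_MAC_STATUS_ERR(ch, 1)) != 0;
}
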
/linux-4.1.27/drivers/staging/iio/adc/
H A Dad7280a.c404 unsigned devaddr, ch; ad7280_store_balance_sw() local
411 ch = this_attr->address & 0xFF; ad7280_store_balance_sw()
415 st->cb_mask[devaddr] |= 1 << (ch + 2); ad7280_store_balance_sw()
417 st->cb_mask[devaddr] &= ~(1 << (ch + 2)); ad7280_store_balance_sw()
487 int dev, ch, cnt; ad7280_channel_init() local
495 for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_AUX_ADC_6; ch++, ad7280_channel_init()
497 if (ch < AD7280A_AUX_ADC_1) { ad7280_channel_init()
500 st->channels[cnt].channel = (dev * 6) + ch; ad7280_channel_init()
505 st->channels[cnt].channel = (dev * 6) + ch - 6; ad7280_channel_init()
513 AD7280A_DEVADDR(dev) << 8 | ch; ad7280_channel_init()
548 int dev, ch, cnt; ad7280_attr_init() local
557 for (ch = AD7280A_CELL_VOLTAGE_1; ch <= AD7280A_CELL_VOLTAGE_6; ad7280_attr_init()
558 ch++, cnt++) { ad7280_attr_init()
560 AD7280A_DEVADDR(dev) << 8 | ch; ad7280_attr_init()
570 (dev * AD7280A_CELLS_PER_DEV) + ch, ad7280_attr_init()
571 (dev * AD7280A_CELLS_PER_DEV) + ch + 1); ad7280_attr_init()
577 (AD7280A_CB1_TIMER + ch); ad7280_attr_init()
586 (dev * AD7280A_CELLS_PER_DEV) + ch, ad7280_attr_init()
587 (dev * AD7280A_CELLS_PER_DEV) + ch + 1); ad7280_attr_init()
/linux-4.1.27/sound/ppc/
H A Dsnd_ps3.c143 pr_info("%s: DMA ch %d is not stopped.", snd_ps3_verify_dma_stop()
170 * 5.7ms is from 16bit/sample 2ch 44.1Khz; the time next snd_ps3_wait_for_dma_stop()
201 static dma_addr_t v_to_bus(struct snd_ps3_card_info *card, void *paddr, int ch) v_to_bus() argument
203 return card->dma_start_bus_addr[ch] + v_to_bus()
204 (paddr - card->dma_start_vaddr[ch]); v_to_bus()
213 enum snd_ps3_ch ch, size_t byte_count, snd_ps3_bump_buffer()
217 card->dma_last_transfer_vaddr[ch] = snd_ps3_bump_buffer()
218 card->dma_next_transfer_vaddr[ch]; snd_ps3_bump_buffer()
219 card->dma_next_transfer_vaddr[ch] += byte_count; snd_ps3_bump_buffer()
220 if ((card->dma_start_vaddr[ch] + (card->dma_buffer_size / 2)) <= snd_ps3_bump_buffer()
221 card->dma_next_transfer_vaddr[ch]) { snd_ps3_bump_buffer()
222 card->dma_next_transfer_vaddr[ch] = card->dma_start_vaddr[ch]; snd_ps3_bump_buffer()
234 enum snd_ps3_ch ch; snd_ps3_program_dma() local
259 for (ch = 0; ch < 2; ch++) { snd_ps3_program_dma()
262 dma_ch = stage * 2 + ch; snd_ps3_program_dma()
268 card->dma_next_transfer_vaddr[ch], snd_ps3_program_dma()
269 ch); snd_ps3_program_dma()
276 if (ch == 0) snd_ps3_program_dma()
289 snd_ps3_bump_buffer(card, ch, snd_ps3_program_dma()
212 snd_ps3_bump_buffer(struct snd_ps3_card_info *card, enum snd_ps3_ch ch, size_t byte_count, int stage) snd_ps3_bump_buffer() argument
/linux-4.1.27/drivers/isdn/hisax/
H A Dhfc_2bs0.h30 #define HFC_CHANNEL(ch) (ch ? HFC_B2 : HFC_B1)
/linux-4.1.27/drivers/isdn/pcbit/
H A Dlayer2.h93 #define SET_MSG_SCMD(msg, ch) (msg = (msg & 0xffffff00) | (((ch) & 0xff)))
94 #define SET_MSG_CMD(msg, ch) (msg = (msg & 0xffff00ff) | (((ch) & 0xff) << 8))
95 #define SET_MSG_PROC(msg, ch) (msg = (msg & 0xff00ffff) | (((ch) & 0xff) << 16))
96 #define SET_MSG_CPU(msg, ch) (msg = (msg & 0x00ffffff) | (((ch) & 0xff) << 24))
/linux-4.1.27/drivers/media/pci/cx88/
H A Dcx88-core.c334 const struct sram_channel *ch, cx88_sram_channel_setup()
341 cdt = ch->cdt; cx88_sram_channel_setup()
342 lines = ch->fifo_size / bpl; cx88_sram_channel_setup()
349 cx_write(cdt + 16*i, ch->fifo_start + bpl*i); cx88_sram_channel_setup()
352 cx_write(ch->cmds_start + 0, risc); cx88_sram_channel_setup()
353 cx_write(ch->cmds_start + 4, cdt); cx88_sram_channel_setup()
354 cx_write(ch->cmds_start + 8, (lines*16) >> 3); cx88_sram_channel_setup()
355 cx_write(ch->cmds_start + 12, ch->ctrl_start); cx88_sram_channel_setup()
356 cx_write(ch->cmds_start + 16, 64 >> 2); cx88_sram_channel_setup()
358 cx_write(ch->cmds_start + i, 0); cx88_sram_channel_setup()
361 cx_write(ch->ptr1_reg, ch->fifo_start); cx88_sram_channel_setup()
362 cx_write(ch->ptr2_reg, cdt); cx88_sram_channel_setup()
363 cx_write(ch->cnt1_reg, (bpl >> 3) -1); cx88_sram_channel_setup()
364 cx_write(ch->cnt2_reg, (lines*16) >> 3); cx88_sram_channel_setup()
366 dprintk(2,"sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines); cx88_sram_channel_setup()
413 const struct sram_channel *ch) cx88_sram_channel_dump()
432 core->name,ch->name); cx88_sram_channel_dump()
436 cx_read(ch->cmds_start + 4*i)); cx88_sram_channel_dump()
438 risc = cx_read(ch->cmds_start + 4 * (i+11)); cx88_sram_channel_dump()
446 risc = cx_read(ch->ctrl_start + 4 * i); cx88_sram_channel_dump()
450 risc = cx_read(ch->ctrl_start + 4 * (i+j)); cx88_sram_channel_dump()
457 core->name, ch->fifo_start, ch->fifo_start+ch->fifo_size); cx88_sram_channel_dump()
459 core->name, ch->ctrl_start, ch->ctrl_start+6*16); cx88_sram_channel_dump()
461 core->name,cx_read(ch->ptr1_reg)); cx88_sram_channel_dump()
463 core->name,cx_read(ch->ptr2_reg)); cx88_sram_channel_dump()
465 core->name,cx_read(ch->cnt1_reg)); cx88_sram_channel_dump()
467 core->name,cx_read(ch->cnt2_reg)); cx88_sram_channel_dump()
333 cx88_sram_channel_setup(struct cx88_core *core, const struct sram_channel *ch, unsigned int bpl, u32 risc) cx88_sram_channel_setup() argument
412 cx88_sram_channel_dump(struct cx88_core *core, const struct sram_channel *ch) cx88_sram_channel_dump() argument
/linux-4.1.27/drivers/media/rc/keymaps/
H A Drc-dm1105-nec.c33 { 0x1c, KEY_CHANNELUP}, /* ch+ */
34 { 0x0f, KEY_CHANNELDOWN}, /* ch- */
H A Drc-tbs-nec.c31 { 0x96, KEY_CHANNELUP}, /* ch+ */
32 { 0x91, KEY_CHANNELDOWN}, /* ch- */
/linux-4.1.27/drivers/cpuidle/
H A Dcpuidle-kirkwood.c14 * Maintainer: Andrew Lunn <andrew@lunn.ch>
84 MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
