fdq               789 drivers/net/ethernet/ti/netcp_core.c static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
fdq               797 drivers/net/ethernet/ti/netcp_core.c 	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
fdq               822 drivers/net/ethernet/ti/netcp_core.c 		if (fdq == 0) {
fdq               852 drivers/net/ethernet/ti/netcp_core.c static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
fdq               869 drivers/net/ethernet/ti/netcp_core.c 	if (likely(fdq == 0)) {
fdq               923 drivers/net/ethernet/ti/netcp_core.c 	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
fdq              1677 drivers/net/ethernet/ti/netcp_core.c 		config.u.rx.fdq[i] = last_fdq;
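
In netcp_core.c, fdq is an index into the interface's array of RX free descriptor queues: pool 0 supplies the primary (head) buffers and the remaining pools supply fragment pages, which is why both netcp_allocate_rx_buf() and netcp_free_rx_buf() special-case fdq == 0. At channel setup (the hit at line 1677) the queue ids are copied into config.u.rx.fdq[]. A minimal sketch of that loop, modeled on the surrounding driver code; it assumes the rx_fdq[] handles were opened earlier with knav_queue_open():

    /* Unused trailing slots repeat the last valid queue id, so every
     * fdq[] entry the hardware may read names a real queue.
     */
    unsigned int last_fdq = 0;
    int i;

    for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++) {
            if (netcp->rx_fdq[i])
                    last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
            config.u.rx.fdq[i] = last_fdq;
    }
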
fdq               190 drivers/soc/ti/knav_dma.c 		v =  cfg->u.rx.fdq[0] << 16;
fdq               191 drivers/soc/ti/knav_dma.c 		v |=  cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
fdq               194 drivers/soc/ti/knav_dma.c 		v =  cfg->u.rx.fdq[2] << 16;
fdq               195 drivers/soc/ti/knav_dma.c 		v |=  cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
fdq               342 drivers/soc/ti/knav_dma.c 			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
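
On the knav_dma side those queue numbers are programmed two per 32-bit RX-flow register: the even-indexed queue lands in bits 31:16 and the odd-indexed one in the low field, masked with CHAN_QNUM_MASK (the driver leaves the high half unmasked, as the hits at lines 190-195 show). An illustrative helper making the packing explicit; the driver itself open-codes this, and the helper name is hypothetical:

    /* Pack two free-descriptor queue numbers the way knav_dma.c does:
     * qhi in bits 31:16, qlo in the CHAN_QNUM_MASK field of the same
     * 32-bit register value.
     */
    static u32 knav_pack_fdq_pair(unsigned int qhi, unsigned int qlo)
    {
            return (qhi << 16) | (qlo & CHAN_QNUM_MASK);
    }
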
fdq               812 fs/gfs2/quota.c 			     struct qc_dqblk *fdq)
fdq               837 fs/gfs2/quota.c 	if (fdq) {
fdq               838 fs/gfs2/quota.c 		if (fdq->d_fieldmask & QC_SPC_SOFT) {
fdq               839 fs/gfs2/quota.c 			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
fdq               842 fs/gfs2/quota.c 		if (fdq->d_fieldmask & QC_SPC_HARD) {
fdq               843 fs/gfs2/quota.c 			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
fdq               846 fs/gfs2/quota.c 		if (fdq->d_fieldmask & QC_SPACE) {
fdq               847 fs/gfs2/quota.c 			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
fdq              1611 fs/gfs2/quota.c 			  struct qc_dqblk *fdq)
fdq              1619 fs/gfs2/quota.c 	memset(fdq, 0, sizeof(*fdq));
fdq              1636 fs/gfs2/quota.c 	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
fdq              1637 fs/gfs2/quota.c 	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
fdq              1638 fs/gfs2/quota.c 	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
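
In fs/gfs2/quota.c the same identifier names something unrelated: here fdq is a struct qc_dqblk, the kernel-internal quota record. gfs2 stores quota values in filesystem blocks while qc_dqblk speaks bytes, so every crossing of that boundary is a shift by sd_sb.sb_bsize_shift (log2 of the block size): right shifts on the set path (lines 839-847), left shifts on the get path (lines 1636-1638). Hypothetical helpers naming the convention; gfs2 itself open-codes the shifts:

    /* Illustrative only: exact for block-aligned byte counts,
     * truncating otherwise.
     */
    static inline u64 gfs2_bytes_to_fsb(const struct gfs2_sbd *sdp, u64 bytes)
    {
            return bytes >> sdp->sd_sb.sb_bsize_shift;      /* set path */
    }

    static inline u64 gfs2_fsb_to_bytes(const struct gfs2_sbd *sdp, u64 fsb)
    {
            return fsb << sdp->sd_sb.sb_bsize_shift;        /* get path */
    }
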
fdq              1650 fs/gfs2/quota.c 			  struct qc_dqblk *fdq)
fdq              1669 fs/gfs2/quota.c 	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
fdq              1694 fs/gfs2/quota.c 	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
fdq              1695 fs/gfs2/quota.c 	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
fdq              1696 fs/gfs2/quota.c 		fdq->d_fieldmask ^= QC_SPC_SOFT;
fdq              1698 fs/gfs2/quota.c 	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
fdq              1699 fs/gfs2/quota.c 	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
fdq              1700 fs/gfs2/quota.c 		fdq->d_fieldmask ^= QC_SPC_HARD;
fdq              1702 fs/gfs2/quota.c 	if ((fdq->d_fieldmask & QC_SPACE) &&
fdq              1703 fs/gfs2/quota.c 	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
fdq              1704 fs/gfs2/quota.c 		fdq->d_fieldmask ^= QC_SPACE;
fdq              1706 fs/gfs2/quota.c 	if (fdq->d_fieldmask == 0)
fdq              1732 fs/gfs2/quota.c 	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
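
gfs2_set_dqblk() first rejects fields it cannot store (line 1669; gfs2 handles only the fields in GFS2_FIELDMASK), then drops any requested field whose converted value already matches the cached big-endian copy in qd->qd_qb. Because each bit was just tested set, the "^=" toggles act as clears, and a fully trimmed mask lets the function return without calling gfs2_adjust_quota(). Condensed from the hits above; the exit label is illustrative:

    if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
        ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) ==
         be64_to_cpu(qd->qd_qb.qb_warn)))
            fdq->d_fieldmask ^= QC_SPC_SOFT;  /* bit known set: this clears it */
    /* ...same pattern for QC_SPC_HARD and QC_SPACE... */
    if (fdq->d_fieldmask == 0)
            goto out;       /* no-op set: skip the quota-file write */
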
fdq               203 fs/quota/quota.c 	struct qc_dqblk fdq;
fdq               212 fs/quota/quota.c 	ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
fdq               215 fs/quota/quota.c 	copy_to_if_dqblk(&idq, &fdq);
fdq               229 fs/quota/quota.c 	struct qc_dqblk fdq;
fdq               238 fs/quota/quota.c 	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
fdq               242 fs/quota/quota.c 	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
fdq               278 fs/quota/quota.c 	struct qc_dqblk fdq;
fdq               289 fs/quota/quota.c 	copy_from_if_dqblk(&fdq, &idq);
fdq               290 fs/quota/quota.c 	return sb->s_qcop->set_dqblk(sb, qid, &fdq);
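
In fs/quota/quota.c, fdq is again a struct qc_dqblk, acting as the mediator between the userspace ABI structs and the filesystem's quotactl_ops. (The cast at line 242 is safe because struct if_nextdqblk begins with the if_dqblk layout, adding only dqb_id at the end.) The Q_GETQUOTA path, lightly trimmed; the qid_has_mapping() check follows current mainline and details vary across kernel versions:

    static int quota_getquota(struct super_block *sb, int type, qid_t id,
                              void __user *addr)
    {
            struct kqid qid;
            struct qc_dqblk fdq;
            struct if_dqblk idq;
            int ret;

            if (!sb->s_qcop->get_dqblk)
                    return -ENOSYS;
            qid = make_kqid(current_user_ns(), type, id);
            if (!qid_has_mapping(sb->s_user_ns, qid))
                    return -EINVAL;
            ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);  /* fs fills fdq */
            if (ret)
                    return ret;
            copy_to_if_dqblk(&idq, &fdq);  /* qc_dqblk -> userspace layout */
            if (copy_to_user(addr, &idq, sizeof(idq)))
                    return -EFAULT;
            return 0;
    }
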
fdq               563 fs/quota/quota.c 	struct fs_disk_quota fdq;
fdq               567 fs/quota/quota.c 	if (copy_from_user(&fdq, addr, sizeof(fdq)))
fdq               576 fs/quota/quota.c 	    fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
fdq               582 fs/quota/quota.c 		copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
fdq               587 fs/quota/quota.c 		fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
fdq               589 fs/quota/quota.c 	copy_from_xfs_dqblk(&qdq, &fdq);
fdq               625 fs/quota/quota.c 	struct fs_disk_quota fdq;
fdq               638 fs/quota/quota.c 	copy_to_xfs_dqblk(&fdq, &qdq, type, id);
fdq               639 fs/quota/quota.c 	if (copy_to_user(addr, &fdq, sizeof(fdq)))
fdq               651 fs/quota/quota.c 	struct fs_disk_quota fdq;
fdq               666 fs/quota/quota.c 	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
fdq               667 fs/quota/quota.c 	if (copy_to_user(addr, &fdq, sizeof(fdq)))
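
For the XFS-style commands the userspace struct is fs_disk_quota instead. quota_setxquota() splits the fieldmask: warning-count and timer fields travel through ->set_info() as a struct qc_info, the limits through ->set_dqblk() as a qc_dqblk, and the handled bits are cleared (line 587) so they are not applied twice. A lightly trimmed sketch of the function; the exact permission and version-specific checks are elided:

    static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                               void __user *addr)
    {
            struct fs_disk_quota fdq;
            struct qc_dqblk qdq;
            struct kqid qid;

            if (copy_from_user(&fdq, addr, sizeof(fdq)))
                    return -EFAULT;
            if (!sb->s_qcop->set_dqblk)
                    return -ENOSYS;
            qid = make_kqid(current_user_ns(), type, id);
            if (!qid_has_mapping(sb->s_user_ns, qid))
                    return -EINVAL;
            /* Grace times / warning counts are per-type, not per-id:
             * route them through ->set_info() when id 0 carries them.
             */
            if (from_kqid(sb->s_user_ns, qid) == 0 &&
                fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
                    struct qc_info qinfo;
                    int ret;

                    if (!sb->s_qcop->set_info)
                            return -EINVAL;
                    copy_qcinfo_from_xfs_dqblk(&qinfo, &fdq);
                    ret = sb->s_qcop->set_info(sb, type, &qinfo);
                    if (ret)
                            return ret;
                    /* don't pass the handled fields to ->set_dqblk() */
                    fdq.d_fieldmask &= ~(FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK);
            }
            copy_from_xfs_dqblk(&qdq, &fdq);
            return sb->s_qcop->set_dqblk(sb, qid, &qdq);
    }
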
fdq               118 include/linux/soc/ti/knav_dma.h 	unsigned int			fdq[KNAV_DMA_FDQ_PER_CHAN];
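
The header hit is the declaration behind the TI entries: each KNAV DMA RX channel carries one free-descriptor queue number per buffer pool, and KNAV_DMA_FDQ_PER_CHAN is 4, matching the fdq[0..3] writes in knav_dma.c above. Abbreviated from include/linux/soc/ti/knav_dma.h, with the other RX and TX fields elided:

    #define KNAV_DMA_FDQ_PER_CHAN   4

    struct knav_dma_cfg {
            /* ... direction, etc. ... */
            union {
                    struct {
                            /* ... descriptor, PS-info, queue settings ... */
                            unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
                    } rx;
                    /* ... tx ... */
            } u;
    };
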