Searched for refs:pch (results 1 to 22 of 22), sorted by relevance

/linux-4.4.14/drivers/net/ppp/
ppp_generic.c
253 static void ppp_channel_push(struct channel *pch);
255 struct channel *pch);
262 struct channel *pch);
278 static int ppp_connect_channel(struct channel *pch, int unit);
279 static int ppp_disconnect_channel(struct channel *pch);
280 static void ppp_destroy_channel(struct channel *pch);
619 struct channel *pch; ppp_ioctl() local
622 pch = PF_TO_CHANNEL(pf); ppp_ioctl()
628 err = ppp_connect_channel(pch, unit); ppp_ioctl()
632 err = ppp_disconnect_channel(pch); ppp_ioctl()
636 down_read(&pch->chan_sem); ppp_ioctl()
637 chan = pch->chan; ppp_ioctl()
641 up_read(&pch->chan_sem); ppp_ioctl()
1362 struct channel *pch; ppp_push() local
1379 pch = list_entry(list, struct channel, clist); ppp_push()
1381 spin_lock_bh(&pch->downl); ppp_push()
1382 if (pch->chan) { ppp_push()
1383 if (pch->chan->ops->start_xmit(pch->chan, skb)) ppp_push()
1390 spin_unlock_bh(&pch->downl); ppp_push()
1426 struct channel *pch; ppp_mp_explode() local
1440 list_for_each_entry(pch, &ppp->channels, clist) { ppp_mp_explode()
1441 if (pch->chan) { ppp_mp_explode()
1442 pch->avail = 1; ppp_mp_explode()
1444 pch->speed = pch->chan->speed; ppp_mp_explode()
1446 pch->avail = 0; ppp_mp_explode()
1448 if (pch->avail) { ppp_mp_explode()
1449 if (skb_queue_empty(&pch->file.xq) || ppp_mp_explode()
1450 !pch->had_frag) { ppp_mp_explode()
1451 if (pch->speed == 0) ppp_mp_explode()
1454 totspeed += pch->speed; ppp_mp_explode()
1456 pch->avail = 2; ppp_mp_explode()
1460 if (!pch->had_frag && i < ppp->nxchan) ppp_mp_explode()
1503 pch = list_entry(list, struct channel, clist); ppp_mp_explode()
1505 if (!pch->avail) ppp_mp_explode()
1512 if (pch->avail == 1) { ppp_mp_explode()
1516 pch->avail = 1; ppp_mp_explode()
1520 spin_lock_bh(&pch->downl); ppp_mp_explode()
1521 if (pch->chan == NULL) { ppp_mp_explode()
1523 if (pch->speed == 0) ppp_mp_explode()
1526 totspeed -= pch->speed; ppp_mp_explode()
1528 spin_unlock_bh(&pch->downl); ppp_mp_explode()
1529 pch->avail = 0; ppp_mp_explode()
1546 if (pch->speed == 0) { ppp_mp_explode()
1554 ((totspeed*totfree)/pch->speed)) - hdrlen; ppp_mp_explode()
1556 flen += ((totfree - nzero)*pch->speed)/totspeed; ppp_mp_explode()
1557 nbigger -= ((totfree - nzero)*pch->speed)/ ppp_mp_explode()
1578 pch->avail = 2; ppp_mp_explode()
1579 spin_unlock_bh(&pch->downl); ppp_mp_explode()
1588 mtu = pch->chan->mtu - (hdrlen - 2); ppp_mp_explode()
1615 chan = pch->chan; ppp_mp_explode()
1616 if (!skb_queue_empty(&pch->file.xq) || ppp_mp_explode()
1618 skb_queue_tail(&pch->file.xq, frag); ppp_mp_explode()
1619 pch->had_frag = 1; ppp_mp_explode()
1624 spin_unlock_bh(&pch->downl); ppp_mp_explode()
1631 spin_unlock_bh(&pch->downl); ppp_mp_explode()
1644 ppp_channel_push(struct channel *pch) ppp_channel_push() argument
1649 spin_lock_bh(&pch->downl); ppp_channel_push()
1650 if (pch->chan) { ppp_channel_push()
1651 while (!skb_queue_empty(&pch->file.xq)) { ppp_channel_push()
1652 skb = skb_dequeue(&pch->file.xq); ppp_channel_push()
1653 if (!pch->chan->ops->start_xmit(pch->chan, skb)) { ppp_channel_push()
1655 skb_queue_head(&pch->file.xq, skb); ppp_channel_push()
1661 skb_queue_purge(&pch->file.xq); ppp_channel_push()
1663 spin_unlock_bh(&pch->downl); ppp_channel_push()
1665 if (skb_queue_empty(&pch->file.xq)) { ppp_channel_push()
1666 read_lock_bh(&pch->upl); ppp_channel_push()
1667 ppp = pch->ppp; ppp_channel_push()
1670 read_unlock_bh(&pch->upl); ppp_channel_push()
1685 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) ppp_do_recv() argument
1689 ppp_receive_frame(ppp, skb, pch); ppp_do_recv()
1698 struct channel *pch = chan->ppp; ppp_input() local
1701 if (!pch) { ppp_input()
1706 read_lock_bh(&pch->upl); ppp_input()
1709 if (pch->ppp) { ppp_input()
1710 ++pch->ppp->dev->stats.rx_length_errors; ppp_input()
1711 ppp_receive_error(pch->ppp); ppp_input()
1717 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { ppp_input()
1719 skb_queue_tail(&pch->file.rq, skb); ppp_input()
1721 while (pch->file.rq.qlen > PPP_MAX_RQLEN && ppp_input()
1722 (skb = skb_dequeue(&pch->file.rq))) ppp_input()
1724 wake_up_interruptible(&pch->file.rwait); ppp_input()
1726 ppp_do_recv(pch->ppp, skb, pch); ppp_input()
1730 read_unlock_bh(&pch->upl); ppp_input()
1737 struct channel *pch = chan->ppp; ppp_input_error() local
1740 if (!pch) ppp_input_error()
1743 read_lock_bh(&pch->upl); ppp_input_error()
1744 if (pch->ppp) { ppp_input_error()
1749 ppp_do_recv(pch->ppp, skb, pch); ppp_input_error()
1752 read_unlock_bh(&pch->upl); ppp_input_error()
1760 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) ppp_receive_frame() argument
1768 ppp_receive_mp_frame(ppp, skb, pch); ppp_receive_frame()
1998 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) ppp_receive_mp_frame() argument
2034 pch->lastseq = seq; ppp_receive_mp_frame()
2285 struct channel *pch; ppp_register_net_channel() local
2288 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); ppp_register_net_channel()
2289 if (!pch) ppp_register_net_channel()
2294 pch->ppp = NULL; ppp_register_net_channel()
2295 pch->chan = chan; ppp_register_net_channel()
2296 pch->chan_net = get_net(net); ppp_register_net_channel()
2297 chan->ppp = pch; ppp_register_net_channel()
2298 init_ppp_file(&pch->file, CHANNEL); ppp_register_net_channel()
2299 pch->file.hdrlen = chan->hdrlen; ppp_register_net_channel()
2301 pch->lastseq = -1; ppp_register_net_channel()
2303 init_rwsem(&pch->chan_sem); ppp_register_net_channel()
2304 spin_lock_init(&pch->downl); ppp_register_net_channel()
2305 rwlock_init(&pch->upl); ppp_register_net_channel()
2308 pch->file.index = ++pn->last_channel_index; ppp_register_net_channel()
2309 list_add(&pch->list, &pn->new_channels); ppp_register_net_channel()
2321 struct channel *pch = chan->ppp; ppp_channel_index() local
2323 if (pch) ppp_channel_index()
2324 return pch->file.index; ppp_channel_index()
2333 struct channel *pch = chan->ppp; ppp_unit_number() local
2336 if (pch) { ppp_unit_number()
2337 read_lock_bh(&pch->upl); ppp_unit_number()
2338 if (pch->ppp) ppp_unit_number()
2339 unit = pch->ppp->file.index; ppp_unit_number()
2340 read_unlock_bh(&pch->upl); ppp_unit_number()
2350 struct channel *pch = chan->ppp; ppp_dev_name() local
2353 if (pch) { ppp_dev_name()
2354 read_lock_bh(&pch->upl); ppp_dev_name()
2355 if (pch->ppp && pch->ppp->dev) ppp_dev_name()
2356 name = pch->ppp->dev->name; ppp_dev_name()
2357 read_unlock_bh(&pch->upl); ppp_dev_name()
2370 struct channel *pch = chan->ppp; ppp_unregister_channel() local
2373 if (!pch) ppp_unregister_channel()
2382 down_write(&pch->chan_sem); ppp_unregister_channel()
2383 spin_lock_bh(&pch->downl); ppp_unregister_channel()
2384 pch->chan = NULL; ppp_unregister_channel()
2385 spin_unlock_bh(&pch->downl); ppp_unregister_channel()
2386 up_write(&pch->chan_sem); ppp_unregister_channel()
2387 ppp_disconnect_channel(pch); ppp_unregister_channel()
2389 pn = ppp_pernet(pch->chan_net); ppp_unregister_channel()
2391 list_del(&pch->list); ppp_unregister_channel()
2393 put_net(pch->chan_net); ppp_unregister_channel()
2394 pch->chan_net = NULL; ppp_unregister_channel()
2396 pch->file.dead = 1; ppp_unregister_channel()
2397 wake_up_interruptible(&pch->file.rwait); ppp_unregister_channel()
2398 if (atomic_dec_and_test(&pch->file.refcnt)) ppp_unregister_channel()
2399 ppp_destroy_channel(pch); ppp_unregister_channel()
2409 struct channel *pch = chan->ppp; ppp_output_wakeup() local
2411 if (!pch) ppp_output_wakeup()
2413 ppp_channel_push(pch); ppp_output_wakeup()
2896 struct channel *pch; ppp_find_channel() local
2898 list_for_each_entry(pch, &pn->new_channels, list) { ppp_find_channel()
2899 if (pch->file.index == unit) { ppp_find_channel()
2900 list_move(&pch->list, &pn->all_channels); ppp_find_channel()
2901 return pch; ppp_find_channel()
2905 list_for_each_entry(pch, &pn->all_channels, list) { ppp_find_channel()
2906 if (pch->file.index == unit) ppp_find_channel()
2907 return pch; ppp_find_channel()
2917 ppp_connect_channel(struct channel *pch, int unit) ppp_connect_channel() argument
2924 pn = ppp_pernet(pch->chan_net); ppp_connect_channel()
2930 write_lock_bh(&pch->upl); ppp_connect_channel()
2932 if (pch->ppp) ppp_connect_channel()
2936 if (pch->file.hdrlen > ppp->file.hdrlen) ppp_connect_channel()
2937 ppp->file.hdrlen = pch->file.hdrlen; ppp_connect_channel()
2938 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ ppp_connect_channel()
2941 list_add_tail(&pch->clist, &ppp->channels); ppp_connect_channel()
2943 pch->ppp = ppp; ppp_connect_channel()
2949 write_unlock_bh(&pch->upl); ppp_connect_channel()
2959 ppp_disconnect_channel(struct channel *pch) ppp_disconnect_channel() argument
2964 write_lock_bh(&pch->upl); ppp_disconnect_channel()
2965 ppp = pch->ppp; ppp_disconnect_channel()
2966 pch->ppp = NULL; ppp_disconnect_channel()
2967 write_unlock_bh(&pch->upl); ppp_disconnect_channel()
2971 list_del(&pch->clist); ppp_disconnect_channel()
2985 static void ppp_destroy_channel(struct channel *pch) ppp_destroy_channel() argument
2989 if (!pch->file.dead) { ppp_destroy_channel()
2991 pr_err("ppp: destroying undead channel %p !\n", pch); ppp_destroy_channel()
2994 skb_queue_purge(&pch->file.xq); ppp_destroy_channel()
2995 skb_queue_purge(&pch->file.rq); ppp_destroy_channel()
2996 kfree(pch); ppp_destroy_channel()
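
Taken together, the ppp_generic.c hits above show three locks per channel: chan_sem serializes channel operations against unregistration, the downl spinlock protects the downward link pch->chan used on transmit, and the upl rwlock protects the upward link pch->ppp used on receive. Below is a minimal kernel-style sketch of that init/teardown order, paraphrased from the fragments; the field names come from the hits, but this is not the driver itself (the real driver defers the final kfree() until the file refcount drops in ppp_destroy_channel()).

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

struct ppp;                     /* opaque here; stands in for the ppp unit */
struct ppp_channel;             /* opaque here; the low-level channel */

struct demo_channel {
        struct ppp_channel *chan;       /* downward link, protected by downl */
        struct ppp *ppp;                /* upward link, protected by upl */
        struct rw_semaphore chan_sem;   /* keeps chan alive across ioctls */
        spinlock_t downl;
        rwlock_t upl;
};

static struct demo_channel *demo_register_channel(struct ppp_channel *chan)
{
        struct demo_channel *pch = kzalloc(sizeof(*pch), GFP_KERNEL);

        if (!pch)
                return NULL;
        pch->ppp = NULL;
        pch->chan = chan;
        init_rwsem(&pch->chan_sem);
        spin_lock_init(&pch->downl);
        rwlock_init(&pch->upl);
        return pch;
}

static void demo_unregister_channel(struct demo_channel *pch)
{
        /* same order as the ppp_unregister_channel() hits: block new channel
         * users via chan_sem, clear the downward link under downl, then tear
         * the channel down (refcounted destruction elided in this sketch)
         */
        down_write(&pch->chan_sem);
        spin_lock_bh(&pch->downl);
        pch->chan = NULL;
        spin_unlock_bh(&pch->downl);
        up_write(&pch->chan_sem);
        kfree(pch);
}
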
/linux-4.4.14/drivers/dma/
pl330.c
1451 struct dma_pl330_chan *pch; dma_pl330_rqcb() local
1457 pch = desc->pchan; dma_pl330_rqcb()
1460 if (!pch) dma_pl330_rqcb()
1463 spin_lock_irqsave(&pch->lock, flags); dma_pl330_rqcb()
1467 spin_unlock_irqrestore(&pch->lock, flags); dma_pl330_rqcb()
1469 tasklet_schedule(&pch->task); dma_pl330_rqcb()
1945 static inline void fill_queue(struct dma_pl330_chan *pch) fill_queue() argument
1950 list_for_each_entry(desc, &pch->work_list, node) { fill_queue()
1956 ret = pl330_submit_req(pch->thread, desc); fill_queue()
1965 dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", fill_queue()
1967 tasklet_schedule(&pch->task); fill_queue()
1974 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; pl330_tasklet() local
1979 spin_lock_irqsave(&pch->lock, flags); pl330_tasklet()
1982 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) pl330_tasklet()
1984 if (!pch->cyclic) pl330_tasklet()
1986 list_move_tail(&desc->node, &pch->completed_list); pl330_tasklet()
1990 fill_queue(pch); pl330_tasklet()
1992 if (list_empty(&pch->work_list)) { pl330_tasklet()
1993 spin_lock(&pch->thread->dmac->lock); pl330_tasklet()
1994 _stop(pch->thread); pl330_tasklet()
1995 spin_unlock(&pch->thread->dmac->lock); pl330_tasklet()
1999 spin_lock(&pch->thread->dmac->lock); pl330_tasklet()
2000 _start(pch->thread); pl330_tasklet()
2001 spin_unlock(&pch->thread->dmac->lock); pl330_tasklet()
2004 while (!list_empty(&pch->completed_list)) { pl330_tasklet()
2008 desc = list_first_entry(&pch->completed_list, pl330_tasklet()
2014 if (pch->cyclic) { pl330_tasklet()
2016 list_move_tail(&desc->node, &pch->work_list); pl330_tasklet()
2018 spin_lock(&pch->thread->dmac->lock); pl330_tasklet()
2019 _start(pch->thread); pl330_tasklet()
2020 spin_unlock(&pch->thread->dmac->lock); pl330_tasklet()
2025 list_move_tail(&desc->node, &pch->dmac->desc_pool); pl330_tasklet()
2031 spin_unlock_irqrestore(&pch->lock, flags); pl330_tasklet()
2033 spin_lock_irqsave(&pch->lock, flags); pl330_tasklet()
2036 spin_unlock_irqrestore(&pch->lock, flags); pl330_tasklet()
2040 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pl330_tasklet()
2041 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); pl330_tasklet()
2079 struct dma_pl330_chan *pch = to_pchan(chan); pl330_alloc_chan_resources() local
2080 struct pl330_dmac *pl330 = pch->dmac; pl330_alloc_chan_resources()
2083 spin_lock_irqsave(&pch->lock, flags); pl330_alloc_chan_resources()
2086 pch->cyclic = false; pl330_alloc_chan_resources()
2088 pch->thread = pl330_request_channel(pl330); pl330_alloc_chan_resources()
2089 if (!pch->thread) { pl330_alloc_chan_resources()
2090 spin_unlock_irqrestore(&pch->lock, flags); pl330_alloc_chan_resources()
2094 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); pl330_alloc_chan_resources()
2096 spin_unlock_irqrestore(&pch->lock, flags); pl330_alloc_chan_resources()
2104 struct dma_pl330_chan *pch = to_pchan(chan); pl330_config() local
2108 pch->fifo_addr = slave_config->dst_addr; pl330_config()
2110 pch->burst_sz = __ffs(slave_config->dst_addr_width); pl330_config()
2112 pch->burst_len = slave_config->dst_maxburst; pl330_config()
2115 pch->fifo_addr = slave_config->src_addr; pl330_config()
2117 pch->burst_sz = __ffs(slave_config->src_addr_width); pl330_config()
2119 pch->burst_len = slave_config->src_maxburst; pl330_config()
2127 struct dma_pl330_chan *pch = to_pchan(chan); pl330_terminate_all() local
2130 struct pl330_dmac *pl330 = pch->dmac; pl330_terminate_all()
2134 spin_lock_irqsave(&pch->lock, flags); pl330_terminate_all()
2136 _stop(pch->thread); pl330_terminate_all()
2139 pch->thread->req[0].desc = NULL; pl330_terminate_all()
2140 pch->thread->req[1].desc = NULL; pl330_terminate_all()
2141 pch->thread->req_running = -1; pl330_terminate_all()
2144 list_for_each_entry(desc, &pch->submitted_list, node) { pl330_terminate_all()
2149 list_for_each_entry(desc, &pch->work_list , node) { pl330_terminate_all()
2154 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); pl330_terminate_all()
2155 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); pl330_terminate_all()
2156 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); pl330_terminate_all()
2157 spin_unlock_irqrestore(&pch->lock, flags); pl330_terminate_all()
2173 struct dma_pl330_chan *pch = to_pchan(chan); pl330_pause() local
2174 struct pl330_dmac *pl330 = pch->dmac; pl330_pause()
2178 spin_lock_irqsave(&pch->lock, flags); pl330_pause()
2181 _stop(pch->thread); pl330_pause()
2184 spin_unlock_irqrestore(&pch->lock, flags); pl330_pause()
2193 struct dma_pl330_chan *pch = to_pchan(chan); pl330_free_chan_resources() local
2196 tasklet_kill(&pch->task); pl330_free_chan_resources()
2198 pm_runtime_get_sync(pch->dmac->ddma.dev); pl330_free_chan_resources()
2199 spin_lock_irqsave(&pch->lock, flags); pl330_free_chan_resources()
2201 pl330_release_channel(pch->thread); pl330_free_chan_resources()
2202 pch->thread = NULL; pl330_free_chan_resources()
2204 if (pch->cyclic) pl330_free_chan_resources()
2205 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); pl330_free_chan_resources()
2207 spin_unlock_irqrestore(&pch->lock, flags); pl330_free_chan_resources()
2208 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pl330_free_chan_resources()
2209 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); pl330_free_chan_resources()
2212 static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, pl330_get_current_xferred_count() argument
2215 struct pl330_thread *thrd = pch->thread; pl330_get_current_xferred_count()
2216 struct pl330_dmac *pl330 = pch->dmac; pl330_get_current_xferred_count()
2229 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); pl330_get_current_xferred_count()
2241 struct dma_pl330_chan *pch = to_pchan(chan); pl330_tx_status() local
2252 spin_lock_irqsave(&pch->lock, flags); pl330_tx_status()
2254 if (pch->thread->req_running != -1) pl330_tx_status()
2255 running = pch->thread->req[pch->thread->req_running].desc; pl330_tx_status()
2258 list_for_each_entry(desc, &pch->work_list, node) { pl330_tx_status()
2263 pl330_get_current_xferred_count(pch, desc); pl330_tx_status()
2284 spin_unlock_irqrestore(&pch->lock, flags); pl330_tx_status()
2294 struct dma_pl330_chan *pch = to_pchan(chan); pl330_issue_pending() local
2297 spin_lock_irqsave(&pch->lock, flags); pl330_issue_pending()
2298 if (list_empty(&pch->work_list)) { pl330_issue_pending()
2304 WARN_ON(list_empty(&pch->submitted_list)); pl330_issue_pending()
2305 pm_runtime_get_sync(pch->dmac->ddma.dev); pl330_issue_pending()
2307 list_splice_tail_init(&pch->submitted_list, &pch->work_list); pl330_issue_pending()
2308 spin_unlock_irqrestore(&pch->lock, flags); pl330_issue_pending()
2310 pl330_tasklet((unsigned long)pch); pl330_issue_pending()
2321 struct dma_pl330_chan *pch = to_pchan(tx->chan); pl330_tx_submit() local
2325 spin_lock_irqsave(&pch->lock, flags); pl330_tx_submit()
2330 if (pch->cyclic) { pl330_tx_submit()
2338 list_move_tail(&desc->node, &pch->submitted_list); pl330_tx_submit()
2343 list_add_tail(&last->node, &pch->submitted_list); pl330_tx_submit()
2344 spin_unlock_irqrestore(&pch->lock, flags); pl330_tx_submit()
2404 static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) pl330_get_desc() argument
2406 struct pl330_dmac *pl330 = pch->dmac; pl330_get_desc()
2407 u8 *peri_id = pch->chan.private; pl330_get_desc()
2421 dev_err(pch->dmac->ddma.dev, pl330_get_desc()
2428 desc->pchan = pch; pl330_get_desc()
2432 desc->peri = peri_id ? pch->chan.chan_id : 0; pl330_get_desc()
2433 desc->rqcfg.pcfg = &pch->dmac->pcfg; pl330_get_desc()
2435 dma_async_tx_descriptor_init(&desc->txd, &pch->chan); pl330_get_desc()
2449 __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, __pl330_prep_dma_memcpy() argument
2452 struct dma_pl330_desc *desc = pl330_get_desc(pch); __pl330_prep_dma_memcpy()
2455 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __pl330_prep_dma_memcpy()
2478 struct dma_pl330_chan *pch = desc->pchan; get_burst_len() local
2479 struct pl330_dmac *pl330 = pch->dmac; get_burst_len()
2505 struct dma_pl330_chan *pch = to_pchan(chan); pl330_prep_dma_cyclic() local
2506 struct pl330_dmac *pl330 = pch->dmac; pl330_prep_dma_cyclic()
2515 dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", pl330_prep_dma_cyclic()
2521 desc = pl330_get_desc(pch); pl330_prep_dma_cyclic()
2523 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", pl330_prep_dma_cyclic()
2549 dst = pch->fifo_addr; pl330_prep_dma_cyclic()
2554 src = pch->fifo_addr; pl330_prep_dma_cyclic()
2562 desc->rqcfg.brst_size = pch->burst_sz; pl330_prep_dma_cyclic()
2578 pch->cyclic = true; pl330_prep_dma_cyclic()
2589 struct dma_pl330_chan *pch = to_pchan(chan); pl330_prep_dma_memcpy() local
2593 if (unlikely(!pch || !len)) pl330_prep_dma_memcpy()
2596 pl330 = pch->dmac; pl330_prep_dma_memcpy()
2598 desc = __pl330_prep_dma_memcpy(pch, dst, src, len); pl330_prep_dma_memcpy()
2664 struct dma_pl330_chan *pch = to_pchan(chan); pl330_prep_slave_sg() local
2669 if (unlikely(!pch || !sgl || !sg_len)) pl330_prep_slave_sg()
2672 addr = pch->fifo_addr; pl330_prep_slave_sg()
2678 desc = pl330_get_desc(pch); for_each_sg()
2680 struct pl330_dmac *pl330 = pch->dmac; for_each_sg()
2682 dev_err(pch->dmac->ddma.dev, for_each_sg()
2707 desc->rqcfg.brst_size = pch->burst_sz; for_each_sg()
2779 struct dma_pl330_chan *pch, *_p; pl330_probe() local
2847 pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); pl330_probe()
2855 pch = &pl330->peripherals[i]; pl330_probe()
2857 pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; pl330_probe()
2859 pch->chan.private = adev->dev.of_node; pl330_probe()
2861 INIT_LIST_HEAD(&pch->submitted_list); pl330_probe()
2862 INIT_LIST_HEAD(&pch->work_list); pl330_probe()
2863 INIT_LIST_HEAD(&pch->completed_list); pl330_probe()
2864 spin_lock_init(&pch->lock); pl330_probe()
2865 pch->thread = NULL; pl330_probe()
2866 pch->chan.device = pd; pl330_probe()
2867 pch->dmac = pl330; pl330_probe()
2870 list_add_tail(&pch->chan.device_node, &pd->channels); pl330_probe()
2941 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, pl330_probe()
2945 list_del(&pch->chan.device_node); pl330_probe()
2948 if (pch->thread) { pl330_probe()
2949 pl330_terminate_all(&pch->chan); pl330_probe()
2950 pl330_free_chan_resources(&pch->chan); pl330_probe()
2962 struct dma_pl330_chan *pch, *_p; pl330_remove() local
2972 list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, pl330_remove()
2976 list_del(&pch->chan.device_node); pl330_remove()
2979 if (pch->thread) { pl330_remove()
2980 pl330_terminate_all(&pch->chan); pl330_remove()
2981 pl330_free_chan_resources(&pch->chan); pl330_remove()
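
The pl330.c hits trace a descriptor through four lists: pl330_tx_submit() appends to submitted_list, pl330_issue_pending() splices that onto work_list, the tasklet moves finished descriptors to completed_list, and the terminate/free paths splice everything back to the controller's desc_pool. A small sketch of those list moves under the channel lock follows; the list and field names are taken from the hits, the simplified types around them are not.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_pl330_chan {
        spinlock_t lock;
        struct list_head submitted_list;        /* cookie assigned, not yet issued */
        struct list_head work_list;             /* queued to the hardware thread */
        struct list_head completed_list;        /* finished, callbacks pending */
};

/* as in the pl330_issue_pending() hit: everything submitted so far
 * becomes active work in one splice under the channel lock */
static void demo_issue_pending(struct demo_pl330_chan *pch)
{
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);
        list_splice_tail_init(&pch->submitted_list, &pch->work_list);
        spin_unlock_irqrestore(&pch->lock, flags);
}

/* as in the pl330_terminate_all() hit: drained descriptors go back to the
 * controller-wide free pool rather than being freed one by one */
static void demo_drain(struct demo_pl330_chan *pch, struct list_head *desc_pool)
{
        unsigned long flags;

        spin_lock_irqsave(&pch->lock, flags);
        list_splice_tail_init(&pch->submitted_list, desc_pool);
        list_splice_tail_init(&pch->work_list, desc_pool);
        list_splice_tail_init(&pch->completed_list, desc_pool);
        spin_unlock_irqrestore(&pch->lock, flags);
}
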
sa11x0-dma.c
335 unsigned pch, pch_alloc = 0; sa11x0_dma_tasklet() local
357 for (pch = 0; pch < NR_PHY_CHAN; pch++) { sa11x0_dma_tasklet()
358 p = &d->phy[pch]; sa11x0_dma_tasklet()
365 pch_alloc |= 1 << pch; sa11x0_dma_tasklet()
370 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); sa11x0_dma_tasklet()
375 for (pch = 0; pch < NR_PHY_CHAN; pch++) { sa11x0_dma_tasklet()
376 if (pch_alloc & (1 << pch)) { sa11x0_dma_tasklet()
377 p = &d->phy[pch]; sa11x0_dma_tasklet()
977 unsigned pch; sa11x0_dma_remove() local
982 for (pch = 0; pch < NR_PHY_CHAN; pch++) sa11x0_dma_remove()
983 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]); sa11x0_dma_remove()
994 unsigned pch; sa11x0_dma_suspend() local
996 for (pch = 0; pch < NR_PHY_CHAN; pch++) { sa11x0_dma_suspend()
997 struct sa11x0_dma_phy *p = &d->phy[pch]; sa11x0_dma_suspend()
1032 unsigned pch; sa11x0_dma_resume() local
1034 for (pch = 0; pch < NR_PHY_CHAN; pch++) { sa11x0_dma_resume()
1035 struct sa11x0_dma_phy *p = &d->phy[pch]; sa11x0_dma_resume()
k3dma.c
265 unsigned pch, pch_alloc = 0; k3_dma_tasklet() local
285 for (pch = 0; pch < d->dma_channels; pch++) { k3_dma_tasklet()
286 p = &d->phy[pch]; k3_dma_tasklet()
293 pch_alloc |= 1 << pch; k3_dma_tasklet()
297 dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); k3_dma_tasklet()
302 for (pch = 0; pch < d->dma_channels; pch++) { k3_dma_tasklet()
303 if (pch_alloc & (1 << pch)) { k3_dma_tasklet()
304 p = &d->phy[pch]; k3_dma_tasklet()
zx296702_dma.c
224 unsigned pch, pch_alloc = 0; zx_dma_task() local
261 for (pch = 0; pch < d->dma_channels; pch++) { zx_dma_task()
262 if (pch_alloc & (1 << pch)) { zx_dma_task()
263 p = &d->phy[pch]; zx_dma_task()
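
sa11x0-dma.c, k3dma.c and zx296702_dma.c all use the same two-pass tasklet idiom: the first loop pairs idle physical channels with waiting virtual channels and records each pairing in the pch_alloc bitmask, and the second loop starts only the channels claimed in this pass. Here is a standalone user-space illustration of that idiom; the identifiers pch, pch_alloc and NR_PHY_CHAN mirror the hits, everything else is invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define NR_PHY_CHAN 6

struct phy { bool busy; int vchan; };

static void tasklet_like(struct phy *phy, const int *pending, int npending)
{
        unsigned pch, pch_alloc = 0;
        int next = 0;

        /* pass 1: claim idle physical channels for pending virtual channels */
        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                if (!phy[pch].busy && next < npending) {
                        phy[pch].busy = true;
                        phy[pch].vchan = pending[next++];
                        pch_alloc |= 1 << pch;
                }
        }

        /* pass 2: start only the channels allocated in this pass */
        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                if (pch_alloc & (1 << pch))
                        printf("pchan %u: alloc vchan %d\n", pch, phy[pch].vchan);
        }
}

int main(void)
{
        struct phy phy[NR_PHY_CHAN] = { { .busy = true } };  /* channel 0 already busy */
        int pending[] = { 11, 12, 13 };

        tasklet_like(phy, pending, 3);
        return 0;
}
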
pch_dma.c
27 #define DRV_NAME "pch-dma"
/linux-4.4.14/drivers/isdn/mISDN/
stack.c
572 struct mISDNchannel *pch; delete_channel() local
599 pch = get_channel4id(ch->st, ch->nr); delete_channel()
600 if (pch) { delete_channel()
602 list_del(&pch->list); delete_channel()
604 pch->ctrl(pch, CLOSE_CHANNEL, NULL); delete_channel()
605 pch = ch->st->dev->teimgr; delete_channel()
606 pch->ctrl(pch, CLOSE_CHANNEL, NULL); delete_channel()
612 pch = ch->st->dev->teimgr; delete_channel()
613 if (pch) { delete_channel()
614 pch->ctrl(pch, CLOSE_CHANNEL, NULL); delete_channel()
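
The stack.c hits show mISDN's convention of tearing a channel down through its own ctrl() hook: delete_channel() unlinks the entry and then issues CLOSE_CHANNEL to it (and to the TEI manager). A reduced sketch of that callback pattern follows; the opcode value and the demo types are invented for illustration, only the shape of the ctrl() call matches the hits.

#include <linux/list.h>

#define DEMO_CLOSE_CHANNEL 0    /* stand-in; the real opcode is defined by mISDN */

struct demo_mchannel {
        struct list_head list;
        int (*ctrl)(struct demo_mchannel *ch, unsigned int cmd, void *arg);
};

static void demo_delete_channel(struct demo_mchannel *pch)
{
        list_del(&pch->list);                           /* unhook from the stack's channel list */
        pch->ctrl(pch, DEMO_CLOSE_CHANNEL, NULL);       /* ask the channel to close itself */
}
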
/linux-4.4.14/drivers/gpu/drm/i915/
intel_fifo_underrun.c
224 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", cpt_set_fifo_underrun_reporting()
309 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT intel_set_pch_fifo_underrun_reporting()
310 * has only one pch transcoder A that all pipes can use. To avoid racy intel_set_pch_fifo_underrun_reporting()
311 * pch transcoder -> pipe lookups from interrupt code simply store the intel_set_pch_fifo_underrun_reporting()
i915_drv.c
477 struct pci_dev *pch = NULL; intel_detect_pch() local
498 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { intel_detect_pch()
499 if (pch->vendor == PCI_VENDOR_ID_INTEL) { intel_detect_pch()
500 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; intel_detect_pch()
536 pch->subsystem_vendor == 0x1af4 && intel_detect_pch()
537 pch->subsystem_device == 0x1100)) { intel_detect_pch()
545 if (!pch) intel_detect_pch()
548 pci_dev_put(pch); intel_detect_pch()
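
The i915_drv.c fragments above outline how intel_detect_pch() finds the platform controller hub: walk ISA-bridge PCI devices with pci_get_class(), match Intel's vendor ID, mask the device ID, and also allow a QEMU quirk (subsystem ID 0x1af4:0x1100) that the hits show. A minimal sketch of that probe loop is below; the mask value is an assumption for illustration, and the quirk handling is omitted.

#include <linux/pci.h>

#define DEMO_PCH_DEVICE_ID_MASK 0xff00  /* assumption: stands in for INTEL_PCH_DEVICE_ID_MASK */

static unsigned short demo_detect_pch(void)
{
        struct pci_dev *pch = NULL;
        unsigned short id = 0;

        /* iterate every ISA bridge; pci_get_class() drops the previous ref */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor != PCI_VENDOR_ID_INTEL)
                        continue;
                id = pch->device & DEMO_PCH_DEVICE_ID_MASK;
                break;                  /* first Intel ISA bridge is the PCH */
        }
        pci_dev_put(pch);               /* drops the held reference; NULL is a no-op */
        return id;
}
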
intel_panel.c
870 DRM_DEBUG_KMS("pch backlight already enabled\n"); lpt_enable_backlight()
913 DRM_DEBUG_KMS("pch backlight already enabled\n"); pch_enable_backlight()
intel_lvds.c
184 * special lvds dither control bit on pch-split platforms, dithering is intel_pre_enable_lvds()
intel_drv.h
359 * between pch encoders and cpu encoders. */
i915_debugfs.c
82 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); i915_capabilities()
2983 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", for_each_intel_crtc()
intel_display.c
1981 * pch transcoder. */ ironlake_enable_pch_transcoder()
4182 /* XXX: pch pll's can be enabled any time before we enable the PCH ironlake_pch_enable()
4928 * cpu pipes, hence this is separate from all the other fdi/pch for_each_encoder_on_crtc()
12012 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", intel_dump_pipe_config()
12054 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", intel_dump_pipe_config()
i915_drv.h
365 /* i9xx, pch plls */
i915_irq.c
2346 * on older pch-split platforms. But this needs testing.
i915_reg.h
3173 /* CPT uses bits 29:30 for pch transcoder select */
/linux-4.4.14/drivers/spi/
Makefile
89 obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
spi-topcliff-pch.c
1559 .name = "pch-spi",
1603 pd_dev = platform_device_alloc("pch-spi", i); pch_spi_probe()
/linux-4.4.14/drivers/thermal/
intel_pch_thermal.c
205 dev_err(&pdev->dev, "unknown pch thermal device\n"); intel_pch_thermal_probe()
/linux-4.4.14/drivers/isdn/i4l/
isdn_net.c
2154 printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n", isdn_net_find_icall()
/linux-4.4.14/drivers/net/ethernet/oki-semi/pch_gbe/
pch_gbe_main.c
561 netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n"); pch_gbe_mac_ctrl_miim()
