Searched refs:ss (Results 1 - 200 of 285) sorted by relevance

/linux-4.1.27/drivers/spi/
spi-sh.c
93 static void spi_sh_write(struct spi_sh_data *ss, unsigned long data, spi_sh_write() argument
96 if (ss->width == 8) spi_sh_write()
97 iowrite8(data, ss->addr + (offset >> 2)); spi_sh_write()
98 else if (ss->width == 32) spi_sh_write()
99 iowrite32(data, ss->addr + offset); spi_sh_write()
102 static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset) spi_sh_read() argument
104 if (ss->width == 8) spi_sh_read()
105 return ioread8(ss->addr + (offset >> 2)); spi_sh_read()
106 else if (ss->width == 32) spi_sh_read()
107 return ioread32(ss->addr + offset); spi_sh_read()
112 static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val, spi_sh_set_bit() argument
117 tmp = spi_sh_read(ss, offset); spi_sh_set_bit()
119 spi_sh_write(ss, tmp, offset); spi_sh_set_bit()
122 static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val, spi_sh_clear_bit() argument
127 tmp = spi_sh_read(ss, offset); spi_sh_clear_bit()
129 spi_sh_write(ss, tmp, offset); spi_sh_clear_bit()
132 static void clear_fifo(struct spi_sh_data *ss) clear_fifo() argument
134 spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); clear_fifo()
135 spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); clear_fifo()
138 static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss) spi_sh_wait_receive_buffer() argument
142 while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { spi_sh_wait_receive_buffer()
150 static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss) spi_sh_wait_write_buffer_empty() argument
154 while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) { spi_sh_wait_write_buffer_empty()
162 static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg, spi_sh_send() argument
172 spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); spi_sh_send()
178 !(spi_sh_read(ss, SPI_SH_CR4) & spi_sh_send()
180 !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF); spi_sh_send()
182 spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR); spi_sh_send()
184 if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) { spi_sh_send()
186 spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4); spi_sh_send()
197 ss->cr1 &= ~SPI_SH_TBE; spi_sh_send()
198 spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); spi_sh_send()
199 ret = wait_event_interruptible_timeout(ss->wait, spi_sh_send()
200 ss->cr1 & SPI_SH_TBE, spi_sh_send()
202 if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) { spi_sh_send()
210 spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1); spi_sh_send()
211 spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); spi_sh_send()
213 ss->cr1 &= ~SPI_SH_TBE; spi_sh_send()
214 spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); spi_sh_send()
215 ret = wait_event_interruptible_timeout(ss->wait, spi_sh_send()
216 ss->cr1 & SPI_SH_TBE, spi_sh_send()
218 if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) { spi_sh_send()
227 static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, spi_sh_receive() argument
237 spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3); spi_sh_receive()
239 spi_sh_write(ss, t->len, SPI_SH_CR3); spi_sh_receive()
241 spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1); spi_sh_receive()
242 spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); spi_sh_receive()
244 spi_sh_wait_write_buffer_empty(ss); spi_sh_receive()
249 ss->cr1 &= ~SPI_SH_RBF; spi_sh_receive()
250 spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4); spi_sh_receive()
251 ret = wait_event_interruptible_timeout(ss->wait, spi_sh_receive()
252 ss->cr1 & SPI_SH_RBF, spi_sh_receive()
255 spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { spi_sh_receive()
263 if (spi_sh_wait_receive_buffer(ss)) spi_sh_receive()
265 data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR); spi_sh_receive()
274 clear_fifo(ss); spi_sh_receive()
275 spi_sh_write(ss, 1, SPI_SH_CR3); spi_sh_receive()
277 spi_sh_write(ss, 0, SPI_SH_CR3); spi_sh_receive()
285 struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); spi_sh_work() local
293 spin_lock_irqsave(&ss->lock, flags); spi_sh_work()
294 while (!list_empty(&ss->queue)) { spi_sh_work()
295 mesg = list_entry(ss->queue.next, struct spi_message, queue); spi_sh_work()
298 spin_unlock_irqrestore(&ss->lock, flags); spi_sh_work()
306 ret = spi_sh_send(ss, mesg, t); spi_sh_work()
311 ret = spi_sh_receive(ss, mesg, t); spi_sh_work()
317 spin_lock_irqsave(&ss->lock, flags); spi_sh_work()
324 clear_fifo(ss); spi_sh_work()
325 spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); spi_sh_work()
328 spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, spi_sh_work()
331 clear_fifo(ss); spi_sh_work()
333 spin_unlock_irqrestore(&ss->lock, flags); spi_sh_work()
342 spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, spi_sh_work()
344 clear_fifo(ss); spi_sh_work()
350 struct spi_sh_data *ss = spi_master_get_devdata(spi->master); spi_sh_setup() local
354 spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */ spi_sh_setup()
355 spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */ spi_sh_setup()
356 spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */ spi_sh_setup()
358 clear_fifo(ss); spi_sh_setup()
361 spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2); spi_sh_setup()
369 struct spi_sh_data *ss = spi_master_get_devdata(spi->master); spi_sh_transfer() local
375 spin_lock_irqsave(&ss->lock, flags); spi_sh_transfer()
380 spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); spi_sh_transfer()
382 list_add_tail(&mesg->queue, &ss->queue); spi_sh_transfer()
383 queue_work(ss->workqueue, &ss->ws); spi_sh_transfer()
385 spin_unlock_irqrestore(&ss->lock, flags); spi_sh_transfer()
392 struct spi_sh_data *ss = spi_master_get_devdata(spi->master); spi_sh_cleanup() local
396 spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, spi_sh_cleanup()
402 struct spi_sh_data *ss = (struct spi_sh_data *)_ss; spi_sh_irq() local
405 cr1 = spi_sh_read(ss, SPI_SH_CR1); spi_sh_irq()
407 ss->cr1 |= SPI_SH_TBE; spi_sh_irq()
409 ss->cr1 |= SPI_SH_TBF; spi_sh_irq()
411 ss->cr1 |= SPI_SH_RBE; spi_sh_irq()
413 ss->cr1 |= SPI_SH_RBF; spi_sh_irq()
415 if (ss->cr1) { spi_sh_irq()
416 spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4); spi_sh_irq()
417 wake_up(&ss->wait); spi_sh_irq()
425 struct spi_sh_data *ss = platform_get_drvdata(pdev); spi_sh_remove() local
427 spi_unregister_master(ss->master); spi_sh_remove()
428 destroy_workqueue(ss->workqueue); spi_sh_remove()
429 free_irq(ss->irq, ss); spi_sh_remove()
438 struct spi_sh_data *ss; spi_sh_probe() local
460 ss = spi_master_get_devdata(master); spi_sh_probe()
461 platform_set_drvdata(pdev, ss); spi_sh_probe()
465 ss->width = 8; spi_sh_probe()
468 ss->width = 32; spi_sh_probe()
475 ss->irq = irq; spi_sh_probe()
476 ss->master = master; spi_sh_probe()
477 ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); spi_sh_probe()
478 if (ss->addr == NULL) { spi_sh_probe()
483 INIT_LIST_HEAD(&ss->queue); spi_sh_probe()
484 spin_lock_init(&ss->lock); spi_sh_probe()
485 INIT_WORK(&ss->ws, spi_sh_work); spi_sh_probe()
486 init_waitqueue_head(&ss->wait); spi_sh_probe()
487 ss->workqueue = create_singlethread_workqueue( spi_sh_probe()
489 if (ss->workqueue == NULL) { spi_sh_probe()
495 ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss); spi_sh_probe()
516 free_irq(irq, ss); spi_sh_probe()
518 destroy_workqueue(ss->workqueue); spi_sh_probe()
spi-xilinx.c
383 of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits", xilinx_spi_probe()
/linux-4.1.27/security/selinux/
Makefile
9 ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
10 ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
/linux-4.1.27/arch/m68k/lib/
memset.c
27 short *ss = s; memset() local
28 *ss++ = c; memset()
29 s = ss; memset()
64 short *ss = s; memset() local
65 *ss++ = c; memset()
66 s = ss; memset()
/linux-4.1.27/include/net/
tcp_memcontrol.h
5 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
/linux-4.1.27/drivers/media/pci/solo6x10/
solo6x10-g723.c
91 struct snd_pcm_substream *ss; solo_g723_isr() local
94 for (ss = pstr->substream; ss != NULL; ss = ss->next) { solo_g723_isr()
95 if (snd_pcm_substream_chip(ss) == NULL) solo_g723_isr()
99 if (snd_pcm_substream_chip(ss) == solo_dev) solo_g723_isr()
103 solo_pcm = snd_pcm_substream_chip(ss); solo_g723_isr()
107 snd_pcm_period_elapsed(ss); solo_g723_isr()
111 static int snd_solo_hw_params(struct snd_pcm_substream *ss, snd_solo_hw_params() argument
114 return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params)); snd_solo_hw_params()
117 static int snd_solo_hw_free(struct snd_pcm_substream *ss) snd_solo_hw_free() argument
119 return snd_pcm_lib_free_pages(ss); snd_solo_hw_free()
140 static int snd_solo_pcm_open(struct snd_pcm_substream *ss) snd_solo_pcm_open() argument
142 struct solo_dev *solo_dev = snd_pcm_substream_chip(ss); snd_solo_pcm_open()
157 ss->runtime->hw = snd_solo_pcm_hw; snd_solo_pcm_open()
159 snd_pcm_substream_chip(ss) = solo_pcm; snd_solo_pcm_open()
168 static int snd_solo_pcm_close(struct snd_pcm_substream *ss) snd_solo_pcm_close() argument
170 struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss); snd_solo_pcm_close()
172 snd_pcm_substream_chip(ss) = solo_pcm->solo_dev; snd_solo_pcm_close()
180 static int snd_solo_pcm_trigger(struct snd_pcm_substream *ss, int cmd) snd_solo_pcm_trigger() argument
182 struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss); snd_solo_pcm_trigger()
214 static int snd_solo_pcm_prepare(struct snd_pcm_substream *ss) snd_solo_pcm_prepare() argument
219 static snd_pcm_uframes_t snd_solo_pcm_pointer(struct snd_pcm_substream *ss) snd_solo_pcm_pointer() argument
221 struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss); snd_solo_pcm_pointer()
228 static int snd_solo_pcm_copy(struct snd_pcm_substream *ss, int channel, snd_solo_pcm_copy() argument
232 struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss); snd_solo_pcm_copy()
242 (ss->number * G723_PERIOD_BYTES), snd_solo_pcm_copy()
320 struct snd_pcm_substream *ss; solo_snd_pcm_init() local
336 for (i = 0, ss = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; solo_snd_pcm_init()
337 ss; ss = ss->next, i++) solo_snd_pcm_init()
338 sprintf(ss->name, "Camera #%d Audio", i); solo_snd_pcm_init()
/linux-4.1.27/arch/mips/boot/compressed/
string.c
23 char *ss = s; memset() local
26 ss[i] = c; memset()
/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/
myri10ge.c
213 struct myri10ge_slice_state *ss; member in struct:myri10ge_priv
929 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) myri10ge_ss_init_lock() argument
931 spin_lock_init(&ss->lock); myri10ge_ss_init_lock()
932 ss->state = SLICE_STATE_IDLE; myri10ge_ss_init_lock()
935 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) myri10ge_ss_lock_napi() argument
938 spin_lock(&ss->lock); myri10ge_ss_lock_napi()
939 if ((ss->state & SLICE_LOCKED)) { myri10ge_ss_lock_napi()
940 WARN_ON((ss->state & SLICE_STATE_NAPI)); myri10ge_ss_lock_napi()
941 ss->state |= SLICE_STATE_NAPI_YIELD; myri10ge_ss_lock_napi()
943 ss->lock_napi_yield++; myri10ge_ss_lock_napi()
945 ss->state = SLICE_STATE_NAPI; myri10ge_ss_lock_napi()
946 spin_unlock(&ss->lock); myri10ge_ss_lock_napi()
950 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) myri10ge_ss_unlock_napi() argument
952 spin_lock(&ss->lock); myri10ge_ss_unlock_napi()
953 WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD))); myri10ge_ss_unlock_napi()
954 ss->state = SLICE_STATE_IDLE; myri10ge_ss_unlock_napi()
955 spin_unlock(&ss->lock); myri10ge_ss_unlock_napi()
958 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) myri10ge_ss_lock_poll() argument
961 spin_lock_bh(&ss->lock); myri10ge_ss_lock_poll()
962 if ((ss->state & SLICE_LOCKED)) { myri10ge_ss_lock_poll()
963 ss->state |= SLICE_STATE_POLL_YIELD; myri10ge_ss_lock_poll()
965 ss->lock_poll_yield++; myri10ge_ss_lock_poll()
967 ss->state |= SLICE_STATE_POLL; myri10ge_ss_lock_poll()
968 spin_unlock_bh(&ss->lock); myri10ge_ss_lock_poll()
972 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) myri10ge_ss_unlock_poll() argument
974 spin_lock_bh(&ss->lock); myri10ge_ss_unlock_poll()
975 WARN_ON((ss->state & SLICE_STATE_NAPI)); myri10ge_ss_unlock_poll()
976 ss->state = SLICE_STATE_IDLE; myri10ge_ss_unlock_poll()
977 spin_unlock_bh(&ss->lock); myri10ge_ss_unlock_poll()
980 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) myri10ge_ss_busy_polling() argument
982 WARN_ON(!(ss->state & SLICE_LOCKED)); myri10ge_ss_busy_polling()
983 return (ss->state & SLICE_USER_PEND); myri10ge_ss_busy_polling()
986 static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss) myri10ge_ss_init_lock() argument
990 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss) myri10ge_ss_lock_napi() argument
995 static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss) myri10ge_ss_unlock_napi() argument
999 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss) myri10ge_ss_lock_poll() argument
1004 static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss) myri10ge_ss_unlock_poll() argument
1008 static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss) myri10ge_ss_busy_polling() argument
1017 struct myri10ge_slice_state *ss; myri10ge_reset() local
1045 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry); myri10ge_reset()
1102 ss = &mgp->ss[i]; myri10ge_reset()
1103 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus); myri10ge_reset()
1104 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus); myri10ge_reset()
1113 ss = &mgp->ss[i]; myri10ge_reset()
1114 ss->irq_claim = myri10ge_reset()
1134 ss = &mgp->ss[i]; myri10ge_reset()
1136 ss->dca_tag = (__iomem __be32 *) myri10ge_reset()
1139 ss->dca_tag = NULL; myri10ge_reset()
1148 ss = &mgp->ss[i]; myri10ge_reset()
1150 memset(ss->rx_done.entry, 0, bytes); myri10ge_reset()
1151 ss->tx.req = 0; myri10ge_reset()
1152 ss->tx.done = 0; myri10ge_reset()
1153 ss->tx.pkt_start = 0; myri10ge_reset()
1154 ss->tx.pkt_done = 0; myri10ge_reset()
1155 ss->rx_big.cnt = 0; myri10ge_reset()
1156 ss->rx_small.cnt = 0; myri10ge_reset()
1157 ss->rx_done.idx = 0; myri10ge_reset()
1158 ss->rx_done.cnt = 0; myri10ge_reset()
1159 ss->tx.wake_queue = 0; myri10ge_reset()
1160 ss->tx.stop_queue = 0; myri10ge_reset()
1187 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) myri10ge_write_dca() argument
1189 ss->cached_dca_tag = tag; myri10ge_write_dca()
1190 put_be32(htonl(tag), ss->dca_tag); myri10ge_write_dca()
1193 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss) myri10ge_update_dca() argument
1198 if (cpu != ss->cpu) { myri10ge_update_dca()
1199 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu); myri10ge_update_dca()
1200 if (ss->cached_dca_tag != tag) myri10ge_update_dca()
1201 myri10ge_write_dca(ss, cpu, tag); myri10ge_update_dca()
1202 ss->cpu = cpu; myri10ge_update_dca()
1212 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled) myri10ge_setup_dca()
1228 mgp->ss[i].cpu = -1; myri10ge_setup_dca()
1229 mgp->ss[i].cached_dca_tag = -1; myri10ge_setup_dca()
1230 myri10ge_update_dca(&mgp->ss[i]); myri10ge_setup_dca()
1419 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum) myri10ge_rx_done() argument
1421 struct myri10ge_priv *mgp = ss->mgp; myri10ge_rx_done()
1432 rx = &ss->rx_small; myri10ge_rx_done()
1435 rx = &ss->rx_big; myri10ge_rx_done()
1448 polling = myri10ge_ss_busy_polling(ss); myri10ge_rx_done()
1452 skb = napi_get_frags(&ss->napi); myri10ge_rx_done()
1454 ss->stats.rx_dropped++; myri10ge_rx_done()
1490 skb_record_rx_queue(skb, ss - &mgp->ss[0]); myri10ge_rx_done()
1491 skb_mark_napi_id(skb, &ss->napi); myri10ge_rx_done()
1512 napi_gro_frags(&ss->napi); myri10ge_rx_done()
1518 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) myri10ge_tx_done() argument
1520 struct pci_dev *pdev = ss->mgp->pdev; myri10ge_tx_done()
1521 struct myri10ge_tx_buf *tx = &ss->tx; myri10ge_tx_done()
1540 ss->stats.tx_bytes += skb->len; myri10ge_tx_done()
1541 ss->stats.tx_packets++; myri10ge_tx_done()
1557 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss); myri10ge_tx_done()
1567 if ((ss->mgp->dev->real_num_tx_queues > 1) && myri10ge_tx_done()
1581 ss->mgp->running == MYRI10GE_ETH_RUNNING) { myri10ge_tx_done()
1588 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) myri10ge_clean_rx_done() argument
1590 struct myri10ge_rx_done *rx_done = &ss->rx_done; myri10ge_clean_rx_done()
1591 struct myri10ge_priv *mgp = ss->mgp; myri10ge_clean_rx_done()
1605 rx_ok = myri10ge_rx_done(ss, length, checksum); myri10ge_clean_rx_done()
1614 ss->stats.rx_packets += rx_packets; myri10ge_clean_rx_done()
1615 ss->stats.rx_bytes += rx_bytes; myri10ge_clean_rx_done()
1618 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) myri10ge_clean_rx_done()
1619 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, myri10ge_clean_rx_done()
1621 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) myri10ge_clean_rx_done()
1622 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); myri10ge_clean_rx_done()
1629 struct mcp_irq_data *stats = mgp->ss[0].fw_stats; myri10ge_check_statblock()
1664 struct myri10ge_slice_state *ss = myri10ge_poll() local
1669 if (ss->mgp->dca_enabled) myri10ge_poll()
1670 myri10ge_update_dca(ss); myri10ge_poll()
1673 if (!myri10ge_ss_lock_napi(ss)) myri10ge_poll()
1677 work_done = myri10ge_clean_rx_done(ss, budget); myri10ge_poll()
1679 myri10ge_ss_unlock_napi(ss); myri10ge_poll()
1682 put_be32(htonl(3), ss->irq_claim); myri10ge_poll()
1690 struct myri10ge_slice_state *ss = myri10ge_busy_poll() local
1692 struct myri10ge_priv *mgp = ss->mgp; myri10ge_busy_poll()
1699 if (!myri10ge_ss_lock_poll(ss)) myri10ge_busy_poll()
1703 work_done = myri10ge_clean_rx_done(ss, 4); myri10ge_busy_poll()
1705 ss->busy_poll_cnt += work_done; myri10ge_busy_poll()
1707 ss->busy_poll_miss++; myri10ge_busy_poll()
1709 myri10ge_ss_unlock_poll(ss); myri10ge_busy_poll()
1717 struct myri10ge_slice_state *ss = arg; myri10ge_intr() local
1718 struct myri10ge_priv *mgp = ss->mgp; myri10ge_intr()
1719 struct mcp_irq_data *stats = ss->fw_stats; myri10ge_intr()
1720 struct myri10ge_tx_buf *tx = &ss->tx; myri10ge_intr()
1726 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { myri10ge_intr()
1727 napi_schedule(&ss->napi); myri10ge_intr()
1738 napi_schedule(&ss->napi); myri10ge_intr()
1755 myri10ge_tx_done(ss, (int)send_done_count); myri10ge_intr()
1768 if (ss == mgp->ss) myri10ge_intr()
1771 put_be32(htonl(3), ss->irq_claim + 1); myri10ge_intr()
1885 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1; myri10ge_get_ringparam()
1886 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1; myri10ge_get_ringparam()
1888 ring->tx_max_pending = mgp->ss[0].tx.mask + 1; myri10ge_get_ringparam()
1970 struct myri10ge_slice_state *ss; myri10ge_get_ethtool_stats() local
1991 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); myri10ge_get_ethtool_stats()
1997 ss = &mgp->ss[0]; myri10ge_get_ethtool_stats()
1998 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); myri10ge_get_ethtool_stats()
1999 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); myri10ge_get_ethtool_stats()
2001 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); myri10ge_get_ethtool_stats()
2002 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); myri10ge_get_ethtool_stats()
2003 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); myri10ge_get_ethtool_stats()
2004 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); myri10ge_get_ethtool_stats()
2005 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); myri10ge_get_ethtool_stats()
2007 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); myri10ge_get_ethtool_stats()
2008 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); myri10ge_get_ethtool_stats()
2009 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); myri10ge_get_ethtool_stats()
2010 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); myri10ge_get_ethtool_stats()
2011 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); myri10ge_get_ethtool_stats()
2014 ss = &mgp->ss[slice]; myri10ge_get_ethtool_stats()
2016 data[i++] = (unsigned int)ss->tx.pkt_start; myri10ge_get_ethtool_stats()
2017 data[i++] = (unsigned int)ss->tx.pkt_done; myri10ge_get_ethtool_stats()
2018 data[i++] = (unsigned int)ss->tx.req; myri10ge_get_ethtool_stats()
2019 data[i++] = (unsigned int)ss->tx.done; myri10ge_get_ethtool_stats()
2020 data[i++] = (unsigned int)ss->rx_small.cnt; myri10ge_get_ethtool_stats()
2021 data[i++] = (unsigned int)ss->rx_big.cnt; myri10ge_get_ethtool_stats()
2022 data[i++] = (unsigned int)ss->tx.wake_queue; myri10ge_get_ethtool_stats()
2023 data[i++] = (unsigned int)ss->tx.stop_queue; myri10ge_get_ethtool_stats()
2024 data[i++] = (unsigned int)ss->tx.linearized; myri10ge_get_ethtool_stats()
2026 data[i++] = ss->lock_napi_yield; myri10ge_get_ethtool_stats()
2027 data[i++] = ss->lock_poll_yield; myri10ge_get_ethtool_stats()
2028 data[i++] = ss->busy_poll_miss; myri10ge_get_ethtool_stats()
2029 data[i++] = ss->busy_poll_cnt; myri10ge_get_ethtool_stats()
2117 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) myri10ge_allocate_rings() argument
2119 struct myri10ge_priv *mgp = ss->mgp; myri10ge_allocate_rings()
2128 slice = ss - mgp->ss; myri10ge_allocate_rings()
2140 ss->tx.mask = tx_ring_entries - 1; myri10ge_allocate_rings()
2141 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; myri10ge_allocate_rings()
2148 * sizeof(*ss->tx.req_list); myri10ge_allocate_rings()
2149 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2150 if (ss->tx.req_bytes == NULL) myri10ge_allocate_rings()
2154 ss->tx.req_list = (struct mcp_kreq_ether_send *) myri10ge_allocate_rings()
2155 ALIGN((unsigned long)ss->tx.req_bytes, 8); myri10ge_allocate_rings()
2156 ss->tx.queue_active = 0; myri10ge_allocate_rings()
2158 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); myri10ge_allocate_rings()
2159 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2160 if (ss->rx_small.shadow == NULL) myri10ge_allocate_rings()
2163 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); myri10ge_allocate_rings()
2164 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2165 if (ss->rx_big.shadow == NULL) myri10ge_allocate_rings()
2170 bytes = tx_ring_entries * sizeof(*ss->tx.info); myri10ge_allocate_rings()
2171 ss->tx.info = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2172 if (ss->tx.info == NULL) myri10ge_allocate_rings()
2175 bytes = rx_ring_entries * sizeof(*ss->rx_small.info); myri10ge_allocate_rings()
2176 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2177 if (ss->rx_small.info == NULL) myri10ge_allocate_rings()
2180 bytes = rx_ring_entries * sizeof(*ss->rx_big.info); myri10ge_allocate_rings()
2181 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings()
2182 if (ss->rx_big.info == NULL) myri10ge_allocate_rings()
2186 ss->rx_big.cnt = 0; myri10ge_allocate_rings()
2187 ss->rx_small.cnt = 0; myri10ge_allocate_rings()
2188 ss->rx_big.fill_cnt = 0; myri10ge_allocate_rings()
2189 ss->rx_small.fill_cnt = 0; myri10ge_allocate_rings()
2190 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; myri10ge_allocate_rings()
2191 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; myri10ge_allocate_rings()
2192 ss->rx_small.watchdog_needed = 0; myri10ge_allocate_rings()
2193 ss->rx_big.watchdog_needed = 0; myri10ge_allocate_rings()
2195 ss->rx_small.fill_cnt = ss->rx_small.mask + 1; myri10ge_allocate_rings()
2197 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, myri10ge_allocate_rings()
2201 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { myri10ge_allocate_rings()
2203 slice, ss->rx_small.fill_cnt); myri10ge_allocate_rings()
2207 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); myri10ge_allocate_rings()
2208 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { myri10ge_allocate_rings()
2210 slice, ss->rx_big.fill_cnt); myri10ge_allocate_rings()
2217 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { myri10ge_allocate_rings()
2218 int idx = i & ss->rx_big.mask; myri10ge_allocate_rings()
2219 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], myri10ge_allocate_rings()
2221 put_page(ss->rx_big.info[idx].page); myri10ge_allocate_rings()
2226 ss->rx_small.fill_cnt = ss->rx_small.cnt; myri10ge_allocate_rings()
2227 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { myri10ge_allocate_rings()
2228 int idx = i & ss->rx_small.mask; myri10ge_allocate_rings()
2229 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], myri10ge_allocate_rings()
2231 put_page(ss->rx_small.info[idx].page); myri10ge_allocate_rings()
2234 kfree(ss->rx_big.info); myri10ge_allocate_rings()
2237 kfree(ss->rx_small.info); myri10ge_allocate_rings()
2240 kfree(ss->tx.info); myri10ge_allocate_rings()
2243 kfree(ss->rx_big.shadow); myri10ge_allocate_rings()
2246 kfree(ss->rx_small.shadow); myri10ge_allocate_rings()
2249 kfree(ss->tx.req_bytes); myri10ge_allocate_rings()
2250 ss->tx.req_bytes = NULL; myri10ge_allocate_rings()
2251 ss->tx.req_list = NULL; myri10ge_allocate_rings()
2257 static void myri10ge_free_rings(struct myri10ge_slice_state *ss) myri10ge_free_rings() argument
2259 struct myri10ge_priv *mgp = ss->mgp; myri10ge_free_rings()
2265 if (ss->tx.req_list == NULL) myri10ge_free_rings()
2268 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { myri10ge_free_rings()
2269 idx = i & ss->rx_big.mask; myri10ge_free_rings()
2270 if (i == ss->rx_big.fill_cnt - 1) myri10ge_free_rings()
2271 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; myri10ge_free_rings()
2272 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], myri10ge_free_rings()
2274 put_page(ss->rx_big.info[idx].page); myri10ge_free_rings()
2278 ss->rx_small.fill_cnt = ss->rx_small.cnt; myri10ge_free_rings()
2279 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { myri10ge_free_rings()
2280 idx = i & ss->rx_small.mask; myri10ge_free_rings()
2281 if (i == ss->rx_small.fill_cnt - 1) myri10ge_free_rings()
2282 ss->rx_small.info[idx].page_offset = myri10ge_free_rings()
2284 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], myri10ge_free_rings()
2286 put_page(ss->rx_small.info[idx].page); myri10ge_free_rings()
2288 tx = &ss->tx; myri10ge_free_rings()
2299 ss->stats.tx_dropped++; myri10ge_free_rings()
2314 kfree(ss->rx_big.info); myri10ge_free_rings()
2316 kfree(ss->rx_small.info); myri10ge_free_rings()
2318 kfree(ss->tx.info); myri10ge_free_rings()
2320 kfree(ss->rx_big.shadow); myri10ge_free_rings()
2322 kfree(ss->rx_small.shadow); myri10ge_free_rings()
2324 kfree(ss->tx.req_bytes); myri10ge_free_rings()
2325 ss->tx.req_bytes = NULL; myri10ge_free_rings()
2326 ss->tx.req_list = NULL; myri10ge_free_rings()
2332 struct myri10ge_slice_state *ss; myri10ge_request_irq() local
2364 ss = &mgp->ss[i]; myri10ge_request_irq()
2365 snprintf(ss->irq_desc, sizeof(ss->irq_desc), myri10ge_request_irq()
2368 myri10ge_intr, 0, ss->irq_desc, myri10ge_request_irq()
2369 ss); myri10ge_request_irq()
2376 &mgp->ss[i]); myri10ge_request_irq()
2385 mgp->dev->name, &mgp->ss[0]); myri10ge_request_irq()
2402 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]); myri10ge_free_irq()
2404 free_irq(pdev->irq, &mgp->ss[0]); myri10ge_free_irq()
2415 struct myri10ge_slice_state *ss; myri10ge_get_txrx() local
2418 ss = &mgp->ss[slice]; myri10ge_get_txrx()
2424 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) myri10ge_get_txrx()
2430 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *) myri10ge_get_txrx()
2435 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) myri10ge_get_txrx()
2438 ss->tx.send_go = (__iomem __be32 *) myri10ge_get_txrx()
2440 ss->tx.send_stop = (__iomem __be32 *) myri10ge_get_txrx()
2449 struct myri10ge_slice_state *ss; myri10ge_set_stats() local
2452 ss = &mgp->ss[slice]; myri10ge_set_stats()
2453 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus); myri10ge_set_stats()
2454 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus); myri10ge_set_stats()
2458 dma_addr_t bus = ss->fw_stats_bus; myri10ge_set_stats()
2477 struct myri10ge_slice_state *ss; myri10ge_open() local
2571 ss = &mgp->ss[slice]; myri10ge_open()
2578 status = myri10ge_allocate_rings(ss); myri10ge_open()
2593 myri10ge_ss_init_lock(ss); myri10ge_open()
2596 napi_enable(&(ss)->napi); myri10ge_open()
2644 napi_disable(&mgp->ss[slice].napi); myri10ge_open()
2647 myri10ge_free_rings(&mgp->ss[i]); myri10ge_open()
2666 if (mgp->ss[0].tx.req_bytes == NULL) myri10ge_close()
2673 napi_disable(&mgp->ss[i].napi); myri10ge_close()
2678 while (!myri10ge_ss_lock_napi(&mgp->ss[i])) { myri10ge_close()
2703 myri10ge_free_rings(&mgp->ss[i]); myri10ge_close()
2819 struct myri10ge_slice_state *ss; myri10ge_xmit() local
2834 ss = &mgp->ss[queue]; myri10ge_xmit()
2836 tx = &ss->tx; myri10ge_xmit()
2913 ss->stats.tx_dropped += 1; myri10ge_xmit()
3079 ss->stats.tx_dropped += 1; myri10ge_xmit()
3089 struct myri10ge_slice_state *ss; myri10ge_sw_tso() local
3116 ss = &mgp->ss[skb_get_queue_mapping(skb)]; myri10ge_sw_tso()
3118 ss->stats.tx_dropped += 1; myri10ge_sw_tso()
3130 slice_stats = &mgp->ss[i].stats; myri10ge_get_stats()
3567 myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed, myri10ge_check_slice() argument
3570 struct myri10ge_priv *mgp = ss->mgp; myri10ge_check_slice()
3571 int slice = ss - mgp->ss; myri10ge_check_slice()
3573 if (ss->tx.req != ss->tx.done && myri10ge_check_slice()
3574 ss->tx.done == ss->watchdog_tx_done && myri10ge_check_slice()
3575 ss->watchdog_tx_req != ss->watchdog_tx_done) { myri10ge_check_slice()
3584 slice, ss->tx.queue_active, ss->tx.req, myri10ge_check_slice()
3585 ss->tx.done, ss->tx.pkt_start, myri10ge_check_slice()
3586 ss->tx.pkt_done, myri10ge_check_slice()
3587 (int)ntohl(mgp->ss[slice].fw_stats-> myri10ge_check_slice()
3590 ss->stuck = 1; myri10ge_check_slice()
3593 if (ss->watchdog_tx_done != ss->tx.done || myri10ge_check_slice()
3594 ss->watchdog_rx_done != ss->rx_done.cnt) { myri10ge_check_slice()
3597 ss->watchdog_tx_done = ss->tx.done; myri10ge_check_slice()
3598 ss->watchdog_tx_req = ss->tx.req; myri10ge_check_slice()
3599 ss->watchdog_rx_done = ss->rx_done.cnt; myri10ge_check_slice()
3610 struct myri10ge_slice_state *ss; myri10ge_watchdog() local
3661 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); myri10ge_watchdog()
3663 ss = mgp->ss; myri10ge_watchdog()
3664 if (ss->stuck) { myri10ge_watchdog()
3665 myri10ge_check_slice(ss, &reset_needed, myri10ge_watchdog()
3668 ss->stuck = 0; myri10ge_watchdog()
3701 struct myri10ge_slice_state *ss; myri10ge_watchdog_timer() local
3708 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause); myri10ge_watchdog_timer()
3713 ss = &mgp->ss[i]; myri10ge_watchdog_timer()
3714 if (ss->rx_small.watchdog_needed) { myri10ge_watchdog_timer()
3715 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, myri10ge_watchdog_timer()
3718 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= myri10ge_watchdog_timer()
3720 ss->rx_small.watchdog_needed = 0; myri10ge_watchdog_timer()
3722 if (ss->rx_big.watchdog_needed) { myri10ge_watchdog_timer()
3723 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, myri10ge_watchdog_timer()
3725 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= myri10ge_watchdog_timer()
3727 ss->rx_big.watchdog_needed = 0; myri10ge_watchdog_timer()
3729 myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt, myri10ge_watchdog_timer()
3754 struct myri10ge_slice_state *ss; myri10ge_free_slices() local
3759 if (mgp->ss == NULL) myri10ge_free_slices()
3763 ss = &mgp->ss[i]; myri10ge_free_slices()
3764 if (ss->rx_done.entry != NULL) { myri10ge_free_slices()
3766 sizeof(*ss->rx_done.entry); myri10ge_free_slices()
3768 ss->rx_done.entry, ss->rx_done.bus); myri10ge_free_slices()
3769 ss->rx_done.entry = NULL; myri10ge_free_slices()
3771 if (ss->fw_stats != NULL) { myri10ge_free_slices()
3772 bytes = sizeof(*ss->fw_stats); myri10ge_free_slices()
3774 ss->fw_stats, ss->fw_stats_bus); myri10ge_free_slices()
3775 ss->fw_stats = NULL; myri10ge_free_slices()
3777 napi_hash_del(&ss->napi); myri10ge_free_slices()
3778 netif_napi_del(&ss->napi); myri10ge_free_slices()
3780 /* Wait till napi structs are no longer used, and then free ss. */ myri10ge_free_slices()
3782 kfree(mgp->ss); myri10ge_free_slices()
3783 mgp->ss = NULL; myri10ge_free_slices()
3788 struct myri10ge_slice_state *ss; myri10ge_alloc_slices() local
3793 bytes = sizeof(*mgp->ss) * mgp->num_slices; myri10ge_alloc_slices()
3794 mgp->ss = kzalloc(bytes, GFP_KERNEL); myri10ge_alloc_slices()
3795 if (mgp->ss == NULL) { myri10ge_alloc_slices()
3800 ss = &mgp->ss[i]; myri10ge_alloc_slices()
3801 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); myri10ge_alloc_slices()
3802 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, myri10ge_alloc_slices()
3803 &ss->rx_done.bus, myri10ge_alloc_slices()
3805 if (ss->rx_done.entry == NULL) myri10ge_alloc_slices()
3807 bytes = sizeof(*ss->fw_stats); myri10ge_alloc_slices()
3808 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes, myri10ge_alloc_slices()
3809 &ss->fw_stats_bus, myri10ge_alloc_slices()
3811 if (ss->fw_stats == NULL) myri10ge_alloc_slices()
3813 ss->mgp = mgp; myri10ge_alloc_slices()
3814 ss->dev = mgp->dev; myri10ge_alloc_slices()
3815 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll, myri10ge_alloc_slices()
3817 napi_hash_add(&ss->napi); myri10ge_alloc_slices()
/linux-4.1.27/arch/x86/boot/compressed/
string.c
36 char *ss = s; memset() local
39 ss[i] = c; memset()
efi_thunk_64.S
40 movl %ss, %eax
78 movl %ebx, %ss
118 movl %eax, %ss
head_64.S
57 movl %eax, %ss
301 movl %eax, %ss
head_32.S
115 movl %eax, %ss
/linux-4.1.27/drivers/usb/gadget/function/
f_sourcesink.c
345 struct f_sourcesink *ss = func_to_ss(f); sourcesink_bind() local
357 ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); sourcesink_bind()
358 if (!ss->in_ep) { sourcesink_bind()
364 ss->in_ep->driver_data = cdev; /* claim */ sourcesink_bind()
366 ss->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_sink_desc); sourcesink_bind()
367 if (!ss->out_ep) sourcesink_bind()
369 ss->out_ep->driver_data = cdev; /* claim */ sourcesink_bind()
390 ss->iso_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_source_desc); sourcesink_bind()
391 if (!ss->iso_in_ep) sourcesink_bind()
393 ss->iso_in_ep->driver_data = cdev; /* claim */ sourcesink_bind()
395 ss->iso_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_sink_desc); sourcesink_bind()
396 if (ss->iso_out_ep) { sourcesink_bind()
397 ss->iso_out_ep->driver_data = cdev; /* claim */ sourcesink_bind()
399 ss->iso_in_ep->driver_data = NULL; sourcesink_bind()
400 ss->iso_in_ep = NULL; sourcesink_bind()
471 f->name, ss->in_ep->name, ss->out_ep->name, sourcesink_bind()
472 ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", sourcesink_bind()
473 ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); sourcesink_bind()
493 static int check_read_data(struct f_sourcesink *ss, struct usb_request *req) check_read_data() argument
497 struct usb_composite_dev *cdev = ss->function.config->cdev; check_read_data()
524 usb_ep_set_halt(ss->out_ep); check_read_data()
551 struct f_sourcesink *ss = ep->driver_data; source_sink_complete() local
555 if (!ss) source_sink_complete()
558 cdev = ss->function.config->cdev; source_sink_complete()
563 if (ep == ss->out_ep) { source_sink_complete()
564 check_read_data(ss, req); source_sink_complete()
576 if (ep == ss->out_ep) source_sink_complete()
577 check_read_data(ss, req); source_sink_complete()
603 static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, source_sink_start_ep() argument
625 ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; source_sink_start_ep()
628 ep = is_in ? ss->in_ep : ss->out_ep; source_sink_start_ep()
645 cdev = ss->function.config->cdev; source_sink_start_ep()
659 static void disable_source_sink(struct f_sourcesink *ss) disable_source_sink() argument
663 cdev = ss->function.config->cdev; disable_source_sink()
664 disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, disable_source_sink()
665 ss->iso_out_ep); disable_source_sink()
666 VDBG(cdev, "%s disabled\n", ss->function.name); disable_source_sink()
670 enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, enable_source_sink() argument
678 ep = ss->in_ep; enable_source_sink()
679 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
685 ep->driver_data = ss; enable_source_sink()
687 result = source_sink_start_ep(ss, true, false, speed); enable_source_sink()
690 ep = ss->in_ep; enable_source_sink()
697 ep = ss->out_ep; enable_source_sink()
698 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
704 ep->driver_data = ss; enable_source_sink()
706 result = source_sink_start_ep(ss, false, false, speed); enable_source_sink()
709 ep = ss->out_ep; enable_source_sink()
719 ep = ss->iso_in_ep; enable_source_sink()
721 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
727 ep->driver_data = ss; enable_source_sink()
729 result = source_sink_start_ep(ss, true, true, speed); enable_source_sink()
732 ep = ss->iso_in_ep; enable_source_sink()
742 ep = ss->iso_out_ep; enable_source_sink()
744 result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); enable_source_sink()
750 ep->driver_data = ss; enable_source_sink()
752 result = source_sink_start_ep(ss, false, true, speed); enable_source_sink()
760 ss->cur_alt = alt; enable_source_sink()
762 DBG(cdev, "%s enabled, alt intf %d\n", ss->function.name, alt); enable_source_sink()
769 struct f_sourcesink *ss = func_to_ss(f); sourcesink_set_alt() local
772 if (ss->in_ep->driver_data) sourcesink_set_alt()
773 disable_source_sink(ss); sourcesink_set_alt()
774 return enable_source_sink(cdev, ss, alt); sourcesink_set_alt()
779 struct f_sourcesink *ss = func_to_ss(f); sourcesink_get_alt() local
781 return ss->cur_alt; sourcesink_get_alt()
786 struct f_sourcesink *ss = func_to_ss(f); sourcesink_disable() local
788 disable_source_sink(ss); sourcesink_disable()
868 struct f_sourcesink *ss; source_sink_alloc_func() local
871 ss = kzalloc(sizeof(*ss), GFP_KERNEL); source_sink_alloc_func()
872 if (!ss) source_sink_alloc_func()
888 ss->function.name = "source/sink"; source_sink_alloc_func()
889 ss->function.bind = sourcesink_bind; source_sink_alloc_func()
890 ss->function.set_alt = sourcesink_set_alt; source_sink_alloc_func()
891 ss->function.get_alt = sourcesink_get_alt; source_sink_alloc_func()
892 ss->function.disable = sourcesink_disable; source_sink_alloc_func()
893 ss->function.setup = sourcesink_setup; source_sink_alloc_func()
894 ss->function.strings = sourcesink_strings; source_sink_alloc_func()
896 ss->function.free_func = sourcesink_free_func; source_sink_alloc_func()
898 return &ss->function; source_sink_alloc_func()
f_uac2.c
71 struct snd_pcm_substream *ss; member in struct:uac2_rtd_params
195 substream = prm->ss; agdev_iso_complete()
289 prm->ss = substream; uac2_pcm_trigger()
293 prm->ss = NULL; uac2_pcm_trigger()
302 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss) uac2_pcm_trigger()
/linux-4.1.27/sound/soc/sh/
siu_pcm.c
296 static int siu_pcm_hw_params(struct snd_pcm_substream *ss, siu_pcm_hw_params() argument
300 struct device *dev = ss->pcm->card->dev; siu_pcm_hw_params()
305 ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params)); siu_pcm_hw_params()
312 static int siu_pcm_hw_free(struct snd_pcm_substream *ss) siu_pcm_hw_free() argument
315 struct siu_port *port_info = siu_port_info(ss); siu_pcm_hw_free()
316 struct device *dev = ss->pcm->card->dev; siu_pcm_hw_free()
319 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_hw_free()
326 return snd_pcm_lib_free_pages(ss); siu_pcm_hw_free()
339 static int siu_pcm_open(struct snd_pcm_substream *ss) siu_pcm_open() argument
342 struct snd_soc_pcm_runtime *rtd = ss->private_data; siu_pcm_open()
345 struct siu_port *port_info = siu_port_info(ss); siu_pcm_open()
348 struct device *dev = ss->pcm->card->dev; siu_pcm_open()
357 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) { siu_pcm_open()
376 siu_stream->substream = ss; siu_pcm_open()
381 static int siu_pcm_close(struct snd_pcm_substream *ss) siu_pcm_close() argument
384 struct device *dev = ss->pcm->card->dev; siu_pcm_close()
385 struct siu_port *port_info = siu_port_info(ss); siu_pcm_close()
390 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_close()
403 static int siu_pcm_prepare(struct snd_pcm_substream *ss) siu_pcm_prepare() argument
406 struct siu_port *port_info = siu_port_info(ss); siu_pcm_prepare()
407 struct device *dev = ss->pcm->card->dev; siu_pcm_prepare()
408 struct snd_pcm_runtime *rt = ss->runtime; siu_pcm_prepare()
412 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_prepare()
419 siu_stream->buf_bytes = snd_pcm_lib_buffer_bytes(ss); siu_pcm_prepare()
420 siu_stream->period_bytes = snd_pcm_lib_period_bytes(ss); siu_pcm_prepare()
449 static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd) siu_pcm_trigger() argument
452 struct device *dev = ss->pcm->card->dev; siu_pcm_trigger()
453 struct siu_port *port_info = siu_port_info(ss); siu_pcm_trigger()
461 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_trigger()
472 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_trigger()
491 static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss) siu_pcm_pointer_dma() argument
493 struct device *dev = ss->pcm->card->dev; siu_pcm_pointer_dma()
496 struct siu_port *port_info = siu_port_info(ss); siu_pcm_pointer_dma()
497 struct snd_pcm_runtime *rt = ss->runtime; siu_pcm_pointer_dma()
501 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) siu_pcm_pointer_dma()
523 return bytes_to_frames(ss->runtime, ptr); siu_pcm_pointer_dma()
/linux-4.1.27/arch/x86/kvm/
tss.h
25 u32 ss; member in struct:tss_segment_32
54 u16 ss; member in struct:tss_segment_16
emulate.c
489 struct desc_struct ss; stack_mask() local
493 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); stack_mask()
494 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ stack_mask()
2267 struct desc_struct *cs, struct desc_struct *ss) setup_syscalls_segments()
2280 set_desc_base(ss, 0); /* flat segment */ setup_syscalls_segments()
2281 set_desc_limit(ss, 0xfffff); /* 4GB limit */ setup_syscalls_segments()
2282 ss->g = 1; /* 4kb granularity */ setup_syscalls_segments()
2283 ss->s = 1; setup_syscalls_segments()
2284 ss->type = 0x03; /* Read/Write, Accessed */ setup_syscalls_segments()
2285 ss->d = 1; /* 32bit stack segment */ setup_syscalls_segments()
2286 ss->dpl = 0; setup_syscalls_segments()
2287 ss->p = 1; setup_syscalls_segments()
2288 ss->l = 0; setup_syscalls_segments()
2289 ss->avl = 0; setup_syscalls_segments()
2350 struct desc_struct cs, ss; em_syscall() local
2364 setup_syscalls_segments(ctxt, &cs, &ss); em_syscall()
2379 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_syscall()
2409 struct desc_struct cs, ss; em_sysenter() local
2431 setup_syscalls_segments(ctxt, &cs, &ss); em_sysenter()
2446 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_sysenter()
2461 struct desc_struct cs, ss; em_sysexit() local
2471 setup_syscalls_segments(ctxt, &cs, &ss); em_sysexit()
2482 ss.dpl = 3; em_sysexit()
2509 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_sysexit()
2592 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); save_state_to_tss16()
2621 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); load_state_from_tss16()
2642 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, load_state_from_tss16()
2711 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); save_state_to_tss32()
2746 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); load_state_from_tss32()
2780 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, load_state_from_tss32()
2266 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, struct desc_struct *cs, struct desc_struct *ss) setup_syscalls_segments() argument
svm.c
1142 init_seg(&save->ss); init_vmcb()
1395 case VCPU_SREG_SS: return &save->ss; svm_seg()
2336 nested_vmcb->save.ss = vmcb->save.ss; nested_svm_vmexit()
2398 svm->vmcb->save.ss = hsave->save.ss; nested_svm_vmexit()
2523 hsave->save.ss = vmcb->save.ss; nested_svm_vmrun()
2555 svm->vmcb->save.ss = nested_vmcb->save.ss; nested_svm_vmrun()
3439 "ss:", dump_vmcb()
3440 save->ss.selector, save->ss.attrib, dump_vmcb()
3441 save->ss.limit, save->ss.base); dump_vmcb()
/linux-4.1.27/net/netfilter/
nfnetlink.c
109 nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) nfnetlink_find_client() argument
113 if (cb_id >= ss->cb_count) nfnetlink_find_client()
116 return &ss->cb[cb_id]; nfnetlink_find_client()
157 const struct nfnetlink_subsystem *ss; nfnetlink_rcv_msg() local
167 ss = nfnetlink_get_subsys(type); nfnetlink_rcv_msg()
168 if (!ss) { nfnetlink_rcv_msg()
173 ss = nfnetlink_get_subsys(type); nfnetlink_rcv_msg()
174 if (!ss) nfnetlink_rcv_msg()
182 nc = nfnetlink_find_client(type, ss); nfnetlink_rcv_msg()
191 struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; nfnetlink_rcv_msg()
196 err = nla_parse(cda, ss->cb[cb_id].attr_count, nfnetlink_rcv_msg()
197 attr, attrlen, ss->cb[cb_id].policy); nfnetlink_rcv_msg()
211 lockdep_is_held(&table[subsys_id].mutex)) != ss || nfnetlink_rcv_msg()
212 nfnetlink_find_client(type, ss) != nc) nfnetlink_rcv_msg()
277 const struct nfnetlink_subsystem *ss; nfnetlink_rcv_batch() local
293 ss = rcu_dereference_protected(table[subsys_id].subsys, nfnetlink_rcv_batch()
295 if (!ss) { nfnetlink_rcv_batch()
300 ss = rcu_dereference_protected(table[subsys_id].subsys, nfnetlink_rcv_batch()
302 if (!ss) nfnetlink_rcv_batch()
311 if (!ss->commit || !ss->abort) { nfnetlink_rcv_batch()
357 nc = nfnetlink_find_client(type, ss); nfnetlink_rcv_batch()
366 struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; nfnetlink_rcv_batch()
370 err = nla_parse(cda, ss->cb[cb_id].attr_count, nfnetlink_rcv_batch()
371 attr, attrlen, ss->cb[cb_id].policy); nfnetlink_rcv_batch()
386 ss->abort(oskb); nfnetlink_rcv_batch()
423 ss->commit(oskb); nfnetlink_rcv_batch()
425 ss->abort(oskb); nfnetlink_rcv_batch()
474 const struct nfnetlink_subsystem *ss; nfnetlink_bind() local
483 ss = nfnetlink_get_subsys(type); nfnetlink_bind()
485 if (!ss) nfnetlink_bind()
xt_osf.c
238 if (totlen != f->ss || !xt_osf_ttl(skb, info, f->ttl)) xt_osf_match_packet()
/linux-4.1.27/sound/parisc/
harmony.c
306 snd_harmony_playback_trigger(struct snd_pcm_substream *ss, int cmd) snd_harmony_playback_trigger() argument
308 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_playback_trigger()
342 snd_harmony_capture_trigger(struct snd_pcm_substream *ss, int cmd) snd_harmony_capture_trigger() argument
344 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_capture_trigger()
408 snd_harmony_playback_prepare(struct snd_pcm_substream *ss) snd_harmony_playback_prepare() argument
410 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_playback_prepare()
411 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_prepare()
416 h->pbuf.size = snd_pcm_lib_buffer_bytes(ss); snd_harmony_playback_prepare()
417 h->pbuf.count = snd_pcm_lib_period_bytes(ss); snd_harmony_playback_prepare()
438 snd_harmony_capture_prepare(struct snd_pcm_substream *ss) snd_harmony_capture_prepare() argument
440 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_capture_prepare()
441 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_prepare()
446 h->cbuf.size = snd_pcm_lib_buffer_bytes(ss); snd_harmony_capture_prepare()
447 h->cbuf.count = snd_pcm_lib_period_bytes(ss); snd_harmony_capture_prepare()
468 snd_harmony_playback_pointer(struct snd_pcm_substream *ss) snd_harmony_playback_pointer() argument
470 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_pointer()
471 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_playback_pointer()
497 snd_harmony_capture_pointer(struct snd_pcm_substream *ss) snd_harmony_capture_pointer() argument
499 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_pointer()
500 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_capture_pointer()
526 snd_harmony_playback_open(struct snd_pcm_substream *ss) snd_harmony_playback_open() argument
528 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_playback_open()
529 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_playback_open()
532 h->psubs = ss; snd_harmony_playback_open()
545 snd_harmony_capture_open(struct snd_pcm_substream *ss) snd_harmony_capture_open() argument
547 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_capture_open()
548 struct snd_pcm_runtime *rt = ss->runtime; snd_harmony_capture_open()
551 h->csubs = ss; snd_harmony_capture_open()
564 snd_harmony_playback_close(struct snd_pcm_substream *ss) snd_harmony_playback_close() argument
566 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_playback_close()
572 snd_harmony_capture_close(struct snd_pcm_substream *ss) snd_harmony_capture_close() argument
574 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_capture_close()
580 snd_harmony_hw_params(struct snd_pcm_substream *ss, snd_harmony_hw_params() argument
584 struct snd_harmony *h = snd_pcm_substream_chip(ss); snd_harmony_hw_params()
586 err = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw)); snd_harmony_hw_params()
588 ss->runtime->dma_addr = __pa(ss->runtime->dma_area); snd_harmony_hw_params()
594 snd_harmony_hw_free(struct snd_pcm_substream *ss) snd_harmony_hw_free() argument
596 return snd_pcm_lib_free_pages(ss); snd_harmony_hw_free()
/linux-4.1.27/crypto/
hmac.c
49 int ss = crypto_shash_statesize(parent); hmac_setkey() local
51 char *opad = ipad + ss; hmac_setkey()
52 struct hmac_ctx *ctx = align_ptr(opad + ss, hmac_setkey()
128 int ss = crypto_shash_statesize(parent); hmac_final() local
129 char *opad = crypto_shash_ctx_aligned(parent) + ss; hmac_final()
145 int ss = crypto_shash_statesize(parent); hmac_finup() local
146 char *opad = crypto_shash_ctx_aligned(parent) + ss; hmac_finup()
188 int ss; hmac_create() local
200 ss = salg->statesize; hmac_create()
203 ss < alg->cra_blocksize) hmac_create()
220 ss = ALIGN(ss, alg->cra_alignmask + 1); hmac_create()
222 inst->alg.statesize = ss; hmac_create()
225 ALIGN(ss * 2, crypto_tfm_ctx_alignment()); hmac_create()
/linux-4.1.27/tools/perf/util/
symbol-minimal.c
249 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, symsrc__init() argument
256 ss->name = strdup(name); symsrc__init()
257 if (!ss->name) symsrc__init()
260 ss->fd = fd; symsrc__init()
261 ss->type = type; symsrc__init()
271 bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused) symsrc__possibly_runtime()
277 bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) symsrc__has_symtab()
282 void symsrc__destroy(struct symsrc *ss) symsrc__destroy() argument
284 zfree(&ss->name); symsrc__destroy()
285 close(ss->fd); symsrc__destroy()
289 struct symsrc *ss __maybe_unused, dso__synthesize_plt_symbols()
335 struct symsrc *ss, dso__load_sym()
343 ret = fd__is_64_bit(ss->fd); dso__load_sym()
347 if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) { dso__load_sym()
334 dso__load_sym(struct dso *dso, struct map *map __maybe_unused, struct symsrc *ss, struct symsrc *runtime_ss __maybe_unused, symbol_filter_t filter __maybe_unused, int kmodule __maybe_unused) dso__load_sym() argument
symbol-elf.c
215 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map, dso__synthesize_plt_symbols() argument
232 if (!ss->dynsym) dso__synthesize_plt_symbols()
235 elf = ss->elf; dso__synthesize_plt_symbols()
236 ehdr = ss->ehdr; dso__synthesize_plt_symbols()
238 scn_dynsym = ss->dynsym; dso__synthesize_plt_symbols()
239 shdr_dynsym = ss->dynshdr; dso__synthesize_plt_symbols()
240 dynsym_idx = ss->dynsym_idx; dso__synthesize_plt_symbols()
616 bool symsrc__possibly_runtime(struct symsrc *ss) symsrc__possibly_runtime() argument
618 return ss->dynsym || ss->opdsec; symsrc__possibly_runtime()
621 bool symsrc__has_symtab(struct symsrc *ss) symsrc__has_symtab() argument
623 return ss->symtab != NULL; symsrc__has_symtab()
626 void symsrc__destroy(struct symsrc *ss) symsrc__destroy() argument
628 zfree(&ss->name); symsrc__destroy()
629 elf_end(ss->elf); symsrc__destroy()
630 close(ss->fd); symsrc__destroy()
633 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, symsrc__init() argument
686 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64); symsrc__init()
688 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", symsrc__init()
690 if (ss->symshdr.sh_type != SHT_SYMTAB) symsrc__init()
691 ss->symtab = NULL; symsrc__init()
693 ss->dynsym_idx = 0; symsrc__init()
694 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym", symsrc__init()
695 &ss->dynsym_idx); symsrc__init()
696 if (ss->dynshdr.sh_type != SHT_DYNSYM) symsrc__init()
697 ss->dynsym = NULL; symsrc__init()
699 ss->opdidx = 0; symsrc__init()
700 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", symsrc__init()
701 &ss->opdidx); symsrc__init()
702 if (ss->opdshdr.sh_type != SHT_PROGBITS) symsrc__init()
703 ss->opdsec = NULL; symsrc__init()
707 ss->adjust_symbols = (ehdr.e_type == ET_EXEC || symsrc__init()
714 ss->adjust_symbols = ehdr.e_type == ET_EXEC || symsrc__init()
718 ss->name = strdup(name); symsrc__init()
719 if (!ss->name) { symsrc__init()
724 ss->elf = elf; symsrc__init()
725 ss->fd = fd; symsrc__init()
726 ss->ehdr = ehdr; symsrc__init()
727 ss->type = type; symsrc__init()
symbol.h
222 void symsrc__destroy(struct symsrc *ss);
223 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
225 bool symsrc__has_symtab(struct symsrc *ss);
226 bool symsrc__possibly_runtime(struct symsrc *ss);
273 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss,
symbol.c
1433 struct symsrc *ss = &ss_[ss_pos]; dso__load() local
1446 if (symsrc__init(ss, dso, name, symtab_type) < 0) dso__load()
1449 if (!syms_ss && symsrc__has_symtab(ss)) { dso__load()
1450 syms_ss = ss; dso__load()
1456 if (!runtime_ss && symsrc__possibly_runtime(ss)) { dso__load()
1457 runtime_ss = ss; dso__load()
1467 symsrc__destroy(ss); dso__load()
1525 struct symsrc ss; dso__load_vmlinux() local
1539 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) dso__load_vmlinux()
1542 err = dso__load_sym(dso, map, &ss, &ss, filter, 0); dso__load_vmlinux()
1543 symsrc__destroy(&ss); dso__load_vmlinux()
/linux-4.1.27/kernel/
cgroup.c
191 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
241 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
243 * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
250 struct cgroup_subsys *ss) cgroup_css()
252 if (ss) cgroup_css()
253 return rcu_dereference_check(cgrp->subsys[ss->id], cgroup_css()
262 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
265 * as the matching css of the nearest ancestor including self which has @ss
266 * enabled. If @ss is associated with the hierarchy @cgrp is on, this
270 struct cgroup_subsys *ss) cgroup_e_css()
274 if (!ss) cgroup_e_css()
277 if (!(cgrp->root->subsys_mask & (1 << ss->id))) cgroup_e_css()
285 !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id))) cgroup_e_css()
288 return cgroup_css(cgrp, ss); cgroup_e_css()
294 * @ss: the subsystem of interest
296 * Find and get the effective css of @cgrp for @ss. The effective css is
298 * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
303 struct cgroup_subsys *ss) cgroup_get_e_css()
310 css = cgroup_css(cgrp, ss); cgroup_get_e_css()
317 css = init_css_set.subsys[ss->id]; cgroup_get_e_css()
343 if (cft->ss) of_css()
344 return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); of_css()
405 * @ss: the iteration cursor
406 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
408 #define for_each_subsys(ss, ssid) \
410 (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
513 struct cgroup_subsys *ss; css_set_hash() local
516 for_each_subsys(ss, i) css_set_hash()
526 struct cgroup_subsys *ss; put_css_set_locked() local
535 for_each_subsys(ss, ssid) put_css_set_locked()
664 struct cgroup_subsys *ss; find_existing_css_set() local
674 for_each_subsys(ss, i) { for_each_subsys()
677 * @ss is in this hierarchy, so we want the for_each_subsys()
680 template[i] = cgroup_e_css(cgrp, ss); for_each_subsys()
683 * @ss is not in this hierarchy, so we don't want for_each_subsys()
785 struct cgroup_subsys *ss; find_css_set() local
842 for_each_subsys(ss, ssid) find_css_set()
1008 if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && cgroup_file_name()
1011 cft->ss->name, cft->name); cgroup_file_name()
1076 struct cgroup_subsys *ss; cgroup_calc_child_subsys_mask() local
1087 for_each_subsys(ss, ssid) cgroup_calc_child_subsys_mask()
1089 new_ss_mask |= ss->depends_on; cgroup_calc_child_subsys_mask()
1205 struct cgroup_subsys *ss; cgroup_clear_dir() local
1208 for_each_subsys(ss, i) { for_each_subsys()
1213 list_for_each_entry(cfts, &ss->cfts, node) for_each_subsys()
1220 struct cgroup_subsys *ss; rebind_subsystems() local
1226 for_each_subsys(ss, ssid) { for_each_subsys()
1230 /* if @ss has non-root csses attached to it, can't move */ for_each_subsys()
1231 if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss))) for_each_subsys()
1235 if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root) for_each_subsys()
1266 for_each_subsys(ss, ssid) for_each_subsys()
1268 cgroup_clear_dir(&ss->root->cgrp, 1 << ssid); for_each_subsys()
1270 for_each_subsys(ss, ssid) { for_each_subsys()
1278 src_root = ss->root; for_each_subsys()
1279 css = cgroup_css(&src_root->cgrp, ss); for_each_subsys()
1281 WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss)); for_each_subsys()
1285 ss->root = dst_root; for_each_subsys()
1290 list_move_tail(&cset->e_cset_node[ss->id], for_each_subsys()
1291 &dst_root->cgrp.e_csets[ss->id]); for_each_subsys()
1305 if (ss->bind) for_each_subsys()
1306 ss->bind(css); for_each_subsys()
1317 struct cgroup_subsys *ss; cgroup_show_options() local
1320 for_each_subsys(ss, ssid) cgroup_show_options()
1322 seq_show_option(seq, ss->name, NULL); cgroup_show_options()
1356 struct cgroup_subsys *ss; parse_cgroupfs_options() local
1435 for_each_subsys(ss, i) { for_each_subsys()
1436 if (strcmp(token, ss->name)) for_each_subsys()
1438 if (ss->disabled) for_each_subsys()
1468 for_each_subsys(ss, i)
1469 if (!ss->disabled)
1607 struct cgroup_subsys *ss; init_cgroup_housekeeping() local
1618 for_each_subsys(ss, ssid) init_cgroup_housekeeping()
1742 struct cgroup_subsys *ss; cgroup_mount() local
1780 for_each_subsys(ss, i) { for_each_subsys()
1782 ss->root == &cgrp_dfl_root) for_each_subsys()
1785 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) { for_each_subsys()
1791 cgroup_put(&ss->root->cgrp); for_each_subsys()
2287 if (css->ss->can_attach) { for_each_e_css()
2288 ret = css->ss->can_attach(css, &tset); for_each_e_css()
2317 if (css->ss->attach)
2318 css->ss->attach(css, &tset);
2327 if (css->ss->cancel_attach) for_each_e_css()
2328 css->ss->cancel_attach(css, &tset); for_each_e_css()
2546 struct cgroup_subsys *ss; cgroup_print_ss_mask() local
2550 for_each_subsys(ss, ssid) { for_each_subsys()
2554 seq_printf(seq, "%s", ss->name); for_each_subsys()
2694 struct cgroup_subsys *ss; cgroup_subtree_control_write() local
2706 for_each_subsys(ss, ssid) { for_each_subsys()
2707 if (ss->disabled || strcmp(tok + 1, ss->name) || for_each_subsys()
2708 ((1 << ss->id) & cgrp_dfl_root_inhibit_ss_mask)) for_each_subsys()
2730 for_each_subsys(ss, ssid) { for_each_subsys()
2795 for_each_subsys(ss, ssid) { for_each_subsys()
2802 if (!cgroup_css(child, ss)) cgroup_for_each_live_child()
2826 for_each_subsys(ss, ssid) { for_each_subsys()
2832 ret = create_css(child, ss, cgroup_for_each_live_child()
2858 for_each_subsys(ss, ssid) { for_each_subsys()
2863 struct cgroup_subsys_state *css = cgroup_css(child, ss); cgroup_for_each_live_child()
2869 if (ss->css_reset) cgroup_for_each_live_child()
2870 ss->css_reset(css); cgroup_for_each_live_child()
2881 for_each_subsys(ss, ssid) { for_each_subsys()
2882 struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss); for_each_subsys()
2885 if (!ss->css_e_css_changed || !this_css) for_each_subsys()
2890 ss->css_e_css_changed(css); for_each_subsys()
2903 for_each_subsys(ss, ssid) { for_each_subsys()
2908 struct cgroup_subsys_state *css = cgroup_css(child, ss); cgroup_for_each_live_child()
2946 css = cgroup_css(cgrp, cft->ss); cgroup_file_write()
3141 struct cgroup_subsys *ss = cfts[0].ss; cgroup_apply_cftypes() local
3142 struct cgroup *root = &ss->root->cgrp; cgroup_apply_cftypes()
3149 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { css_for_each_descendant_pre()
3174 cft->ss = NULL; cgroup_exit_cftypes()
3181 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) cgroup_init_cftypes() argument
3188 WARN_ON(cft->ss || cft->kf_ops); cgroup_init_cftypes()
3209 cft->ss = ss; cgroup_init_cftypes()
3219 if (!cfts || !cfts[0].ss) cgroup_rm_cftypes_locked()
3251 * @ss: target cgroup subsystem
3254 * Register @cfts to @ss. Files described by @cfts are created for all
3255 * existing cgroups to which @ss is attached and all future cgroups will
3256 * have them too. This function can be called anytime whether @ss is
3263 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) cgroup_add_cftypes() argument
3267 if (ss->disabled) cgroup_add_cftypes()
3273 ret = cgroup_init_cftypes(ss, cfts); cgroup_add_cftypes()
3279 list_add_tail(&cfts->node, &ss->cfts); cgroup_add_cftypes()
3290 * @ss: target cgroup subsystem
3296 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) cgroup_add_dfl_cftypes() argument
3302 return cgroup_add_cftypes(ss, cfts); cgroup_add_dfl_cftypes()
3307 * @ss: target cgroup subsystem
3313 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) cgroup_add_legacy_cftypes() argument
3323 ss->dfl_cftypes != ss->legacy_cftypes) { cgroup_add_legacy_cftypes()
3328 return cgroup_add_cftypes(ss, cfts); cgroup_add_legacy_cftypes()
3596 if (it->ss) { css_advance_task_iter()
3598 e_cset_node[it->ss->id]); css_advance_task_iter()
3639 it->ss = css->ss; __acquires()
3641 if (it->ss) __acquires()
3642 it->cset_pos = &css->cgroup->e_csets[css->ss->id]; __acquires()
4326 struct cgroup_subsys *ss; cgroup_populate_dir() local
4330 for_each_subsys(ss, i) { for_each_subsys()
4336 list_for_each_entry(cfts, &ss->cfts, node) { for_each_subsys()
4374 struct cgroup_subsys *ss = css->ss; css_free_work_fn() local
4379 if (ss) { css_free_work_fn()
4386 ss->css_free(css); css_free_work_fn()
4387 cgroup_idr_remove(&ss->css_idr, id); css_free_work_fn()
4429 struct cgroup_subsys *ss = css->ss; css_release_work_fn() local
4437 if (ss) { css_release_work_fn()
4439 cgroup_idr_replace(&ss->css_idr, NULL, css->id); css_release_work_fn()
4440 if (ss->css_released) css_release_work_fn()
4441 ss->css_released(css); css_release_work_fn()
4472 struct cgroup_subsys *ss, struct cgroup *cgrp) init_and_link_css()
4480 css->ss = ss; init_and_link_css()
4487 css->parent = cgroup_css(cgroup_parent(cgrp), ss); init_and_link_css()
4491 BUG_ON(cgroup_css(cgrp, ss)); init_and_link_css()
4497 struct cgroup_subsys *ss = css->ss; online_css() local
4502 if (ss->css_online) online_css()
4503 ret = ss->css_online(css); online_css()
4506 rcu_assign_pointer(css->cgroup->subsys[ss->id], css); online_css()
4518 struct cgroup_subsys *ss = css->ss; offline_css() local
4525 if (ss->css_offline) offline_css()
4526 ss->css_offline(css); offline_css()
4529 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL); offline_css()
4537 * @ss: the subsys of new css
4540 * Create a new css associated with @cgrp - @ss pair. On success, the new
4544 static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss, create_css() argument
4548 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss); create_css()
4554 css = ss->css_alloc(parent_css); create_css()
4558 init_and_link_css(css, ss, cgrp); create_css()
4564 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT); create_css()
4570 err = cgroup_populate_dir(cgrp, 1 << ss->id); create_css()
4577 cgroup_idr_replace(&ss->css_idr, css, css->id); create_css()
4583 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && create_css()
4586 current->comm, current->pid, ss->name); create_css()
4587 if (!strcmp(ss->name, "memory")) create_css()
4589 ss->warned_broken_hierarchy = true; create_css()
4596 cgroup_clear_dir(css->cgroup, 1 << css->ss->id); create_css()
4598 cgroup_idr_remove(&ss->css_idr, css->id); create_css()
4611 struct cgroup_subsys *ss; cgroup_mkdir() local
4699 for_each_subsys(ss, ssid) { for_each_subsys()
4701 ret = create_css(cgrp, ss, for_each_subsys()
4788 cgroup_clear_dir(css->cgroup, 1 << css->ss->id); kill_css()
4907 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early) cgroup_init_subsys() argument
4911 printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); cgroup_init_subsys()
4915 idr_init(&ss->css_idr); cgroup_init_subsys()
4916 INIT_LIST_HEAD(&ss->cfts); cgroup_init_subsys()
4919 ss->root = &cgrp_dfl_root; cgroup_init_subsys()
4920 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); cgroup_init_subsys()
4923 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp); cgroup_init_subsys()
4935 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL); cgroup_init_subsys()
4943 init_css_set.subsys[ss->id] = css; cgroup_init_subsys()
4945 need_forkexit_callback |= ss->fork || ss->exit; cgroup_init_subsys()
4966 struct cgroup_subsys *ss; cgroup_init_early() local
4974 for_each_subsys(ss, i) { for_each_subsys()
4975 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, for_each_subsys()
4977 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, for_each_subsys()
4978 ss->id, ss->name); for_each_subsys()
4982 ss->id = i; for_each_subsys()
4983 ss->name = cgroup_subsys_name[i]; for_each_subsys()
4985 if (ss->early_init) for_each_subsys()
4986 cgroup_init_subsys(ss, true); for_each_subsys()
4999 struct cgroup_subsys *ss; cgroup_init() local
5016 for_each_subsys(ss, ssid) { for_each_subsys()
5017 if (ss->early_init) { for_each_subsys()
5019 init_css_set.subsys[ss->id]; for_each_subsys()
5021 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, for_each_subsys()
5025 cgroup_init_subsys(ss, false); for_each_subsys()
5036 if (ss->disabled) for_each_subsys()
5039 cgrp_dfl_root.subsys_mask |= 1 << ss->id; for_each_subsys()
5041 if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes) for_each_subsys()
5042 ss->dfl_cftypes = ss->legacy_cftypes; for_each_subsys()
5044 if (!ss->dfl_cftypes) for_each_subsys()
5045 cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id; for_each_subsys()
5047 if (ss->dfl_cftypes == ss->legacy_cftypes) { for_each_subsys()
5048 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes)); for_each_subsys()
5050 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes)); for_each_subsys()
5051 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes)); for_each_subsys()
5054 if (ss->bind) for_each_subsys()
5055 ss->bind(init_css_set.subsys[ssid]); for_each_subsys()
5118 struct cgroup_subsys *ss; for_each_root() local
5126 for_each_subsys(ss, ssid) for_each_root()
5128 seq_printf(m, "%s%s", count++ ? "," : "", ss->name); for_each_root()
5155 struct cgroup_subsys *ss; proc_cgroupstats_show() local
5166 for_each_subsys(ss, i) proc_cgroupstats_show()
5168 ss->name, ss->root->hierarchy_id, proc_cgroupstats_show()
5169 atomic_read(&ss->root->nr_cgrps), !ss->disabled); proc_cgroupstats_show()
5213 struct cgroup_subsys *ss; cgroup_post_fork() local
5251 * Call ss->fork(). This must happen after @child is linked on cgroup_post_fork()
5256 for_each_subsys(ss, i) cgroup_post_fork()
5257 if (ss->fork) cgroup_post_fork()
5258 ss->fork(child); cgroup_post_fork()
5283 struct cgroup_subsys *ss; cgroup_exit() local
5305 for_each_subsys(ss, i) { for_each_subsys()
5306 if (ss->exit) { for_each_subsys()
5310 ss->exit(css, old_css, tsk); for_each_subsys()
5388 struct cgroup_subsys *ss; cgroup_disable() local
5396 for_each_subsys(ss, i) { for_each_subsys()
5397 if (!strcmp(token, ss->name)) { for_each_subsys()
5398 ss->disabled = 1; for_each_subsys()
5400 " subsystem\n", ss->name); for_each_subsys()
5420 * @ss: subsystem of interest
5422 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
5427 struct cgroup_subsys *ss) css_tryget_online_from_dir()
5447 css = cgroup_css(cgrp, ss); css_tryget_online_from_dir()
5459 * @ss: cgroup subsys to be looked into
5464 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) css_from_id() argument
5467 return id > 0 ? idr_find(&ss->css_idr, id) : NULL; css_from_id()
249 cgroup_css(struct cgroup *cgrp, struct cgroup_subsys *ss) cgroup_css() argument
269 cgroup_e_css(struct cgroup *cgrp, struct cgroup_subsys *ss) cgroup_e_css() argument
302 cgroup_get_e_css(struct cgroup *cgrp, struct cgroup_subsys *ss) cgroup_get_e_css() argument
4471 init_and_link_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss, struct cgroup *cgrp) init_and_link_css() argument
5426 css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss) css_tryget_online_from_dir() argument
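Nearly every cgroup.c hit above leans on the for_each_subsys() iterator documented near line 405: @ss is the cursor and @ssid its index, with iteration ending once @ssid reaches CGROUP_SUBSYS_COUNT. A hedged sketch of the idiom as it recurs in these snippets (the loop body is illustrative, not copied from cgroup.c):

	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid) {		/* ss: subsystem cursor, ssid: its index */
		if (ss->disabled)		/* e.g. switched off via cgroup_disable= */
			continue;
		pr_info("subsys %s (id %d) on hierarchy %d\n",
			ss->name, ss->id, ss->root->hierarchy_id);
	}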
/linux-4.1.27/arch/x86/include/asm/
H A Dsuspend_32.h14 u16 es, fs, gs, ss; member in struct:saved_context
H A Da.out-core.h58 dump->regs.ss = (u16)regs->ss; aout_dump_thread()
H A Dkexec.h72 * CPU does not save ss and sp on stack if execution is already
82 "movw %%ss, %%ax\n\t" crash_fixup_ss_esp()
83 :"=a"(newregs->ss)); crash_fixup_ss_esp()
89 * via panic otherwise just fix up the ss and sp if coming via kernel
108 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); crash_setup_regs()
130 asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss)); crash_setup_regs()
H A Dsuspend_64.h22 u16 ds, es, fs, gs, ss; member in struct:saved_context
H A Dptrace.h28 unsigned long ss; member in struct:pt_regs
64 unsigned long ss; member in struct:pt_regs
162 #define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
180 * Traps from the kernel do not save sp and ss. regs_get_register()
H A Dlguest.h43 unsigned long ss; member in struct:lguest_regs
H A Dsigcontext.h26 unsigned short ss, __ssh; member in struct:sigcontext
H A Duser32.h42 unsigned short ss, __ss; member in struct:user_regs_struct32
H A Dsvm.h141 struct vmcb_seg ss; member in struct:vmcb_save_area
H A Delf.h137 pr_reg[16] = regs->ss & 0xffff; \
228 (pr_reg)[20] = (regs)->ss; \
H A Dsegment.h171 * ss = STAR.SYSRET_CS+8 (in either case)
H A Duser_32.h94 unsigned long ss; member in struct:user_regs_struct
H A Duser_64.h89 unsigned long ss; member in struct:user_regs_struct
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_sdma.c92 static void sdma_get(struct qib_sdma_state *ss) sdma_get() argument
94 kref_get(&ss->kref); sdma_get()
99 struct qib_sdma_state *ss = sdma_complete() local
102 complete(&ss->comp); sdma_complete()
105 static void sdma_put(struct qib_sdma_state *ss) sdma_put() argument
107 kref_put(&ss->kref, sdma_complete); sdma_put()
110 static void sdma_finalput(struct qib_sdma_state *ss) sdma_finalput() argument
112 sdma_put(ss); sdma_finalput()
113 wait_for_completion(&ss->comp); sdma_finalput()
195 struct qib_sdma_state *ss = &ppd->sdma_state; sdma_hw_start_up() local
198 for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno) sdma_hw_start_up()
206 struct qib_sdma_state *ss = &ppd->sdma_state; sdma_sw_tear_down() local
209 sdma_put(ss); sdma_sw_tear_down()
220 struct qib_sdma_state *ss = &ppd->sdma_state; sdma_set_state() local
221 struct sdma_set_state_action *action = ss->set_state_action; sdma_set_state()
225 ss->previous_state = ss->current_state; sdma_set_state()
226 ss->previous_op = ss->current_op; sdma_set_state()
228 ss->current_state = next_state; sdma_set_state()
243 ss->go_s99_running = 0; sdma_set_state()
246 ss->go_s99_running = 1; sdma_set_state()
248 ss->current_op = op; sdma_set_state()
250 ppd->dd->f_sdma_sendctrl(ppd, ss->current_op); sdma_set_state()
534 struct qib_sge_state *ss, u32 dwords, qib_sdma_verbs_send()
586 sge = &ss->sge; qib_sdma_verbs_send()
621 if (--ss->num_sge) qib_sdma_verbs_send()
622 *sge = *ss->sg_list++; qib_sdma_verbs_send()
691 tx->ss = ss; qib_sdma_verbs_send()
791 struct qib_sdma_state *ss = &ppd->sdma_state; __qib_sdma_process_event() local
793 switch (ss->current_state) { __qib_sdma_process_event()
805 ss->go_s99_running = 1; __qib_sdma_process_event()
842 sdma_set_state(ppd, ss->go_s99_running ? __qib_sdma_process_event()
847 ss->go_s99_running = 1; __qib_sdma_process_event()
856 ss->go_s99_running = 0; __qib_sdma_process_event()
879 ss->go_s99_running = 1; __qib_sdma_process_event()
908 ss->go_s99_running = 1; __qib_sdma_process_event()
920 ss->go_s99_running = 0; __qib_sdma_process_event()
942 ss->go_s99_running = 1; __qib_sdma_process_event()
954 ss->go_s99_running = 0; __qib_sdma_process_event()
976 ss->go_s99_running = 1; __qib_sdma_process_event()
988 ss->go_s99_running = 0; __qib_sdma_process_event()
1022 ss->go_s99_running = 0; __qib_sdma_process_event()
1038 ss->last_event = event; __qib_sdma_process_event()
533 qib_sdma_verbs_send(struct qib_pportdata *ppd, struct qib_sge_state *ss, u32 dwords, struct qib_verbs_txreq *tx) qib_sdma_verbs_send() argument
H A Dqib_verbs.c165 * @ss: the SGE state
169 void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) qib_copy_sge() argument
171 struct qib_sge *sge = &ss->sge; qib_copy_sge()
188 if (--ss->num_sge) qib_copy_sge()
189 *sge = *ss->sg_list++; qib_copy_sge()
208 * @ss: the SGE state
211 void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) qib_skip_sge() argument
213 struct qib_sge *sge = &ss->sge; qib_skip_sge()
229 if (--ss->num_sge) qib_skip_sge()
230 *sge = *ss->sg_list++; qib_skip_sge()
251 static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) qib_count_sge() argument
253 struct qib_sge *sg_list = ss->sg_list; qib_count_sge()
254 struct qib_sge sge = ss->sge; qib_count_sge()
255 u8 num_sge = ss->num_sge; qib_count_sge()
297 static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) qib_copy_from_sge() argument
299 struct qib_sge *sge = &ss->sge; qib_copy_from_sge()
314 if (--ss->num_sge) qib_copy_from_sge()
315 *sge = *ss->sg_list++; qib_copy_from_sge()
735 static void update_sge(struct qib_sge_state *ss, u32 length) update_sge() argument
737 struct qib_sge *sge = &ss->sge; update_sge()
743 if (--ss->num_sge) update_sge()
744 *sge = *ss->sg_list++; update_sge()
792 static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, copy_io() argument
800 u32 len = ss->sge.length; copy_io()
805 if (len > ss->sge.sge_length) copy_io()
806 len = ss->sge.sge_length; copy_io()
809 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); copy_io()
811 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & copy_io()
842 u32 *addr = (u32 *) ss->sge.vaddr; copy_io()
895 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); copy_io()
897 last = ((u32 *) ss->sge.vaddr)[w - 1]; copy_io()
902 qib_pio_copy(piobuf, ss->sge.vaddr, w); copy_io()
907 u32 v = ((u32 *) ss->sge.vaddr)[w]; copy_io()
913 update_sge(ss, len); copy_io()
917 update_sge(ss, length); copy_io()
1136 u32 hdrwords, struct qib_sge_state *ss, u32 len, qib_verbs_send_dma()
1153 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); qib_verbs_send_dma()
1182 ndesc = qib_count_sge(ss, len); qib_verbs_send_dma()
1197 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); qib_verbs_send_dma()
1209 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); qib_verbs_send_dma()
1271 u32 hdrwords, struct qib_sge_state *ss, u32 len, qib_verbs_send_pio()
1325 if (likely(ss->num_sge == 1 && len <= ss->sge.length && qib_verbs_send_pio()
1326 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { qib_verbs_send_pio()
1327 u32 *addr = (u32 *) ss->sge.vaddr; qib_verbs_send_pio()
1330 update_sge(ss, len); qib_verbs_send_pio()
1342 copy_io(piobuf, ss, len, flush_wc); qib_verbs_send_pio()
1372 * @ss: the SGE to send
1379 u32 hdrwords, struct qib_sge_state *ss, u32 len) qib_verbs_send()
1399 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, qib_verbs_send()
1402 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, qib_verbs_send()
1135 qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, u32 hdrwords, struct qib_sge_state *ss, u32 len, u32 plen, u32 dwords) qib_verbs_send_dma() argument
1270 qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, u32 hdrwords, struct qib_sge_state *ss, u32 len, u32 plen, u32 dwords) qib_verbs_send_pio() argument
1378 qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, u32 hdrwords, struct qib_sge_state *ss, u32 len) qib_verbs_send() argument
H A Dqib_ruc.c87 struct qib_sge_state *ss; qib_init_sge() local
91 ss = &qp->r_sge; qib_init_sge()
92 ss->sg_list = qp->r_sg_list; qib_init_sge()
98 if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, qib_init_sge()
104 ss->num_sge = j; qib_init_sge()
105 ss->total_len = qp->r_len; qib_init_sge()
111 struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; qib_init_sge()
115 ss->num_sge = 0; qib_init_sge()
H A Dqib_verbs.h951 u32 hdrwords, struct qib_sge_state *ss, u32 len);
953 void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
956 void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
1066 static inline void qib_put_ss(struct qib_sge_state *ss) qib_put_ss() argument
1068 while (ss->num_sge) { qib_put_ss()
1069 qib_put_mr(ss->sge.mr); qib_put_ss()
1070 if (--ss->num_sge) qib_put_ss()
1071 ss->sge = *ss->sg_list++; qib_put_ss()
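The qib hits above all walk a struct qib_sge_state the same way: ss->sge is the entry currently being consumed, ss->sg_list holds the remaining entries and ss->num_sge counts them, so exhausting the current entry means decrementing the count and promoting the next list element. A short sketch of that advance step as it recurs in qib_copy_sge(), qib_skip_sge(), update_sge() and qib_put_ss():

	struct qib_sge *sge = &ss->sge;		/* entry currently being consumed */

	/* ... once the current entry is exhausted ... */
	if (--ss->num_sge)			/* more entries after this one? */
		*sge = *ss->sg_list++;		/* make the next list entry current */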
H A Dqib_rc.c43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, restart_sge() argument
49 ss->sge = wqe->sg_list[0]; restart_sge()
50 ss->sg_list = wqe->sg_list + 1; restart_sge()
51 ss->num_sge = wqe->wr.num_sge; restart_sge()
52 ss->total_len = wqe->length; restart_sge()
53 qib_skip_sge(ss, len, 0); restart_sge()
235 struct qib_sge_state *ss; qib_make_rc_req() local
332 ss = &qp->s_sge; qib_make_rc_req()
434 ss = NULL; qib_make_rc_req()
477 ss = NULL; qib_make_rc_req()
525 ss = &qp->s_sge; qib_make_rc_req()
566 ss = &qp->s_sge; qib_make_rc_req()
608 ss = NULL; qib_make_rc_req()
626 qp->s_cur_sge = ss; qib_make_rc_req()
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dasi.h38 /* ss = Single Size, as = All Sizes; */
42 #define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
43 #define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
51 #define ASI_M_TXTC_TAG 0x0C /* Instruction Cache Tag; rw, ss */
52 #define ASI_M_TXTC_DATA 0x0D /* Instruction Cache Data; rw, ss */
53 #define ASI_M_DATAC_TAG 0x0E /* Data Cache Tag; rw, ss */
54 #define ASI_M_DATAC_DATA 0x0F /* Data Cache Data; rw, ss */
62 #define ASI_M_FLUSH_PAGE 0x10 /* Flush I&D Cache Line (page); wo, ss */
63 #define ASI_M_FLUSH_SEG 0x11 /* Flush I&D Cache Line (seg); wo, ss */
64 #define ASI_M_FLUSH_REGION 0x12 /* Flush I&D Cache Line (region); wo, ss */
65 #define ASI_M_FLUSH_CTX 0x13 /* Flush I&D Cache Line (context); wo, ss */
66 #define ASI_M_FLUSH_USER 0x14 /* Flush I&D Cache Line (user); wo, ss */
72 #define ASI_M_IFLUSH_PAGE 0x18 /* Flush I Cache Line (page); wo, ss */
73 #define ASI_M_IFLUSH_SEG 0x19 /* Flush I Cache Line (seg); wo, ss */
74 #define ASI_M_IFLUSH_REGION 0x1A /* Flush I Cache Line (region); wo, ss */
75 #define ASI_M_IFLUSH_CTX 0x1B /* Flush I Cache Line (context); wo, ss */
76 #define ASI_M_IFLUSH_USER 0x1C /* Flush I Cache Line (user); wo, ss */
97 #define ASI_M_FLUSH_IWHOLE 0x31 /* Flush entire ICACHE; wo, ss */
103 #define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */
/linux-4.1.27/include/trace/events/
H A Dsunrpc.h532 __field_struct(struct sockaddr_storage, ss)
539 xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
545 (struct sockaddr *)&__entry->ss,
556 __field_struct(struct sockaddr_storage, ss)
562 xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
567 (struct sockaddr *)&__entry->ss,
595 __field_struct(struct sockaddr_storage, ss)
601 xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
607 (struct sockaddr *)&__entry->ss,
/linux-4.1.27/drivers/ide/
H A Dsc1200.c239 struct sc1200_saved_state *ss = host->host_priv; sc1200_suspend() local
247 pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]); sc1200_suspend()
258 struct sc1200_saved_state *ss = host->host_priv; sc1200_resume() local
271 pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]); sc1200_resume()
308 struct sc1200_saved_state *ss = NULL; sc1200_init_one() local
312 ss = kmalloc(sizeof(*ss), GFP_KERNEL); sc1200_init_one()
313 if (ss == NULL) sc1200_init_one()
316 rc = ide_pci_init_one(dev, &sc1200_chipset, ss); sc1200_init_one()
318 kfree(ss); sc1200_init_one()
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Deeprom.c362 int16_t ss; ath9k_hw_get_gain_boundaries_pdadcs() local
502 ss = (int16_t)(0 - (minPwrT4[i] / 2)); ath9k_hw_get_gain_boundaries_pdadcs()
504 ss = 0; ath9k_hw_get_gain_boundaries_pdadcs()
506 ss = (int16_t)((pPdGainBoundaries[i - 1] - ath9k_hw_get_gain_boundaries_pdadcs()
513 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { ath9k_hw_get_gain_boundaries_pdadcs()
514 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep); ath9k_hw_get_gain_boundaries_pdadcs()
516 ss++; ath9k_hw_get_gain_boundaries_pdadcs()
525 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { ath9k_hw_get_gain_boundaries_pdadcs()
526 pPDADCValues[k++] = vpdTableI[i][ss++]; ath9k_hw_get_gain_boundaries_pdadcs()
534 while ((ss <= tgtIndex) && ath9k_hw_get_gain_boundaries_pdadcs()
537 (ss - maxIndex + 1) * vpdStep)); ath9k_hw_get_gain_boundaries_pdadcs()
540 ss++; ath9k_hw_get_gain_boundaries_pdadcs()
/linux-4.1.27/include/linux/netfilter/
H A Dnfnetlink.h60 * @ss: The nfnetlink subsystem ID
66 #define nfnl_dereference(p, ss) \
67 rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
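nfnl_dereference() above is a thin wrapper: it resolves an RCU-protected pointer while lockdep_nfnl_is_held(ss) asserts that the mutex of nfnetlink subsystem @ss is held. A hedged usage sketch (the pointer name and its type are made-up placeholders; NFNL_SUBSYS_CTNETLINK is one of the standard subsystem IDs):

	/* caller already holds the ctnetlink nfnl mutex */
	struct my_cfg *cfg = nfnl_dereference(rcu_cfg_ptr, NFNL_SUBSYS_CTNETLINK);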
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Datombios_crtc.c442 struct radeon_atom_ss *ss) atombios_crtc_program_ss()
449 /* Don't mess with SS if percentage is 0 or external ss. atombios_crtc_program_ss()
454 if (ss->percentage == 0) atombios_crtc_program_ss()
456 if (ss->type & ATOM_EXTERNAL_SS_MASK) atombios_crtc_program_ss()
477 args.v3.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; atombios_crtc_program_ss()
491 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); atombios_crtc_program_ss()
492 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step); atombios_crtc_program_ss()
495 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); atombios_crtc_program_ss()
496 args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; atombios_crtc_program_ss()
510 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount); atombios_crtc_program_ss()
511 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step); atombios_crtc_program_ss()
514 args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); atombios_crtc_program_ss()
515 args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; atombios_crtc_program_ss()
516 args.v1.ucSpreadSpectrumStep = ss->step; atombios_crtc_program_ss()
517 args.v1.ucSpreadSpectrumDelay = ss->delay; atombios_crtc_program_ss()
518 args.v1.ucSpreadSpectrumRange = ss->range; atombios_crtc_program_ss()
522 if ((enable == ATOM_DISABLE) || (ss->percentage == 0) || atombios_crtc_program_ss()
523 (ss->type & ATOM_EXTERNAL_SS_MASK)) { atombios_crtc_program_ss()
527 args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); atombios_crtc_program_ss()
528 args.lvds_ss_2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; atombios_crtc_program_ss()
529 args.lvds_ss_2.ucSpreadSpectrumStep = ss->step; atombios_crtc_program_ss()
530 args.lvds_ss_2.ucSpreadSpectrumDelay = ss->delay; atombios_crtc_program_ss()
531 args.lvds_ss_2.ucSpreadSpectrumRange = ss->range; atombios_crtc_program_ss()
538 args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); atombios_crtc_program_ss()
539 args.lvds_ss.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK; atombios_crtc_program_ss()
540 args.lvds_ss.ucSpreadSpectrumStepSize_Delay = (ss->step & 3) << 2; atombios_crtc_program_ss()
541 args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4; atombios_crtc_program_ss()
620 /* use recommended ref_div for ss */ atombios_adjust_pll()
623 if (radeon_crtc->ss.refdiv) { atombios_adjust_pll()
625 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; atombios_adjust_pll()
689 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) atombios_adjust_pll()
702 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) atombios_adjust_pll()
826 struct radeon_atom_ss *ss) atombios_crtc_program_pll()
876 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) atombios_crtc_program_pll()
889 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) atombios_crtc_program_pll()
918 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) atombios_crtc_program_pll()
990 radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss, atombios_crtc_prepare_pll()
997 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1002 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1007 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1018 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1024 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1031 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1039 &radeon_crtc->ss, atombios_crtc_prepare_pll()
1104 radeon_crtc->crtc_id, &radeon_crtc->ss); atombios_crtc_set_pll()
1109 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss); atombios_crtc_set_pll()
1112 /* calculate ss amount and step size */ atombios_crtc_set_pll()
1116 (u32)radeon_crtc->ss.percentage) / atombios_crtc_set_pll()
1117 (100 * (u32)radeon_crtc->ss.percentage_divider); atombios_crtc_set_pll()
1118 radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; atombios_crtc_set_pll()
1119 radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & atombios_crtc_set_pll()
1121 if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) atombios_crtc_set_pll()
1122 step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / atombios_crtc_set_pll()
1125 step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) / atombios_crtc_set_pll()
1127 radeon_crtc->ss.step = step_size; atombios_crtc_set_pll()
1131 radeon_crtc->crtc_id, &radeon_crtc->ss); atombios_crtc_set_pll()
2006 struct radeon_atom_ss ss; radeon_atom_disp_eng_pll_init() local
2007 bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, radeon_atom_disp_eng_pll_init()
2011 atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss); radeon_atom_disp_eng_pll_init()
2015 atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss); radeon_atom_disp_eng_pll_init()
2129 struct radeon_atom_ss ss; atombios_crtc_disable() local
2174 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); atombios_crtc_disable()
2183 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); atombios_crtc_disable()
438 atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, int crtc_id, struct radeon_atom_ss *ss) atombios_crtc_program_ss() argument
814 atombios_crtc_program_pll(struct drm_crtc *crtc, u32 crtc_id, int pll_id, u32 encoder_mode, u32 encoder_id, u32 clock, u32 ref_div, u32 fb_div, u32 frac_fb_div, u32 post_div, int bpc, bool ss_enabled, struct radeon_atom_ss *ss) atombios_crtc_program_pll() argument
H A Drv740_dpm.c160 struct radeon_atom_ss ss; rv740_populate_sclk_value() local
163 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv740_populate_sclk_value()
165 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); rv740_populate_sclk_value()
166 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); rv740_populate_sclk_value()
247 struct radeon_atom_ss ss; rv740_populate_mclk_value() local
250 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv740_populate_mclk_value()
254 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); rv740_populate_mclk_value()
255 u32 clk_v = 0x40000 * ss.percentage * rv740_populate_mclk_value()
H A Drv730_dpm.c92 struct radeon_atom_ss ss; rv730_populate_sclk_value() local
95 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv730_populate_sclk_value()
97 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); rv730_populate_sclk_value()
98 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000); rv730_populate_sclk_value()
167 struct radeon_atom_ss ss; rv730_populate_mclk_value() local
170 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv730_populate_mclk_value()
173 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); rv730_populate_mclk_value()
174 u32 clk_v = ss.percentage * dividers.fb_div / (clk_s * 10000); rv730_populate_mclk_value()
H A Drv6xx_dpm.c555 struct radeon_atom_ss ss; rv6xx_program_engine_spread_spectrum() local
565 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv6xx_program_engine_spread_spectrum()
569 ss.rate, rv6xx_program_engine_spread_spectrum()
570 ss.percentage, rv6xx_program_engine_spread_spectrum()
573 clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate, rv6xx_program_engine_spread_spectrum()
658 struct radeon_atom_ss ss; rv6xx_program_mclk_spread_spectrum_parameters() local
683 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv6xx_program_mclk_spread_spectrum_parameters()
687 ss.rate, rv6xx_program_mclk_spread_spectrum_parameters()
688 ss.percentage, rv6xx_program_mclk_spread_spectrum_parameters()
691 clk_s = rv6xx_calculate_spread_spectrum_clk_s(ss.rate, rv6xx_program_mclk_spread_spectrum_parameters()
1934 struct radeon_atom_ss ss; rv6xx_dpm_init() local
1981 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, rv6xx_dpm_init()
1983 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, rv6xx_dpm_init()
1986 /* Disable sclk ss, causes hangs on a lot of systems */ rv6xx_dpm_init()
H A Dradeon_atombios.c1387 struct radeon_atom_ss *ss, radeon_atombios_get_ppll_ss_info()
1398 memset(ss, 0, sizeof(struct radeon_atom_ss)); radeon_atombios_get_ppll_ss_info()
1410 ss->percentage = radeon_atombios_get_ppll_ss_info()
1412 ss->type = ss_assign->ucSpreadSpectrumType; radeon_atombios_get_ppll_ss_info()
1413 ss->step = ss_assign->ucSS_Step; radeon_atombios_get_ppll_ss_info()
1414 ss->delay = ss_assign->ucSS_Delay; radeon_atombios_get_ppll_ss_info()
1415 ss->range = ss_assign->ucSS_Range; radeon_atombios_get_ppll_ss_info()
1416 ss->refdiv = ss_assign->ucRecommendedRef_Div; radeon_atombios_get_ppll_ss_info()
1427 struct radeon_atom_ss *ss, radeon_atombios_get_igp_ss_overrides()
1496 ss->percentage = percentage; radeon_atombios_get_igp_ss_overrides()
1498 ss->rate = rate; radeon_atombios_get_igp_ss_overrides()
1515 struct radeon_atom_ss *ss, radeon_atombios_get_asic_ss_info()
1535 memset(ss, 0, sizeof(struct radeon_atom_ss)); radeon_atombios_get_asic_ss_info()
1551 ss->percentage = radeon_atombios_get_asic_ss_info()
1553 ss->type = ss_assign->v1.ucSpreadSpectrumMode; radeon_atombios_get_asic_ss_info()
1554 ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz); radeon_atombios_get_asic_ss_info()
1555 ss->percentage_divider = 100; radeon_atombios_get_asic_ss_info()
1569 ss->percentage = radeon_atombios_get_asic_ss_info()
1571 ss->type = ss_assign->v2.ucSpreadSpectrumMode; radeon_atombios_get_asic_ss_info()
1572 ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz); radeon_atombios_get_asic_ss_info()
1573 ss->percentage_divider = 100; radeon_atombios_get_asic_ss_info()
1577 ss->rate /= 100; radeon_atombios_get_asic_ss_info()
1591 ss->percentage = radeon_atombios_get_asic_ss_info()
1593 ss->type = ss_assign->v3.ucSpreadSpectrumMode; radeon_atombios_get_asic_ss_info()
1594 ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz); radeon_atombios_get_asic_ss_info()
1597 ss->percentage_divider = 1000; radeon_atombios_get_asic_ss_info()
1599 ss->percentage_divider = 100; radeon_atombios_get_asic_ss_info()
1602 ss->rate /= 100; radeon_atombios_get_asic_ss_info()
1604 radeon_atombios_get_igp_ss_overrides(rdev, ss, id); radeon_atombios_get_asic_ss_info()
1386 radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id) radeon_atombios_get_ppll_ss_info() argument
1426 radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id) radeon_atombios_get_igp_ss_overrides() argument
1514 radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, struct radeon_atom_ss *ss, int id, u32 clock) radeon_atombios_get_asic_ss_info() argument
H A Dnislands_smc.h300 uint32_t ss[256]; member in struct:SMC_NISLANDS_SPLL_DIV_TABLE
H A Dsislands_smc.h350 uint32_t ss[256]; member in struct:SMC_SISLANDS_SPLL_DIV_TABLE
H A Drv770_dpm.c539 struct radeon_atom_ss ss; rv770_populate_sclk_value() local
542 if (radeon_atombios_get_asic_ss_info(rdev, &ss, rv770_populate_sclk_value()
544 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); rv770_populate_sclk_value()
545 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000); rv770_populate_sclk_value()
2330 struct radeon_atom_ss ss; rv770_get_engine_memory_ss() local
2332 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, rv770_get_engine_memory_ss()
2334 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, rv770_get_engine_memory_ss()
H A Dcypress_dpm.c554 struct radeon_atom_ss ss; cypress_populate_mclk_value() local
557 if (radeon_atombios_get_asic_ss_info(rdev, &ss, cypress_populate_mclk_value()
561 u32 clk_s = reference_clock * 5 / (decoded_ref * ss.rate); cypress_populate_mclk_value()
562 u32 clk_v = ss.percentage * cypress_populate_mclk_value()
H A Dradeon_mode.h357 struct radeon_atom_ss ss; member in struct:radeon_crtc
807 struct radeon_atom_ss *ss,
810 struct radeon_atom_ss *ss,
/linux-4.1.27/arch/x86/power/
H A Dcpu.c86 savesegment(ss, ctxt->ss); __save_processor_state()
93 asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss)); __save_processor_state()
205 loadsegment(ss, ctxt->ss); __restore_processor_state()
218 asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss)); __restore_processor_state()
/linux-4.1.27/arch/hexagon/kernel/
H A Dprocess.c75 struct hexagon_switch_stack *ss; copy_thread() local
90 ss = (struct hexagon_switch_stack *) ((unsigned long) childregs - copy_thread()
91 sizeof(*ss)); copy_thread()
92 ss->lr = (unsigned long)ret_from_fork; copy_thread()
93 p->thread.switch_sp = ss; copy_thread()
97 ss->r24 = usp; copy_thread()
98 ss->r25 = arg; copy_thread()
103 ss->r2524 = 0; copy_thread()
/linux-4.1.27/arch/m32r/boot/compressed/
H A Dmisc.c33 char *ss = s; memset() local
36 *ss++ = c; memset()
/linux-4.1.27/sound/pci/
H A Dad1889.c321 snd_ad1889_playback_open(struct snd_pcm_substream *ss) snd_ad1889_playback_open() argument
323 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_playback_open()
324 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_playback_open()
326 chip->psubs = ss; snd_ad1889_playback_open()
333 snd_ad1889_capture_open(struct snd_pcm_substream *ss) snd_ad1889_capture_open() argument
335 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_capture_open()
336 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_capture_open()
338 chip->csubs = ss; snd_ad1889_capture_open()
345 snd_ad1889_playback_close(struct snd_pcm_substream *ss) snd_ad1889_playback_close() argument
347 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_playback_close()
353 snd_ad1889_capture_close(struct snd_pcm_substream *ss) snd_ad1889_capture_close() argument
355 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_capture_close()
361 snd_ad1889_playback_prepare(struct snd_pcm_substream *ss) snd_ad1889_playback_prepare() argument
363 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_playback_prepare()
364 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_playback_prepare()
365 unsigned int size = snd_pcm_lib_buffer_bytes(ss); snd_ad1889_playback_prepare()
366 unsigned int count = snd_pcm_lib_period_bytes(ss); snd_ad1889_playback_prepare()
411 snd_ad1889_capture_prepare(struct snd_pcm_substream *ss) snd_ad1889_capture_prepare() argument
413 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_capture_prepare()
414 struct snd_pcm_runtime *rt = ss->runtime; snd_ad1889_capture_prepare()
415 unsigned int size = snd_pcm_lib_buffer_bytes(ss); snd_ad1889_capture_prepare()
416 unsigned int count = snd_pcm_lib_period_bytes(ss); snd_ad1889_capture_prepare()
462 snd_ad1889_playback_trigger(struct snd_pcm_substream *ss, int cmd) snd_ad1889_playback_trigger() argument
465 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_playback_trigger()
503 snd_ad1889_capture_trigger(struct snd_pcm_substream *ss, int cmd) snd_ad1889_capture_trigger() argument
506 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_capture_trigger()
538 snd_ad1889_playback_pointer(struct snd_pcm_substream *ss) snd_ad1889_playback_pointer() argument
541 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_playback_pointer()
552 return bytes_to_frames(ss->runtime, ptr); snd_ad1889_playback_pointer()
557 snd_ad1889_capture_pointer(struct snd_pcm_substream *ss) snd_ad1889_capture_pointer() argument
560 struct snd_ad1889 *chip = snd_pcm_substream_chip(ss); snd_ad1889_capture_pointer()
571 return bytes_to_frames(ss->runtime, ptr); snd_ad1889_capture_pointer()
/linux-4.1.27/sound/soc/au1x/
H A Ddma.c174 static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss) ss_to_ctx() argument
176 struct snd_soc_pcm_runtime *rtd = ss->private_data; ss_to_ctx()
180 static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss) ss_to_as() argument
182 struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss); ss_to_as()
183 return &(ctx->stream[ss->stream]); ss_to_as()
268 static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss) alchemy_pcm_pointer() argument
270 struct audio_stream *stream = ss_to_as(ss); alchemy_pcm_pointer()
277 return bytes_to_frames(ss->runtime, location); alchemy_pcm_pointer()
H A Ddbdma2.c187 static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss) to_dmadata() argument
189 struct snd_soc_pcm_runtime *rtd = ss->private_data; to_dmadata()
192 return &pcd[ss->stream]; to_dmadata()
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_verbs.c167 * @ss: the SGE state
171 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length) ipath_copy_sge() argument
173 struct ipath_sge *sge = &ss->sge; ipath_copy_sge()
188 if (--ss->num_sge) ipath_copy_sge()
189 *sge = *ss->sg_list++; ipath_copy_sge()
208 * @ss: the SGE state
211 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length) ipath_skip_sge() argument
213 struct ipath_sge *sge = &ss->sge; ipath_skip_sge()
227 if (--ss->num_sge) ipath_skip_sge()
228 *sge = *ss->sg_list++; ipath_skip_sge()
249 static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length) ipath_count_sge() argument
251 struct ipath_sge *sg_list = ss->sg_list; ipath_count_sge()
252 struct ipath_sge sge = ss->sge; ipath_count_sge()
253 u8 num_sge = ss->num_sge; ipath_count_sge()
295 static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss, ipath_copy_from_sge() argument
298 struct ipath_sge *sge = &ss->sge; ipath_copy_from_sge()
313 if (--ss->num_sge) ipath_copy_from_sge()
314 *sge = *ss->sg_list++; ipath_copy_from_sge()
780 static void update_sge(struct ipath_sge_state *ss, u32 length) update_sge() argument
782 struct ipath_sge *sge = &ss->sge; update_sge()
788 if (--ss->num_sge) update_sge()
789 *sge = *ss->sg_list++; update_sge()
837 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, copy_io() argument
845 u32 len = ss->sge.length; copy_io()
850 if (len > ss->sge.sge_length) copy_io()
851 len = ss->sge.sge_length; copy_io()
854 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); copy_io()
856 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & copy_io()
887 u32 *addr = (u32 *) ss->sge.vaddr; copy_io()
941 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); copy_io()
943 last = ((u32 *) ss->sge.vaddr)[w - 1]; copy_io()
948 __iowrite32_copy(piobuf, ss->sge.vaddr, w); copy_io()
953 u32 v = ((u32 *) ss->sge.vaddr)[w]; copy_io()
959 update_sge(ss, len); copy_io()
963 update_sge(ss, length); copy_io()
1099 struct ipath_sge_state *ss, u32 len, ipath_verbs_send_dma()
1115 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx); ipath_verbs_send_dma()
1158 ndesc = ipath_count_sge(ss, len); ipath_verbs_send_dma()
1171 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx); ipath_verbs_send_dma()
1173 /* save ss and length in dwords */ ipath_verbs_send_dma()
1174 tx->ss = ss; ipath_verbs_send_dma()
1196 ipath_copy_from_sge(piobuf + hdrwords, ss, len); ipath_verbs_send_dma()
1206 tx->ss = NULL; ipath_verbs_send_dma()
1224 struct ipath_sge_state *ss, u32 len, ipath_verbs_send_pio()
1285 if (likely(ss->num_sge == 1 && len <= ss->sge.length && ipath_verbs_send_pio()
1286 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { ipath_verbs_send_pio()
1287 u32 *addr = (u32 *) ss->sge.vaddr; ipath_verbs_send_pio()
1290 update_sge(ss, len); ipath_verbs_send_pio()
1302 copy_io(piobuf, ss, len, flush_wc); ipath_verbs_send_pio()
1319 * @ss: the SGE to send
1323 u32 hdrwords, struct ipath_sge_state *ss, u32 len) ipath_verbs_send()
1343 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, ipath_verbs_send()
1346 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, ipath_verbs_send()
1097 ipath_verbs_send_dma(struct ipath_qp *qp, struct ipath_ib_header *hdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len, u32 plen, u32 dwords) ipath_verbs_send_dma() argument
1222 ipath_verbs_send_pio(struct ipath_qp *qp, struct ipath_ib_header *ibhdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len, u32 plen, u32 dwords) ipath_verbs_send_pio() argument
1322 ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, u32 hdrwords, struct ipath_sge_state *ss, u32 len) ipath_verbs_send() argument
H A Dipath_keys.c191 * @ss: SGE state
199 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, ipath_rkey_ok() argument
204 struct ipath_sge *sge = &ss->sge; ipath_rkey_ok()
226 ss->sg_list = NULL; ipath_rkey_ok()
227 ss->num_sge = 1; ipath_rkey_ok()
263 ss->sg_list = NULL; ipath_rkey_ok()
264 ss->num_sge = 1; ipath_rkey_ok()
H A Dipath_rc.c42 static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, restart_sge() argument
48 ss->sge = wqe->sg_list[0]; restart_sge()
49 ss->sg_list = wqe->sg_list + 1; restart_sge()
50 ss->num_sge = wqe->wr.num_sge; restart_sge()
51 ipath_skip_sge(ss, len); restart_sge()
217 struct ipath_sge_state *ss; ipath_make_rc_req() local
308 ss = &qp->s_sge; ipath_make_rc_req()
410 ss = NULL; ipath_make_rc_req()
452 ss = NULL; ipath_make_rc_req()
504 ss = &qp->s_sge; ipath_make_rc_req()
540 ss = &qp->s_sge; ipath_make_rc_req()
578 ss = NULL; ipath_make_rc_req()
589 qp->s_cur_sge = ss; ipath_make_rc_req()
H A Dipath_ruc.c124 u32 *lengthp, struct ipath_sge_state *ss) ipath_init_sge()
134 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, ipath_init_sge()
140 ss->num_sge = j; ipath_init_sge()
123 ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, u32 *lengthp, struct ipath_sge_state *ss) ipath_init_sge() argument
H A Dipath_sdma.c665 struct ipath_sge_state *ss, u32 dwords, ipath_sdma_verbs_send()
729 sge = &ss->sge; ipath_sdma_verbs_send()
763 if (--ss->num_sge) ipath_sdma_verbs_send()
764 *sge = *ss->sg_list++; ipath_sdma_verbs_send()
664 ipath_sdma_verbs_send(struct ipath_devdata *dd, struct ipath_sge_state *ss, u32 dwords, struct ipath_verbs_txreq *tx) ipath_sdma_verbs_send() argument
H A Dipath_verbs.h650 struct ipath_sge_state *ss; member in struct:ipath_verbs_txreq
759 u32 hdrwords, struct ipath_sge_state *ss, u32 len);
761 void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
763 void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
788 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
858 u32 *lengthp, struct ipath_sge_state *ss);
/linux-4.1.27/tools/testing/selftests/x86/
H A Dsigreturn.c71 * want to fish out their ss values, so this trampoline copies ss to eax
78 "mov %ss,%eax\n\t"
278 unsigned short cs, gs, fs, ss; member in struct:selectors
284 return &sels->ss; ssptr()
347 unsigned short ss; sigtrap() local
348 asm ("mov %%ss,%0" : "=r" (ss)); sigtrap()
353 printf("[FAIL]\tSIGTRAP: ss = %hx, frame ss = %hx, ax = %llx\n", sigtrap()
354 ss, *ssptr(ctx), (unsigned long long)asm_ss); sigtrap()
437 asm volatile ("mov %%ss,%0" : "=r" (sig_ss)); test_valid_sigreturn()
490 if (req_sels->ss != res_sels->ss) { test_valid_sigreturn()
492 req_sels->ss, res_sels->ss); test_valid_sigreturn()
530 static int test_bad_iret(int cs_bits, unsigned short ss, int force_cs) test_bad_iret() argument
537 sig_ss = ss; test_bad_iret()
590 asm volatile ("mov %%ss,%0" : "=r" (my_ss)); main()
H A Dentry_from_vm86.c101 v86.regs.ss = load_addr / 16; main()
/linux-4.1.27/arch/x86/math-emu/
H A Dget_address.c49 offsetof(struct pt_regs, ss),
62 offsetof(struct pt_regs, ss),
72 u_char ss, index, base; sib() local
80 ss = base >> 6; sib()
91 /* A non-zero ss is illegal */ sib()
92 if (ss) sib()
95 offset += (REG_(index)) << ss; sib()
H A Dfpu_system.h53 #define FPU_SS (*(unsigned short *) &(FPU_info->regs->ss))
/linux-4.1.27/arch/x86/xen/
H A Dxen-asm_32.S105 movl %ss:xen_vcpu, %eax
118 setz %ss:XEN_vcpu_info_mask(%eax)
122 cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
130 movb $1, %ss:XEN_vcpu_info_mask(%eax)
166 * ss : (ss/esp may be present if we came from usermode)
175 * cs } (no ss/esp because we're nested
H A Dxen-asm_64.S32 * ss
110 * ss
/linux-4.1.27/drivers/usb/storage/
H A Donetouch.c46 static int onetouch_connect_input(struct us_data *ss);
176 static int onetouch_connect_input(struct us_data *ss) onetouch_connect_input() argument
178 struct usb_device *udev = ss->pusb_dev; onetouch_connect_input()
186 interface = ss->pusb_intf->cur_altsetting; onetouch_connect_input()
253 ss->extra_destructor = onetouch_release_input; onetouch_connect_input()
254 ss->extra = onetouch; onetouch_connect_input()
256 ss->suspend_resume_hook = usb_onetouch_pm_hook; onetouch_connect_input()
/linux-4.1.27/arch/s390/lib/
H A Duaccess.c28 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" copy_from_user_mvcos()
39 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" copy_from_user_mvcos()
121 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" copy_to_user_mvcos()
132 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" copy_to_user_mvcos()
195 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" copy_in_user_mvcos()
256 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" clear_user_mvcos()
266 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" clear_user_mvcos()
/linux-4.1.27/arch/x86/platform/efi/
H A Defi_thunk_64.S74 movl %ss, %eax
98 movl %ebx, %ss
135 movl %eax, %ss
/linux-4.1.27/drivers/video/fbdev/
H A Dleo.c211 struct leo_ld_ss0 __iomem *ss = par->ld_ss0; leo_switch_from_graph() local
221 sbus_writel(0xffffffff, &ss->wid); leo_switch_from_graph()
222 sbus_writel(0xffff, &ss->wmask); leo_switch_from_graph()
223 sbus_writel(0, &ss->vclipmin); leo_switch_from_graph()
224 sbus_writel(par->extent, &ss->vclipmax); leo_switch_from_graph()
225 sbus_writel(0, &ss->fg); leo_switch_from_graph()
226 sbus_writel(0xff000000, &ss->planemask); leo_switch_from_graph()
227 sbus_writel(0x310850, &ss->rop); leo_switch_from_graph()
228 sbus_writel(0, &ss->widclip); leo_switch_from_graph()
239 sbus_writel(1, &ss->wid); leo_switch_from_graph()
240 sbus_writel(0x00ffffff, &ss->planemask); leo_switch_from_graph()
241 sbus_writel(0x310b90, &ss->rop); leo_switch_from_graph()
/linux-4.1.27/drivers/pcmcia/
H A Dbcm63xx_pcmcia.h6 #include <pcmcia/ss.h>
H A Drsrc_mgr.c19 #include <pcmcia/ss.h>
H A Dpxa2xx_stargate2.c24 #include <pcmcia/ss.h>
H A Dcardbus.c27 #include <pcmcia/ss.h>
H A Dpxa2xx_mainstone.c22 #include <pcmcia/ss.h>
H A Dpxa2xx_viper.c24 #include <pcmcia/ss.h>
H A Drsrc_iodyn.c19 #include <pcmcia/ss.h>
H A Dsa1100_generic.c38 #include <pcmcia/ss.h>
H A Dsa1111_generic.c17 #include <pcmcia/ss.h>
H A Dsoc_common.h15 #include <pcmcia/ss.h>
H A Dsocket_sysfs.c28 #include <pcmcia/ss.h>
H A Ddb1xxx_ss.c34 #include <pcmcia/ss.h>
441 printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid); db1x_pcmcia_socket_probe()
H A Dbfin_cf_pcmcia.c40 #include <pcmcia/ss.h>
H A Delectra_cf.c38 #include <pcmcia/ss.h>
H A Domap_cf.c21 #include <pcmcia/ss.h>
H A Dpxa2xx_base.c35 #include <pcmcia/ss.h>
H A Dvrc4173_cardu.h35 #include <pcmcia/ss.h>
H A Dxxs1500_ss.c20 #include <pcmcia/ss.h>
H A Dat91_cf.c27 #include <pcmcia/ss.h>
H A Di82092.c18 #include <pcmcia/ss.h>
H A Dm32r_pcc.c29 #include <pcmcia/ss.h>
H A Dpcmcia_cis.c24 #include <pcmcia/ss.h>
H A Dvrc4171_card.c30 #include <pcmcia/ss.h>
H A Dvrc4173_cardu.c39 #include <pcmcia/ss.h>
/linux-4.1.27/arch/x86/purgatory/
H A Dsetup-x86_64.S26 movl %eax, %ss
H A Dentry64.S27 movl %eax, %ss
/linux-4.1.27/include/uapi/video/
H A Duvesafb.h18 __u16 ss; member in struct:v86_regs
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dcapa.c296 struct scatterlist ss; capa_encrypt_id() local
327 sg_init_table(&ss, 1); capa_encrypt_id()
328 sg_set_page(&ss, virt_to_page(s), 16, capa_encrypt_id()
333 rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16); capa_encrypt_id()
349 struct scatterlist ss; capa_decrypt_id() local
380 sg_init_table(&ss, 1); capa_decrypt_id()
381 sg_set_page(&ss, virt_to_page(s), 16, capa_decrypt_id()
387 rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16); capa_decrypt_id()
/linux-4.1.27/arch/sh/boot/compressed/
H A Dmisc.c81 char *ss = (char*)s; memset() local
83 for (i=0;i<n;i++) ss[i] = c; memset()
/linux-4.1.27/arch/x86/kernel/
H A Ddumpstack.c257 unsigned short ss; __die() local
284 ss = regs->ss & 0xffff; __die()
287 savesegment(ss, ss); __die()
291 printk(" SS:ESP %04x:%08lx\n", ss, sp); __die()
H A Dprocess_32.c74 unsigned short ss, gs; __show_regs() local
78 ss = regs->ss & 0xffff; __show_regs()
82 savesegment(ss, ss); __show_regs()
96 (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); __show_regs()
204 regs->ss = __USER_DS; start_thread()
H A Dperf_regs.c30 PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
151 regs_user_copy->ss = user_regs->ss; perf_get_regs_user()
H A Ddoublefault.c64 .ss = __KERNEL_DS,
H A Dasm-offsets_32.c60 OFFSET(PT_OLDSS, pt_regs, ss); foo()
H A Dsignal.c98 COPY_SEG_CPL3(ss); restore_sigcontext()
160 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); setup_sigcontext()
219 (regs->ss & 0xffff) != __USER_DS && get_sigframe()
331 regs->ss = __USER_DS; __setup_frame()
397 regs->ss = __USER_DS; __setup_rt_frame()
527 regs->ss = __USER_DS; x32_setup_rt_frame()
H A Dptrace.c93 REG_OFFSET_NAME(ss),
172 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
174 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
233 * For %cs and %ss we cannot permit a null selector. set_segment_reg()
236 * we will never get back to user mode with invalid %cs or %ss set_segment_reg()
243 case offsetof(struct user_regs_struct, ss): set_segment_reg()
306 case offsetof(struct user_regs_struct, ss):
369 case offsetof(struct user_regs_struct,ss): set_segment_reg()
372 task_pt_regs(task)->ss = value; set_segment_reg()
422 case offsetof(struct user_regs_struct, ss): putreg()
464 case offsetof(struct user_regs_struct, ss): getreg()
961 SEG32(ss); putreg32()
1032 R32(ss, ss); getreg32()
H A Dhead_32.S113 movl %eax,%ss
311 movl %eax,%ss
458 movl %eax,%ss # after changing gdt.
562 cmpl $2,%ss:early_recursion_flag
564 incl %ss:early_recursion_flag
612 decl %ss:early_recursion_flag
H A Dprocess_64.c68 printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, __show_regs()
180 childregs->ss = __KERNEL_DS; copy_thread()
241 regs->ss = _ss; start_thread_common()
457 savesegment(ss, ss_sel); __switch_to()
459 loadsegment(ss, __KERNEL_DS); __switch_to()
H A Dkgdb.c67 { "ss", 4, offsetof(struct pt_regs, ss) },
90 { "ss", 4, offsetof(struct pt_regs, ss) },
H A Duprobes.c83 * 07,17,1f - pop es/ss/ds
88 * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
129 * 16,17 - formerly push/pop ss
254 * 07, 17, 1f - pop es, pop ss, pop ds
255 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
H A Dvm86_32.c58 * after a "mov ss,xx" to make stack handling atomic even without
559 do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); handle_vm86_trap()
591 ssp = (unsigned char __user *) (regs->pt.ss << 4); handle_vm86_fault()
H A Dentry_32.S389 /*CFI_REL_OFFSET ss, 0*/
712 movl %ss, %eax
1302 movl %ss, %eax
1352 pushl_cfi %ss
H A Dmachine_kexec_32.c65 "\tmovl %%eax,%%ss\n" load_segments()
H A Dentry_64.S133 /*CFI_REL_OFFSET ss, 4*8+\offset*/
174 * then loads new ss, cs, and rip from previously programmed MSRs.
222 pushq_cfi $__USER_DS /* pt_regs->ss */
296 * cs and ss are loaded from MSRs.
299 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
1468 pushq 5*8(%rdx) /* pt_regs->ss */
H A Dhead_64.S240 movl %eax,%ss
/linux-4.1.27/arch/ia64/mm/
H A Dtlb.c108 static inline void spinaphore_init(struct spinaphore *ss, int val) spinaphore_init() argument
110 ss->ticket = 0; spinaphore_init()
111 ss->serve = val; spinaphore_init()
114 static inline void down_spin(struct spinaphore *ss) down_spin() argument
116 unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve; down_spin()
118 if (time_before(t, ss->serve)) down_spin()
124 asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); down_spin()
131 static inline void up_spin(struct spinaphore *ss) up_spin() argument
133 ia64_fetchadd(1, &ss->serve, rel); up_spin()
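
Here ss names a "spinaphore", a counting spin primitive built from two counters: ticket hands out positions via fetch-and-add, serve starts at the number of allowed holders, and up_spin() advances it. A portable sketch of that ticket/serve scheme, assuming GCC atomic builtins in place of the IA-64 ia64_fetchadd intrinsic and ignoring the wraparound handling the kernel gets from time_before():

#include <stdio.h>

struct spinaphore {
        unsigned long ticket;   /* next position to hand out */
        unsigned long serve;    /* positions currently allowed through */
};

static void spinaphore_init(struct spinaphore *ss, unsigned long val)
{
        ss->ticket = 0;
        ss->serve = val;
}

static void down_spin(struct spinaphore *ss)
{
        unsigned long t = __atomic_fetch_add(&ss->ticket, 1, __ATOMIC_ACQUIRE);

        while (t >= __atomic_load_n(&ss->serve, __ATOMIC_ACQUIRE))
                ;       /* spin until our ticket number is being served */
}

static void up_spin(struct spinaphore *ss)
{
        __atomic_fetch_add(&ss->serve, 1, __ATOMIC_RELEASE);
}

int main(void)
{
        struct spinaphore ss;

        spinaphore_init(&ss, 2);        /* allow two concurrent holders */
        down_spin(&ss);
        up_spin(&ss);
        printf("ticket=%lu serve=%lu\n", ss.ticket, ss.serve);
        return 0;
}

The acquire/release ordering mirrors the acq/rel completers on the original fetchadds.
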
/linux-4.1.27/fs/cifs/
H A Ddns_resolve.c48 struct sockaddr_storage ss; dns_resolve_server_name_to_ip() local
75 rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len); dns_resolve_server_name_to_ip()
/linux-4.1.27/arch/x86/include/uapi/asm/
H A Dptrace.h74 unsigned long ss; member in struct:pt_regs
H A Dsigcontext32.h71 unsigned short ss, __ssh; member in struct:sigcontext_ia32
H A Dvm86.h82 unsigned short ss, __ssh; member in struct:vm86_regs
H A Dkvm.h145 struct kvm_segment cs, ds, es, fs, gs, ss; member in struct:kvm_sregs
H A Dsigcontext.h124 unsigned short ss, __ssh; member in struct:sigcontext
/linux-4.1.27/arch/x86/kernel/acpi/
H A Dwakeup_32.S14 movw %ax, %ss
H A Dwakeup_64.S22 movw %ax, %ss
/linux-4.1.27/arch/x86/platform/olpc/
H A Dxo1-wakeup.S39 movw %ax, %ss
/linux-4.1.27/include/sound/
H A Dsoc-dai.h282 const struct snd_pcm_substream *ss) snd_soc_dai_get_dma_data()
284 return (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) ? snd_soc_dai_get_dma_data()
289 const struct snd_pcm_substream *ss, snd_soc_dai_set_dma_data()
292 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) snd_soc_dai_set_dma_data()
281 snd_soc_dai_get_dma_data(const struct snd_soc_dai *dai, const struct snd_pcm_substream *ss) snd_soc_dai_get_dma_data() argument
288 snd_soc_dai_set_dma_data(struct snd_soc_dai *dai, const struct snd_pcm_substream *ss, void *data) snd_soc_dai_set_dma_data() argument
/linux-4.1.27/arch/x86/boot/
H A Dheader.S58 movw %ax, %ss
462 # Apparently some ancient versions of LILO invoked the kernel with %ss != %ds,
464 # pointer if %ss is invalid. Otherwise leave it alone, LOADLIN sets up the
467 movw %ss, %dx
468 cmpw %ax, %dx # %ds == %ss?
472 # Invalid %ss, make up a new stack
485 3: movw %ax, %ss
/linux-4.1.27/arch/sparc/kernel/
H A Dsignal32.c47 struct sparc_stackf32 ss; member in struct:signal_frame32
59 struct sparc_stackf32 ss; member in struct:rt_signal_frame32
495 err |= __put_user(rp->locals[i], &sf->ss.locals[i]); setup_frame32()
497 err |= __put_user(rp->ins[i], &sf->ss.ins[i]); setup_frame32()
498 err |= __put_user(rp->ins[6], &sf->ss.fp); setup_frame32()
499 err |= __put_user(rp->ins[7], &sf->ss.callers_pc); setup_frame32()
626 err |= __put_user(rp->locals[i], &sf->ss.locals[i]); setup_rt_frame32()
628 err |= __put_user(rp->ins[i], &sf->ss.ins[i]); setup_rt_frame32()
629 err |= __put_user(rp->ins[6], &sf->ss.fp); setup_rt_frame32()
630 err |= __put_user(rp->ins[7], &sf->ss.callers_pc); setup_rt_frame32()
H A Dsignal_32.c38 struct sparc_stackf ss; member in struct:signal_frame
48 struct sparc_stackf ss; member in struct:rt_signal_frame
/linux-4.1.27/net/ceph/
H A Dmessenger.c195 const char *ceph_pr_addr(const struct sockaddr_storage *ss) ceph_pr_addr() argument
199 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; ceph_pr_addr()
200 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; ceph_pr_addr()
205 switch (ss->ss_family) { ceph_pr_addr()
218 ss->ss_family); ceph_pr_addr()
1765 static bool addr_is_blank(struct sockaddr_storage *ss) addr_is_blank() argument
1767 switch (ss->ss_family) { addr_is_blank()
1769 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; addr_is_blank()
1772 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && addr_is_blank()
1773 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && addr_is_blank()
1774 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && addr_is_blank()
1775 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; addr_is_blank()
1780 static int addr_port(struct sockaddr_storage *ss) addr_port() argument
1782 switch (ss->ss_family) { addr_port()
1784 return ntohs(((struct sockaddr_in *)ss)->sin_port); addr_port()
1786 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); addr_port()
1791 static void addr_set_port(struct sockaddr_storage *ss, int p) addr_set_port() argument
1793 switch (ss->ss_family) { addr_set_port()
1795 ((struct sockaddr_in *)ss)->sin_port = htons(p); addr_set_port()
1798 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); addr_set_port()
1806 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, ceph_pton() argument
1809 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; ceph_pton()
1810 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; ceph_pton()
1812 memset(ss, 0, sizeof(*ss)); ceph_pton()
1815 ss->ss_family = AF_INET; ceph_pton()
1820 ss->ss_family = AF_INET6; ceph_pton()
1832 struct sockaddr_storage *ss, char delim, const char **ipend) ceph_dns_resolve_name()
1861 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); ceph_dns_resolve_name()
1870 ret, ret ? "failed" : ceph_pr_addr(ss)); ceph_dns_resolve_name()
1876 struct sockaddr_storage *ss, char delim, const char **ipend) ceph_dns_resolve_name()
1887 struct sockaddr_storage *ss, char delim, const char **ipend) ceph_parse_server_name()
1891 ret = ceph_pton(name, namelen, ss, delim, ipend); ceph_parse_server_name()
1893 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); ceph_parse_server_name()
1912 struct sockaddr_storage *ss = &addr[i].in_addr; ceph_parse_ips() local
1921 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); ceph_parse_ips()
1952 addr_set_port(ss, port); ceph_parse_ips()
1954 dout("parse_ips got %s\n", ceph_pr_addr(ss)); ceph_parse_ips()
1831 ceph_dns_resolve_name(const char *name, size_t namelen, struct sockaddr_storage *ss, char delim, const char **ipend) ceph_dns_resolve_name() argument
1875 ceph_dns_resolve_name(const char *name, size_t namelen, struct sockaddr_storage *ss, char delim, const char **ipend) ceph_dns_resolve_name() argument
1886 ceph_parse_server_name(const char *name, size_t namelen, struct sockaddr_storage *ss, char delim, const char **ipend) ceph_parse_server_name() argument
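
Throughout the Ceph messenger, ss is a struct sockaddr_storage, which is large and aligned enough to hold either an IPv4 or an IPv6 endpoint; the helpers switch on ss_family and cast to the concrete sockaddr type. A userspace sketch of the addr_port()/addr_set_port() pattern:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int addr_port(const struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((const struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((const struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}

int main(void)
{
        struct sockaddr_storage ss;

        memset(&ss, 0, sizeof(ss));
        ss.ss_family = AF_INET;
        addr_set_port(&ss, 6789);
        printf("port: %d\n", addr_port(&ss));
        return 0;
}
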
/linux-4.1.27/arch/x86/ia32/
H A Dia32entry.S91 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
133 pushq_cfi $__USER32_DS /* pt_regs->ss */
223 * ss = __USER_DS
318 * then loads new ss, cs, and rip from previously programmed MSRs.
366 pushq_cfi $__USER32_DS /* pt_regs->ss */
427 * cs and ss are loaded from MSRs.
431 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
502 /*CFI_REL_OFFSET ss,4*8 */
606 /* CFI_REL_OFFSET ss,SS*/
H A Dia32_signal.c190 COPY_SEG_CPL3(ss); ia32_restore_sigcontext()
292 put_user_ex(regs->ss, (unsigned int __user *)&sc->ss); ia32_setup_sigcontext()
320 else if ((regs->ss & 0xffff) != __USER32_DS && get_sigframe()
415 regs->ss = __USER32_DS; ia32_setup_frame()
494 regs->ss = __USER32_DS; ia32_setup_rt_frame()
H A Dia32_aout.c98 dump->regs.ss = regs->ss; dump_thread32()
391 (regs)->ss = __USER32_DS; load_aout_binary()
/linux-4.1.27/tools/usb/usbip/src/
H A Dusbipd.c297 struct sockaddr_storage ss; do_accept() local
298 socklen_t len = sizeof(ss); do_accept()
302 memset(&ss, 0, sizeof(ss)); do_accept()
304 connfd = accept(listenfd, (struct sockaddr *)&ss, &len); do_accept()
310 rc = getnameinfo((struct sockaddr *)&ss, len, host, sizeof(host), do_accept()
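
usbipd's do_accept() shows the common server-side use of sockaddr_storage: accept into it so a peer of either address family fits, then hand it to getnameinfo() for printable host/port strings. A sketch under that assumption (function name and buffer sizes here are illustrative, not the usbipd ones):

#include <netdb.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int accept_and_log(int listenfd)
{
        struct sockaddr_storage ss;
        socklen_t len = sizeof(ss);
        char host[256], port[32];
        int connfd;

        memset(&ss, 0, sizeof(ss));
        connfd = accept(listenfd, (struct sockaddr *)&ss, &len);
        if (connfd < 0)
                return -1;

        if (getnameinfo((struct sockaddr *)&ss, len, host, sizeof(host),
                        port, sizeof(port),
                        NI_NUMERICHOST | NI_NUMERICSERV) == 0)
                printf("connection from %s:%s\n", host, port);

        return connfd;
}

A caller would pass an already-listening socket descriptor and use the returned connection fd as usual.
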
/linux-4.1.27/arch/x86/include/asm/xen/
H A Dinterface_64.h83 uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss; member in struct:iret_context
122 uint16_t ss, _pad2[3]; member in struct:cpu_user_regs
H A Dinterface_32.h64 uint16_t ss, _pad1; member in struct:cpu_user_regs
H A Dhypercall.h251 HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp) HYPERVISOR_stack_switch() argument
253 return _hypercall2(int, stack_switch, ss, esp); HYPERVISOR_stack_switch()
594 unsigned long ss, unsigned long esp) MULTI_stack_switch()
597 mcl->args[0] = ss; MULTI_stack_switch()
593 MULTI_stack_switch(struct multicall_entry *mcl, unsigned long ss, unsigned long esp) MULTI_stack_switch() argument
/linux-4.1.27/arch/x86/realmode/rm/
H A Dreboot.S63 movl %ecx, %ss
115 movw %ax, %ss
H A Dtrampoline_64.S48 mov %ax, %ss
88 movl %edx, %ss
H A Dwakeup_asm.S58 movw %cx, %ss
68 movw %ax, %ss
/linux-4.1.27/include/uapi/sound/
H A Dhdspm.h41 ss, enumerator in enum:hdspm_speed
57 uint8_t speed; /* enum {ss, ds, qs} */
/linux-4.1.27/arch/nios2/boot/compressed/
H A Dmisc.c110 char *ss = (char *)s; memset() local
113 ss[i] = c; memset()
/linux-4.1.27/arch/um/os-Linux/
H A Dsignal.c310 stack_t ss; os_is_signal_stack() local
311 sigaltstack(NULL, &ss); os_is_signal_stack()
313 return ss.ss_flags & SS_ONSTACK; os_is_signal_stack()
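
Here ss is a POSIX stack_t: calling sigaltstack() with a NULL new stack only queries the current alternate-signal-stack state, and SS_ONSTACK in ss_flags reports whether execution is currently on that stack. A minimal sketch of the same query:

#include <signal.h>
#include <stdio.h>

static int on_signal_stack(void)
{
        stack_t ss;

        if (sigaltstack(NULL, &ss) != 0)
                return 0;               /* query failed; assume normal stack */
        return ss.ss_flags & SS_ONSTACK;
}

int main(void)
{
        printf("on alternate signal stack: %d\n", on_signal_stack() != 0);
        return 0;
}
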
/linux-4.1.27/drivers/usb/gadget/
H A Dconfig.c165 struct usb_descriptor_header **ss) usb_assign_descriptors()
179 if (ss && gadget_is_superspeed(g)) { usb_assign_descriptors()
180 f->ss_descriptors = usb_copy_descriptors(ss); usb_assign_descriptors()
162 usb_assign_descriptors(struct usb_function *f, struct usb_descriptor_header **fs, struct usb_descriptor_header **hs, struct usb_descriptor_header **ss) usb_assign_descriptors() argument
/linux-4.1.27/scripts/dtc/
H A Dutil.c76 const char *ss, *se; util_is_printable_string() local
89 ss = s; util_is_printable_string()
94 if (*s != '\0' || s == ss) util_is_printable_string()
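
In dtc's util_is_printable_string(), ss only remembers where the candidate buffer starts so that an empty string is rejected. A simplified sketch of the same shape (the real helper also accepts several concatenated NUL-terminated strings):

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

static int is_printable_string(const void *data, size_t len)
{
        const char *s = data;
        const char *ss = s, *se = s + len;

        if (len == 0 || se[-1] != '\0')
                return 0;
        while (s < se && *s && isprint((unsigned char)*s))
                s++;
        /* must stop on the terminating NUL and have consumed something */
        if (s == se || *s != '\0' || s == ss)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d %d\n", is_printable_string("hello", 6),
               is_printable_string("\x01", 2));
        return 0;
}
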
/linux-4.1.27/drivers/lguest/
H A Dinterrupts_and_traps.c71 u32 eflags, ss, irq_enable; push_guest_interrupt_stack() local
79 if ((cpu->regs->ss&0x3) != GUEST_PL) { push_guest_interrupt_stack()
85 ss = cpu->ss1; push_guest_interrupt_stack()
94 push_guest_stack(cpu, &gstack, cpu->regs->ss); push_guest_interrupt_stack()
99 ss = cpu->regs->ss; push_guest_interrupt_stack()
129 cpu->regs->ss = ss; push_guest_interrupt_stack()
144 if ((cpu->regs->ss&0x3) != GUEST_PL) guest_run_interrupt()
145 cpu->regs->ss = cpu->esp1; guest_run_interrupt()
/linux-4.1.27/drivers/net/wimax/i2400m/
H A Dcontrol.c308 * @ss: validated System State TLV
312 const struct i2400m_tlv_system_state *ss) i2400m_report_tlv_system_state()
316 enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state); i2400m_report_tlv_system_state()
318 d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state); i2400m_report_tlv_system_state()
367 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", i2400m_report_tlv_system_state()
368 i2400m, ss, i2400m_state); i2400m_report_tlv_system_state()
438 const struct i2400m_tlv_system_state *ss; i2400m_report_state_parse_tlv() local
441 if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) { i2400m_report_state_parse_tlv()
442 ss = container_of(tlv, typeof(*ss), hdr); i2400m_report_state_parse_tlv()
446 le32_to_cpu(ss->state)); i2400m_report_state_parse_tlv()
447 i2400m_report_tlv_system_state(i2400m, ss); i2400m_report_state_parse_tlv()
311 i2400m_report_tlv_system_state(struct i2400m *i2400m, const struct i2400m_tlv_system_state *ss) i2400m_report_tlv_system_state() argument
/linux-4.1.27/include/linux/
H A Dcgroup.h241 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
242 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
348 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
470 struct cgroup_subsys *ss; member in struct:css_task_iter
489 struct cgroup_subsys *ss);
491 struct cgroup_subsys *ss);
H A Dcgroup-defs.h90 struct cgroup_subsys *ss; member in struct:cgroup_subsys_state
203 /* self css with NULL ->ss, points back to this cgroup */
344 struct cgroup_subsys *ss; /* NULL for cgroup core files */ member in struct:cftype
345 struct list_head node; /* anchored at ss->cfts */
/linux-4.1.27/arch/x86/lguest/
H A Dhead_32.S185 * Note the %ss: segment prefix here. Normal data accesses use the
187 * we're returning to (such as userspace): we can't trust it. The %ss:
190 popl %ss:lguest_data+LGUEST_DATA_irq_enabled
/linux-4.1.27/drivers/usb/gadget/legacy/
H A Dzero.c260 MODULE_PARM_DESC(isoc_maxpacket, "0 - 1023 (fs), 0 - 1024 (hs/ss)");
263 MODULE_PARM_DESC(isoc_mult, "0 - 2 (hs/ss only)");
267 MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
H A Dwebcam.c31 MODULE_PARM_DESC(streaming_maxpacket, "1 - 1023 (FS), 1 - 3072 (hs/ss)");
35 MODULE_PARM_DESC(streaming_maxburst, "0 - 15 (ss only)");
/linux-4.1.27/kernel/debug/kdb/
H A Dkdb_bp.c489 * Process the 'ss' (Single Step) command.
491 * ss
554 kdb_register_flags("ss", kdb_ss, "", kdb_initbptab()
H A Dkdb_private.h126 #define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
128 * after one ss, independent of
/linux-4.1.27/fs/
H A Dcompat_ioctl.c613 struct serial_struct ss; serial_struct_ioctl() local
621 if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base))) serial_struct_ioctl()
625 ss.iomem_base = compat_ptr(udata); serial_struct_ioctl()
626 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || serial_struct_ioctl()
627 __get_user(ss.port_high, &ss32->port_high)) serial_struct_ioctl()
629 ss.iomap_base = 0UL; serial_struct_ioctl()
632 err = sys_ioctl(fd,cmd,(unsigned long)(&ss)); serial_struct_ioctl()
637 if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base))) serial_struct_ioctl()
639 base = (unsigned long)ss.iomem_base >> 32 ? serial_struct_ioctl()
640 0xffffffff : (unsigned)(unsigned long)ss.iomem_base; serial_struct_ioctl()
642 __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) || serial_struct_ioctl()
643 __put_user(ss.port_high, &ss32->port_high)) serial_struct_ioctl()
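
serial_struct_ioctl() is 32-bit-compat plumbing: the 32-bit layout carries iomem_base as a 32-bit value, so the 64-bit handler copies the common prefix, widens the pointer (compat_ptr() in the kernel), and zeroes fields the old ABI cannot express, such as iomap_base. A userspace sketch of that conversion with hypothetical struct layouts, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct native_serial {                  /* what the 64-bit side works with */
        int             port_high;
        void            *iomem_base;
        unsigned long   iomap_base;
};

struct compat_serial {                  /* what a 32-bit caller passes */
        int             port_high;
        uint32_t        iomem_base;     /* pointer squeezed into 32 bits */
};

static void compat_to_native(struct native_serial *ns,
                             const struct compat_serial *cs)
{
        memset(ns, 0, sizeof(*ns));
        ns->port_high = cs->port_high;
        ns->iomem_base = (void *)(uintptr_t)cs->iomem_base; /* widen pointer */
        ns->iomap_base = 0UL;           /* not representable in the old ABI */
}

int main(void)
{
        struct compat_serial cs = { .port_high = 1, .iomem_base = 0x1000 };
        struct native_serial ns;

        compat_to_native(&ns, &cs);
        printf("port_high=%d iomem_base=%p\n", ns.port_high, ns.iomem_base);
        return 0;
}
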
/linux-4.1.27/include/uapi/linux/netfilter/
H A Dxt_osf.h70 __u16 ss, mss; member in struct:xt_osf_user_finger
/linux-4.1.27/arch/hexagon/include/asm/
H A Dthread_info.h112 #define TIF_SINGLESTEP 4 /* restore ss @ return to usr mode */
/linux-4.1.27/net/bluetooth/bnep/
H A Dcore.c563 struct bnep_session *s, *ss; bnep_add_connection() local
588 ss = __bnep_get_session(dst); bnep_add_connection()
589 if (ss && ss->state == BT_CONNECTED) { bnep_add_connection()
/linux-4.1.27/arch/mn10300/kernel/
H A Dmn10300-serial.c1475 struct serial_struct *ss) mn10300_serial_verify_port()
1484 if (ss->irq != port->uart.irq || mn10300_serial_verify_port()
1485 ss->port != port->uart.iobase || mn10300_serial_verify_port()
1486 ss->io_type != port->uart.iotype || mn10300_serial_verify_port()
1487 ss->iomem_base != mapbase || mn10300_serial_verify_port()
1488 ss->iomem_reg_shift != port->uart.regshift || mn10300_serial_verify_port()
1489 ss->hub6 != port->uart.hub6 || mn10300_serial_verify_port()
1490 ss->xmit_fifo_size != port->uart.fifosize) mn10300_serial_verify_port()
1494 if (ss->type != port->uart.type) { mn10300_serial_verify_port()
1498 if (ss->type != PORT_MN10300 && mn10300_serial_verify_port()
1499 ss->type != PORT_MN10300_CTS) mn10300_serial_verify_port()
1474 mn10300_serial_verify_port(struct uart_port *_port, struct serial_struct *ss) mn10300_serial_verify_port() argument
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
H A Dcxgb2.c464 struct sge_port_stats ss; get_stats() local
468 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); get_stats()
508 *data++ = ss.rx_cso_good; get_stats()
509 *data++ = ss.tx_cso; get_stats()
510 *data++ = ss.tx_tso; get_stats()
511 *data++ = ss.vlan_xtract; get_stats()
512 *data++ = ss.vlan_insert; get_stats()
513 *data++ = ss.tx_need_hdrroom; get_stats()
H A Dsge.c978 struct sge_port_stats *ss) t1_sge_get_port_stats()
982 memset(ss, 0, sizeof(*ss)); for_each_possible_cpu()
986 ss->rx_cso_good += st->rx_cso_good; for_each_possible_cpu()
987 ss->tx_cso += st->tx_cso; for_each_possible_cpu()
988 ss->tx_tso += st->tx_tso; for_each_possible_cpu()
989 ss->tx_need_hdrroom += st->tx_need_hdrroom; for_each_possible_cpu()
990 ss->vlan_xtract += st->vlan_xtract; for_each_possible_cpu()
991 ss->vlan_insert += st->vlan_insert; for_each_possible_cpu()
977 t1_sge_get_port_stats(const struct sge *sge, int port, struct sge_port_stats *ss) t1_sge_get_port_stats() argument
/linux-4.1.27/arch/mn10300/boot/compressed/
H A Dmisc.c61 char *ss = (char *) s; memset() local
64 ss[i] = c; memset()
/linux-4.1.27/arch/cris/boot/compressed/
H A Dmisc.c176 char *ss = (char*)s; memset() local
178 for (i=0;i<n;i++) ss[i] = c; memset()
/linux-4.1.27/sound/soc/omap/
H A Domap-hdmi-audio.c50 struct hdmi_audio_data *card_drvdata_substream(struct snd_pcm_substream *ss) card_drvdata_substream() argument
52 struct snd_soc_pcm_runtime *rtd = ss->private_data; card_drvdata_substream()
/linux-4.1.27/sound/pci/echoaudio/
H A Dechoaudio.c885 struct snd_pcm_substream *ss; snd_echo_preallocate_pages() local
889 for (ss = pcm->streams[stream].substream; ss; ss = ss->next) { snd_echo_preallocate_pages()
890 err = snd_pcm_lib_preallocate_pages(ss, SNDRV_DMA_TYPE_DEV_SG, snd_echo_preallocate_pages()
892 ss->number ? 0 : 128<<10, snd_echo_preallocate_pages()
1826 int period, ss, st; snd_echo_interrupt() local
1836 for (ss = 0; ss < DSP_MAXPIPES; ss++) { snd_echo_interrupt()
1837 substream = chip->substream[ss]; snd_echo_interrupt()
1842 if (period != chip->last_period[ss]) { snd_echo_interrupt()
1843 chip->last_period[ss] = period; snd_echo_interrupt()
/linux-4.1.27/drivers/video/fbdev/intelfb/
H A Dintelfbhw.c1053 u32 *vs, *vb, *vt, *hs, *hb, *ht, *ss, *pipe_conf; intelfbhw_mode_to_hw() local
1072 ss = &hw->src_size_b; intelfbhw_mode_to_hw()
1084 ss = &hw->src_size_a; intelfbhw_mode_to_hw()
1183 DBG_MSG("H: act %d, ss %d, se %d, tot %d bs %d, be %d\n", intelfbhw_mode_to_hw()
1196 DBG_MSG("V: act %d, ss %d, se %d, tot %d bs %d, be %d\n", intelfbhw_mode_to_hw()
1248 *ss = (hactive << SRC_SIZE_HORIZ_SHIFT) | intelfbhw_mode_to_hw()
1285 const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss; intelfbhw_program_mode() local
1316 ss = &hw->src_size_b; intelfbhw_program_mode()
1340 ss = &hw->src_size_a; intelfbhw_program_mode()
1430 OUTREG(src_size_reg, *ss); intelfbhw_program_mode()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_dma.c662 int s, ss; intel_device_info_runtime_init() local
693 for (ss = 0; ss < ss_max; ss++) { intel_device_info_runtime_init()
696 if (ss_disable & (0x1 << ss)) intel_device_info_runtime_init()
701 (ss * eu_max)); intel_device_info_runtime_init()
709 info->subslice_7eu[s] |= 1 << ss; intel_device_info_runtime_init()
H A Di915_debugfs.c4507 int ss; i915_sseu_status() local
4515 for (ss = 0; ss < ss_max; ss++) { i915_sseu_status()
4518 if (sig1[ss] & CHV_SS_PG_ENABLE) i915_sseu_status()
4524 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + i915_sseu_status()
4525 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + i915_sseu_status()
4526 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + i915_sseu_status()
4527 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); i915_sseu_status()
4534 int s, ss; i915_sseu_status() local
4563 for (ss = 0; ss < ss_max; ss++) { i915_sseu_status()
4566 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & i915_sseu_status()
4567 eu_mask[ss%2]); i915_sseu_status()
/linux-4.1.27/arch/ia64/kernel/
H A Dbrl_emu.c219 } else if (ia64_psr(regs)->ss) { ia64_emulate_brl()
H A Dkprobes.c752 ia64_psr(regs)->ss = 0; resume_execution()
772 ia64_psr(regs)->ss = 1; prepare_ss()
807 ia64_psr(regs)->ss = 0; pre_kprobes_handler()
879 ia64_psr(regs)->ss = 0; pre_kprobes_handler()
/linux-4.1.27/net/ipv4/
H A Dtcp_memcontrol.c9 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) tcp_init_cgroup() argument
H A Digmp.c2293 struct sockaddr_storage ss; local
2295 psin = (struct sockaddr_in *)&ss;
2296 memset(&ss, 0, sizeof(ss));
2299 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
/linux-4.1.27/arch/x86/um/
H A Dsignal.c198 GETREG(SS, ss); copy_sc_from_user()
290 PUTREG(SS, ss); copy_sc_to_user()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/
H A Dnv04.c183 new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580) new_ramdac580() argument
187 if (ss) /* single stage pll mode */ new_ramdac580()
/linux-4.1.27/drivers/crypto/
H A Dmv_cesa.c778 int bs, ds, ss; mv_hash_setkey() local
791 ss = crypto_shash_statesize(ctx->base_hash); mv_hash_setkey()
797 char ipad[ss]; mv_hash_setkey()
798 char opad[ss]; mv_hash_setkey()
/linux-4.1.27/drivers/tty/ipwireless/
H A Dmain.c33 #include <pcmcia/ss.h>
/linux-4.1.27/include/pcmcia/
H A Dds.h28 #include <pcmcia/ss.h>
H A Dss.h2 * ss.h
/linux-4.1.27/include/uapi/linux/usb/
H A Dch11.h252 } __attribute__ ((packed)) ss; member in union:usb_hub_descriptor::__anon13738
/linux-4.1.27/arch/avr32/kernel/
H A Dkprobes.c152 /* handler has already set things up, so skip ss setup */ kprobe_handler()
/linux-4.1.27/include/linux/ceph/
H A Dmessenger.h254 extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
/linux-4.1.27/drivers/net/wireless/rtlwifi/btcoexist/
H A Dhalbtc8192e2ant.c319 disra_mask = 0x0; /* enable 2ss */ halbtc8192e2ant_decidera_mask()
321 disra_mask = 0xfff00000;/* disable 2ss */ halbtc8192e2ant_decidera_mask()
325 disra_mask = 0x00000003;/* enable 2ss */ halbtc8192e2ant_decidera_mask()
327 disra_mask = 0xfff00003;/* disable 2ss */ halbtc8192e2ant_decidera_mask()
331 disra_mask = 0x0001f1f7;/* enable 2ss */ halbtc8192e2ant_decidera_mask()
333 disra_mask = 0xfff1f1f7;/* disable 2ss */ halbtc8192e2ant_decidera_mask()
1382 /* set rx 1ss or 2ss */ halbtc8192e2ant_set_switch_sstype()
/linux-4.1.27/fs/ntfs/
H A Drunlist.c626 int ss = sfinal - sstart + 1; ntfs_runlists_merge() local
636 ss++; ntfs_runlists_merge()
643 ntfs_debug("ds = %i, ss = %i, dins = %i", ds, ss, dins); ntfs_runlists_merge()
647 drl = ntfs_rl_replace(drl, ds, srl + sstart, ss, dins); ntfs_runlists_merge()
649 drl = ntfs_rl_insert(drl, ds, srl + sstart, ss, dins); ntfs_runlists_merge()
652 drl = ntfs_rl_append(drl, ds, srl + sstart, ss, dins); ntfs_runlists_merge()
654 drl = ntfs_rl_split(drl, ds, srl + sstart, ss, dins); ntfs_runlists_merge()
/linux-4.1.27/arch/x86/kernel/kprobes/
H A Dopt.c95 /* We don't bother saving the ss register */
/linux-4.1.27/arch/powerpc/platforms/pasemi/
H A Dsetup.c43 #include <pcmcia/ss.h>
/linux-4.1.27/drivers/lguest/x86/
H A Dcore.c223 case offsetof(struct pt_regs, ss): lguest_arch_regptr()
224 return &cpu->regs->ss; lguest_arch_regptr()
713 regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; lguest_arch_setup_regs()
/linux-4.1.27/drivers/usb/host/
H A Dxhci-hub.c146 desc->u.ss.bHubHdrDecLat = 0; xhci_usb3_hub_descriptor()
147 desc->u.ss.wHubDelay = 0; xhci_usb3_hub_descriptor()
157 desc->u.ss.DeviceRemovable = cpu_to_le16(port_removable); xhci_usb3_hub_descriptor()
/linux-4.1.27/net/ipv6/
H A Dmcast.c587 struct sockaddr_storage ss; local
589 psin6 = (struct sockaddr_in6 *)&ss;
590 memset(&ss, 0, sizeof(ss));
593 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
