/linux-4.1.27/arch/m32r/platforms/m32104ut/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/m32700ut/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/mappi/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/mappi2/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/mappi3/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/oaks32r/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/opsput/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/m32r/platforms/usrv/ |
H A D | Makefile | 1 obj-y := setup.o io.o
|
/linux-4.1.27/arch/avr32/lib/ |
H A D | Makefile | 9 lib-y += io-readsw.o io-readsl.o io-writesw.o io-writesl.o 10 lib-y += io-readsb.o io-writesb.o
|
/linux-4.1.27/drivers/md/bcache/ |
H A D | movinggc.c | 39 struct moving_io *io = container_of(cl, struct moving_io, cl); moving_io_destructor() local 40 kfree(io); moving_io_destructor() 45 struct moving_io *io = container_of(cl, struct moving_io, cl); write_moving_finish() local 46 struct bio *bio = &io->bio.bio; write_moving_finish() 53 if (io->op.replace_collision) write_moving_finish() 54 trace_bcache_gc_copy_collision(&io->w->key); write_moving_finish() 56 bch_keybuf_del(&io->op.c->moving_gc_keys, io->w); write_moving_finish() 58 up(&io->op.c->moving_in_flight); write_moving_finish() 66 struct moving_io *io = container_of(bio->bi_private, read_moving_endio() local 70 io->op.error = error; read_moving_endio() 72 ptr_stale(io->op.c, &b->key, 0)) { read_moving_endio() 73 io->op.error = -EINTR; read_moving_endio() 76 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); read_moving_endio() 79 static void moving_init(struct moving_io *io) moving_init() argument 81 struct bio *bio = &io->bio.bio; moving_init() 87 bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; moving_init() 88 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), moving_init() 90 bio->bi_private = &io->cl; moving_init() 97 struct moving_io *io = container_of(cl, struct moving_io, cl); write_moving() local 98 struct data_insert_op *op = &io->op; write_moving() 101 moving_init(io); write_moving() 103 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); write_moving() 105 op->bio = &io->bio.bio; write_moving() 107 op->writeback = KEY_DIRTY(&io->w->key); write_moving() 108 op->csum = KEY_CSUM(&io->w->key); write_moving() 110 bkey_copy(&op->replace_key, &io->w->key); write_moving() 121 struct moving_io *io = container_of(cl, struct moving_io, cl); read_moving_submit() local 122 struct bio *bio = &io->bio.bio; read_moving_submit() 124 bch_submit_bbio(bio, io->op.c, &io->w->key, 0); read_moving_submit() 126 continue_at(cl, write_moving, io->op.wq); read_moving_submit() 132 struct moving_io *io; read_moving() local 151 io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec) read_moving() 154 if (!io) read_moving() 157 w->private = io; read_moving() 158 io->w = w; read_moving() 159 io->op.inode = KEY_INODE(&w->key); read_moving() 160 io->op.c = c; read_moving() 161 io->op.wq = c->moving_gc_wq; read_moving() 163 moving_init(io); read_moving() 164 bio = &io->bio.bio; read_moving() 175 closure_call(&io->cl, read_moving_submit, NULL, &cl); read_moving()
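
The moving_io and closure code above leans on one idiom throughout: given a pointer to an embedded member (here the closure cl), container_of() steps back to the structure that contains it. A minimal, self-contained userspace sketch of that idiom; the names are illustrative, not bcache's:

#include <stddef.h>
#include <stdio.h>

/* simplified form of the kernel macro: member pointer -> containing struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure { int remaining; };

struct moving_io_sketch {		/* stands in for struct moving_io */
	int sector;
	struct closure cl;		/* embedded, as in the driver */
};

static void callback(struct closure *cl)
{
	/* the callback only receives the embedded member and steps back out */
	struct moving_io_sketch *io =
		container_of(cl, struct moving_io_sketch, cl);

	printf("sector=%d\n", io->sector);
}

int main(void)
{
	struct moving_io_sketch io = { .sector = 42 };

	callback(&io.cl);
	return 0;
}
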
|
H A D | Makefile | 5 io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
|
H A D | writeback.c | 107 struct dirty_io *io = w->private; dirty_init() local 108 struct bio *bio = &io->bio; dirty_init() 111 if (!io->dc->writeback_percent) dirty_init() 123 struct dirty_io *io = container_of(cl, struct dirty_io, cl); dirty_io_destructor() local 124 kfree(io); dirty_io_destructor() 129 struct dirty_io *io = container_of(cl, struct dirty_io, cl); write_dirty_finish() local 130 struct keybuf_key *w = io->bio.bi_private; write_dirty_finish() 131 struct cached_dev *dc = io->dc; write_dirty_finish() 135 bio_for_each_segment_all(bv, &io->bio, i) write_dirty_finish() 172 struct dirty_io *io = w->private; dirty_endio() local 177 closure_put(&io->cl); dirty_endio() 182 struct dirty_io *io = container_of(cl, struct dirty_io, cl); write_dirty() local 183 struct keybuf_key *w = io->bio.bi_private; write_dirty() 186 io->bio.bi_rw = WRITE; write_dirty() 187 io->bio.bi_iter.bi_sector = KEY_START(&w->key); write_dirty() 188 io->bio.bi_bdev = io->dc->bdev; write_dirty() 189 io->bio.bi_end_io = dirty_endio; write_dirty() 191 closure_bio_submit(&io->bio, cl, &io->dc->disk); write_dirty() 199 struct dirty_io *io = w->private; read_dirty_endio() local 201 bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), read_dirty_endio() 209 struct dirty_io *io = container_of(cl, struct dirty_io, cl); read_dirty_submit() local 211 closure_bio_submit(&io->bio, cl, &io->dc->disk); read_dirty_submit() 220 struct dirty_io *io; read_dirty() local 246 io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec) read_dirty() 249 if (!io) read_dirty() 252 w->private = io; read_dirty() 253 io->dc = dc; read_dirty() 256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); read_dirty() 257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, read_dirty() 259 io->bio.bi_rw = READ; read_dirty() 260 io->bio.bi_end_io = read_dirty_endio; read_dirty() 262 if (bio_alloc_pages(&io->bio, GFP_KERNEL)) read_dirty() 268 closure_call(&io->cl, read_dirty_submit, NULL, &cl); read_dirty()
|
/linux-4.1.27/drivers/net/wireless/ath/wil6210/ |
H A D | ioctl.c | 60 struct wil_memio io; wil_ioc_memio_dword() local 64 if (copy_from_user(&io, data, sizeof(io))) wil_ioc_memio_dword() 68 io.addr, io.val, io.op); wil_ioc_memio_dword() 70 a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op); wil_ioc_memio_dword() 72 wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, wil_ioc_memio_dword() 73 io.op); wil_ioc_memio_dword() 77 switch (io.op & wil_mmio_op_mask) { wil_ioc_memio_dword() 79 io.val = ioread32(a); wil_ioc_memio_dword() 83 iowrite32(io.val, a); wil_ioc_memio_dword() 87 wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); wil_ioc_memio_dword() 94 io.addr, io.val, io.op); wil_ioc_memio_dword() 95 if (copy_to_user(data, &io, sizeof(io))) wil_ioc_memio_dword() 104 struct wil_memio_block io; wil_ioc_memio_block() local 109 if (copy_from_user(&io, data, sizeof(io))) wil_ioc_memio_block() 113 io.addr, io.size, io.op); wil_ioc_memio_block() 116 if (io.size % 4) { wil_ioc_memio_block() 117 wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size); wil_ioc_memio_block() 121 a = wil_ioc_addr(wil, io.addr, io.size, io.op); wil_ioc_memio_block() 123 wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr, wil_ioc_memio_block() 124 io.op); wil_ioc_memio_block() 128 block = kmalloc(io.size, GFP_USER); wil_ioc_memio_block() 133 switch (io.op & wil_mmio_op_mask) { wil_ioc_memio_block() 135 wil_memcpy_fromio_32(block, a, io.size); wil_ioc_memio_block() 136 wil_hex_dump_ioctl("Read ", block, io.size); wil_ioc_memio_block() 137 if (copy_to_user(io.block, block, io.size)) { wil_ioc_memio_block() 143 if (copy_from_user(block, io.block, io.size)) { wil_ioc_memio_block() 147 wil_memcpy_toio_32(a, block, io.size); wil_ioc_memio_block() 149 wil_hex_dump_ioctl("Write ", block, io.size); wil_ioc_memio_block() 152 wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op); wil_ioc_memio_block()
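
Taken together, the matches outline a small register-access ioctl: copy the request from user space, translate io.addr to a mapped address, perform a single ioread32()/iowrite32(), and copy the (possibly updated) request back. A condensed sketch of that flow, written as if it sat inside ioctl.c; the case values and return codes are assumptions, not taken from the driver:

static int memio_dword_sketch(struct wil6210_priv *wil, void __user *data)
{
	struct wil_memio io;
	void __iomem *a;

	if (copy_from_user(&io, data, sizeof(io)))
		return -EFAULT;

	a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
	if (!a)
		return -EINVAL;			/* address not mappable for this op */

	switch (io.op & wil_mmio_op_mask) {
	case 0:					/* hypothetical "read" opcode */
		io.val = ioread32(a);
		break;
	case 1:					/* hypothetical "write" opcode */
		iowrite32(io.val, a);
		break;
	default:
		return -EINVAL;			/* unsupported operation */
	}

	return copy_to_user(data, &io, sizeof(io)) ? -EFAULT : 0;
}
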
|
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/ |
H A D | Makefile | 1 wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
|
/linux-4.1.27/arch/um/include/asm/ |
H A D | dma.h | 4 #include <asm/io.h>
|
/linux-4.1.27/arch/score/include/asm/ |
H A D | dma.h | 4 #include <asm/io.h>
|
H A D | io.h | 4 #include <asm-generic/io.h>
|
/linux-4.1.27/arch/sh/boards/mach-microdev/ |
H A D | Makefile | 5 obj-y := setup.o irq.o io.o fdc37c93xapm.o
|
/linux-4.1.27/arch/cris/arch-v32/mach-a3/ |
H A D | Makefile | 5 obj-y := dma.o pinmux.o io.o arbiter.o
|
/linux-4.1.27/arch/cris/arch-v32/mach-fs/ |
H A D | Makefile | 5 obj-y := dma.o pinmux.o io.o arbiter.o
|
/linux-4.1.27/arch/hexagon/lib/ |
H A D | Makefile | 4 obj-y = checksum.o io.o memcpy.o memset.o
|
/linux-4.1.27/arch/arm/mach-ebsa110/ |
H A D | Makefile | 7 obj-y := core.o io.o leds.o
|
/linux-4.1.27/arch/arm/lib/ |
H A D | Makefile | 15 io-readsb.o io-writesb.o io-readsl.o io-writesl.o \ 27 lib-y += io-readsw-armv3.o io-writesw-armv3.o 29 lib-y += io-readsw-armv4.o io-writesw-armv4.o 32 lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
|
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/ |
H A D | cl_io.c | 56 #define cl_io_for_each(slice, io) \ 57 list_for_each_entry((slice), &io->ci_layers, cis_linkage) 58 #define cl_io_for_each_reverse(slice, io) \ 59 list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage) 66 static inline int cl_io_is_loopable(const struct cl_io *io) cl_io_is_loopable() argument 68 return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC; cl_io_is_loopable() 84 static int cl_io_invariant(const struct cl_io *io) cl_io_invariant() argument 88 up = io->ci_parent; cl_io_invariant() 91 * io can own pages only when it is ongoing. Sub-io might cl_io_invariant() 92 * still be in CIS_LOCKED state when top-io is in cl_io_invariant() 95 ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING || cl_io_invariant() 96 (io->ci_state == CIS_LOCKED && up != NULL)); cl_io_invariant() 100 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top. 102 void cl_io_fini(const struct lu_env *env, struct cl_io *io) cl_io_fini() argument 107 LINVRNT(cl_io_type_is_valid(io->ci_type)); cl_io_fini() 108 LINVRNT(cl_io_invariant(io)); cl_io_fini() 110 while (!list_empty(&io->ci_layers)) { cl_io_fini() 111 slice = container_of(io->ci_layers.prev, struct cl_io_slice, cl_io_fini() 114 if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) cl_io_fini() 115 slice->cis_iop->op[io->ci_type].cio_fini(env, slice); cl_io_fini() 123 io->ci_state = CIS_FINI; cl_io_fini() 125 if (info->clt_current_io == io) cl_io_fini() 129 switch (io->ci_type) { cl_io_fini() 135 LASSERT(!io->ci_need_restart); cl_io_fini() 140 LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout, cl_io_fini() 141 !io->ci_need_restart)); cl_io_fini() 149 static int cl_io_init0(const struct lu_env *env, struct cl_io *io, cl_io_init0() argument 155 LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI); cl_io_init0() 157 LINVRNT(cl_io_invariant(io)); cl_io_init0() 159 io->ci_type = iot; cl_io_init0() 160 INIT_LIST_HEAD(&io->ci_lockset.cls_todo); cl_io_init0() 161 INIT_LIST_HEAD(&io->ci_lockset.cls_curr); cl_io_init0() 162 INIT_LIST_HEAD(&io->ci_lockset.cls_done); cl_io_init0() 163 INIT_LIST_HEAD(&io->ci_layers); cl_io_init0() 168 result = scan->co_ops->coo_io_init(env, scan, io); cl_object_for_each() 174 io->ci_state = CIS_INIT; 179 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom. 183 int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, cl_io_sub_init() argument 190 info->clt_current_io = io; cl_io_sub_init() 191 return cl_io_init0(env, io, iot, obj); cl_io_sub_init() 196 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom. 203 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot 205 int cl_io_init(const struct lu_env *env, struct cl_io *io, cl_io_init() argument 213 info->clt_current_io = io; cl_io_init() 214 return cl_io_init0(env, io, iot, obj); cl_io_init() 219 * Initialize read or write io. 
223 int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, cl_io_rw_init() argument 227 LINVRNT(io->ci_obj != NULL); cl_io_rw_init() 229 LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu, cl_io_rw_init() 230 "io range: %u [%llu, %llu) %u %u\n", cl_io_rw_init() 232 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append); cl_io_rw_init() 233 io->u.ci_rw.crw_pos = pos; cl_io_rw_init() 234 io->u.ci_rw.crw_count = count; cl_io_rw_init() 235 return cl_io_init(env, io, iot, io->ci_obj); cl_io_rw_init() 283 static void cl_io_locks_sort(struct cl_io *io) cl_io_locks_sort() argument 297 &io->ci_lockset.cls_todo, cl_io_locks_sort() 377 struct cl_io *io, struct cl_lockset *set, cl_lockset_lock_one() 383 lock = cl_lock_request(env, io, &link->cill_descr, "io", io); cl_lockset_lock_one() 400 static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, cl_lock_link_fini() argument 407 cl_lock_release(env, lock, "io", io); cl_lock_link_fini() 414 static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, cl_lockset_lock() argument 427 result = cl_lockset_lock_one(env, io, set, link); cl_lockset_lock() 431 cl_lock_link_fini(env, io, link); cl_lockset_lock() 449 * Takes locks necessary for the current iteration of io. 455 int cl_io_lock(const struct lu_env *env, struct cl_io *io) cl_io_lock() argument 460 LINVRNT(cl_io_is_loopable(io)); cl_io_lock() 461 LINVRNT(io->ci_state == CIS_IT_STARTED); cl_io_lock() 462 LINVRNT(cl_io_invariant(io)); cl_io_lock() 464 cl_io_for_each(scan, io) { cl_io_for_each() 465 if (scan->cis_iop->op[io->ci_type].cio_lock == NULL) cl_io_for_each() 467 result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan); cl_io_for_each() 472 cl_io_locks_sort(io); 473 result = cl_lockset_lock(env, io, &io->ci_lockset); 476 cl_io_unlock(env, io); 478 io->ci_state = CIS_LOCKED; 484 * Release locks takes by io. 486 void cl_io_unlock(const struct lu_env *env, struct cl_io *io) cl_io_unlock() argument 493 LASSERT(cl_io_is_loopable(io)); cl_io_unlock() 494 LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED); cl_io_unlock() 495 LINVRNT(cl_io_invariant(io)); cl_io_unlock() 497 set = &io->ci_lockset; cl_io_unlock() 500 cl_lock_link_fini(env, io, link); cl_io_unlock() 503 cl_lock_link_fini(env, io, link); cl_io_unlock() 507 cl_lock_link_fini(env, io, link); cl_io_unlock() 509 cl_io_for_each_reverse(scan, io) { cl_io_for_each_reverse() 510 if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) cl_io_for_each_reverse() 511 scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); cl_io_for_each_reverse() 513 io->ci_state = CIS_UNLOCKED; 519 * Prepares next iteration of io. 522 * layers a chance to modify io parameters, e.g., so that lov can restrict io 525 int cl_io_iter_init(const struct lu_env *env, struct cl_io *io) cl_io_iter_init() argument 530 LINVRNT(cl_io_is_loopable(io)); cl_io_iter_init() 531 LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED); cl_io_iter_init() 532 LINVRNT(cl_io_invariant(io)); cl_io_iter_init() 535 cl_io_for_each(scan, io) { cl_io_for_each() 536 if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL) cl_io_for_each() 538 result = scan->cis_iop->op[io->ci_type].cio_iter_init(env, cl_io_for_each() 544 io->ci_state = CIS_IT_STARTED; 550 * Finalizes io iteration. 
554 void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io) cl_io_iter_fini() argument 558 LINVRNT(cl_io_is_loopable(io)); cl_io_iter_fini() 559 LINVRNT(io->ci_state == CIS_UNLOCKED); cl_io_iter_fini() 560 LINVRNT(cl_io_invariant(io)); cl_io_iter_fini() 562 cl_io_for_each_reverse(scan, io) { cl_io_for_each_reverse() 563 if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL) cl_io_for_each_reverse() 564 scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan); cl_io_for_each_reverse() 566 io->ci_state = CIS_IT_ENDED; 571 * Records that read or write io progressed \a nob bytes forward. 573 void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob) cl_io_rw_advance() argument 577 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || cl_io_rw_advance() 579 LINVRNT(cl_io_is_loopable(io)); cl_io_rw_advance() 580 LINVRNT(cl_io_invariant(io)); cl_io_rw_advance() 582 io->u.ci_rw.crw_pos += nob; cl_io_rw_advance() 583 io->u.ci_rw.crw_count -= nob; cl_io_rw_advance() 586 cl_io_for_each_reverse(scan, io) { cl_io_for_each_reverse() 587 if (scan->cis_iop->op[io->ci_type].cio_advance != NULL) cl_io_for_each_reverse() 588 scan->cis_iop->op[io->ci_type].cio_advance(env, scan, cl_io_for_each_reverse() 597 int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, cl_io_lock_add() argument 602 if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) cl_io_lock_add() 605 list_add(&link->cill_linkage, &io->ci_lockset.cls_todo); cl_io_lock_add() 621 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, cl_io_lock_alloc_add() argument 631 result = cl_io_lock_add(env, io, link); cl_io_lock_alloc_add() 642 * Starts io by calling cl_io_operations::cio_start() top-to-bottom. 644 int cl_io_start(const struct lu_env *env, struct cl_io *io) cl_io_start() argument 649 LINVRNT(cl_io_is_loopable(io)); cl_io_start() 650 LINVRNT(io->ci_state == CIS_LOCKED); cl_io_start() 651 LINVRNT(cl_io_invariant(io)); cl_io_start() 653 io->ci_state = CIS_IO_GOING; cl_io_for_each() 654 cl_io_for_each(scan, io) { cl_io_for_each() 655 if (scan->cis_iop->op[io->ci_type].cio_start == NULL) cl_io_for_each() 657 result = scan->cis_iop->op[io->ci_type].cio_start(env, scan); cl_io_for_each() 668 * Wait until current io iteration is finished by calling 671 void cl_io_end(const struct lu_env *env, struct cl_io *io) cl_io_end() argument 675 LINVRNT(cl_io_is_loopable(io)); cl_io_end() 676 LINVRNT(io->ci_state == CIS_IO_GOING); cl_io_end() 677 LINVRNT(cl_io_invariant(io)); cl_io_end() 679 cl_io_for_each_reverse(scan, io) { cl_io_for_each_reverse() 680 if (scan->cis_iop->op[io->ci_type].cio_end != NULL) cl_io_for_each_reverse() 681 scan->cis_iop->op[io->ci_type].cio_end(env, scan); cl_io_for_each_reverse() 684 io->ci_state = CIS_IO_FINISHED; 699 * True iff \a page is within \a io range. 701 static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io) cl_page_in_io() argument 709 switch (io->ci_type) { cl_page_in_io() 716 if (!cl_io_is_append(io)) { cl_page_in_io() 717 const struct cl_io_rw_common *crw = &(io->u.ci_rw); cl_page_in_io() 725 result = io->u.ci_fault.ft_index == idx; cl_page_in_io() 734 * Called by read io, when page has to be read from the server. 
738 int cl_io_read_page(const struct lu_env *env, struct cl_io *io, cl_io_read_page() argument 745 LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT); cl_io_read_page() 746 LINVRNT(cl_page_is_owned(page, io)); cl_io_read_page() 747 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); cl_io_read_page() 748 LINVRNT(cl_page_in_io(page, io)); cl_io_read_page() 749 LINVRNT(cl_io_invariant(io)); cl_io_read_page() 751 queue = &io->ci_queue; cl_io_read_page() 759 * requires no network io). cl_io_read_page() 762 * "parallel io" (see CLO_REPEAT loops in cl_lock.c). cl_io_read_page() 764 cl_io_for_each(scan, io) { cl_io_for_each() 776 result = cl_io_submit_rw(env, io, CRT_READ, queue); 780 cl_page_list_disown(env, io, &queue->c2_qin); 787 * Called by write io to prepare page to receive data from user buffer. 791 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, cl_io_prepare_write() argument 797 LINVRNT(io->ci_type == CIT_WRITE); cl_io_prepare_write() 798 LINVRNT(cl_page_is_owned(page, io)); cl_io_prepare_write() 799 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); cl_io_prepare_write() 800 LINVRNT(cl_io_invariant(io)); cl_io_prepare_write() 801 LASSERT(cl_page_in_io(page, io)); cl_io_prepare_write() 803 cl_io_for_each_reverse(scan, io) { cl_io_for_each_reverse() 820 * Called by write io after user data were copied into a page. 824 int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, cl_io_commit_write() argument 830 LINVRNT(io->ci_type == CIT_WRITE); cl_io_commit_write() 831 LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED); cl_io_commit_write() 832 LINVRNT(cl_io_invariant(io)); cl_io_commit_write() 839 LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL); cl_io_commit_write() 840 LASSERT(cl_page_in_io(page, io)); cl_io_commit_write() 842 cl_io_for_each(scan, io) { cl_io_for_each() 860 * Submits a list of pages for immediate io. 869 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, cl_io_submit_rw() argument 877 cl_io_for_each(scan, io) { cl_io_for_each() 897 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, cl_io_submit_sync() argument 911 rc = cl_io_submit_rw(env, io, iot, queue); cl_io_submit_sync() 925 rc = cl_sync_io_wait(env, io, &queue->c2_qout, cl_io_submit_sync() 939 int cl_io_cancel(const struct lu_env *env, struct cl_io *io, cl_io_cancel() argument 949 LINVRNT(cl_page_in_io(page, io)); cl_page_list_for_each() 958 * Main io loop. 960 * Pumps io through iterations calling 974 * repeatedly until there is no more io to do. 976 int cl_io_loop(const struct lu_env *env, struct cl_io *io) cl_io_loop() argument 980 LINVRNT(cl_io_is_loopable(io)); cl_io_loop() 985 io->ci_continue = 0; cl_io_loop() 986 result = cl_io_iter_init(env, io); cl_io_loop() 988 nob = io->ci_nob; cl_io_loop() 989 result = cl_io_lock(env, io); cl_io_loop() 998 result = cl_io_start(env, io); cl_io_loop() 1001 * io, etc. cl_io_loop() 1005 cl_io_end(env, io); cl_io_loop() 1006 cl_io_unlock(env, io); cl_io_loop() 1007 cl_io_rw_advance(env, io, io->ci_nob - nob); cl_io_loop() 1010 cl_io_iter_fini(env, io); cl_io_loop() 1011 } while (result == 0 && io->ci_continue); cl_io_loop() 1013 result = io->ci_result; cl_io_loop() 1019 * Adds io slice to the cl_io. 1022 * per-layer state to the io. 
New state is added at the end of 1027 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, cl_io_slice_add() argument 1036 list_add_tail(linkage, &io->ci_layers); cl_io_slice_add() 1037 slice->cis_io = io; cl_io_slice_add() 1060 /* it would be better to check that page is owned by "current" io, but cl_page_list_add() 1130 struct cl_io *io, struct cl_page *pg); 1136 struct cl_io *io, struct cl_page_list *plist) cl_page_list_disown() 1159 cl_page_disown0(env, io, page); cl_page_list_for_each_safe() 1187 struct cl_io *io, struct cl_page_list *plist) cl_page_list_own() 1200 if (cl_page_own(env, io, page) == 0) cl_page_list_for_each_safe() 1213 struct cl_io *io, struct cl_page_list *plist) cl_page_list_assume() 1220 cl_page_assume(env, io, page); cl_page_list_assume() 1227 void cl_page_list_discard(const struct lu_env *env, struct cl_io *io, cl_page_list_discard() argument 1234 cl_page_discard(env, io, page); cl_page_list_discard() 1241 int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io, cl_page_list_unmap() argument 1250 result = cl_page_unmap(env, io, page); cl_page_list_for_each() 1281 struct cl_io *io, struct cl_2queue *queue) cl_2queue_disown() 1283 cl_page_list_disown(env, io, &queue->c2_qin); cl_2queue_disown() 1284 cl_page_list_disown(env, io, &queue->c2_qout); cl_2queue_disown() 1292 struct cl_io *io, struct cl_2queue *queue) cl_2queue_discard() 1294 cl_page_list_discard(env, io, &queue->c2_qin); cl_2queue_discard() 1295 cl_page_list_discard(env, io, &queue->c2_qout); cl_2queue_discard() 1303 struct cl_io *io, struct cl_2queue *queue) cl_2queue_assume() 1305 cl_page_list_assume(env, io, &queue->c2_qin); cl_2queue_assume() 1306 cl_page_list_assume(env, io, &queue->c2_qout); cl_2queue_assume() 1331 * Returns top-level io. 1335 struct cl_io *cl_io_top(struct cl_io *io) cl_io_top() argument 1337 while (io->ci_parent != NULL) cl_io_top() 1338 io = io->ci_parent; cl_io_top() 1339 return io; cl_io_top() 1344 * Prints human readable representation of \a io to the \a f. 1347 lu_printer_t printer, const struct cl_io *io) cl_io_print() 1596 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages. 1611 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io, cl_sync_io_wait() argument 1628 (void)cl_io_cancel(env, io, queue); cl_sync_io_wait() 1638 cl_page_list_assume(env, io, queue); cl_sync_io_wait() 376 cl_lockset_lock_one(const struct lu_env *env, struct cl_io *io, struct cl_lockset *set, struct cl_io_lock_link *link) cl_lockset_lock_one() argument 1135 cl_page_list_disown(const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist) cl_page_list_disown() argument 1186 cl_page_list_own(const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist) cl_page_list_own() argument 1212 cl_page_list_assume(const struct lu_env *env, struct cl_io *io, struct cl_page_list *plist) cl_page_list_assume() argument 1280 cl_2queue_disown(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_disown() argument 1291 cl_2queue_discard(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_discard() argument 1302 cl_2queue_assume(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_assume() argument 1346 cl_io_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_io *io) cl_io_print() argument
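
The functions above (cl_io_lock(), cl_io_start(), cl_io_iter_init(), ...) all share one dispatch shape: walk the io's layer list with cl_io_for_each()/cl_io_for_each_reverse() and invoke the per-io-type hook on every layer that provides one, stopping on the first error. A small userspace sketch of that shape; the layer names and types are illustrative, not Lustre's:

#include <stdio.h>

#define NTYPES 2			/* stand-in for the CIT_* io types */

struct layer {
	const char *name;
	int (*op[NTYPES])(struct layer *l);	/* per-type hook, may be NULL */
};

static int dispatch(struct layer *layers, int nr, int type)
{
	int i, rc = 0;

	for (i = 0; i < nr && rc == 0; i++) {
		if (!layers[i].op[type])
			continue;		/* this layer opts out of the phase */
		rc = layers[i].op[type](&layers[i]);
		printf("%s -> %d\n", layers[i].name, rc);
	}
	return rc;
}

static int ok(struct layer *l) { (void)l; return 0; }

int main(void)
{
	struct layer layers[] = {
		{ "vvp", { ok, NULL } },
		{ "lov", { ok, ok   } },
		{ "osc", { NULL, ok } },
	};

	return dispatch(layers, 3, 0);
}
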
|
H A D | cl_page.c | 155 struct cl_io *io, pgoff_t start, pgoff_t end, cl_page_gang_lookup() 226 res = (*cb)(env, io, page, cbdata); cl_page_gang_lookup() 515 [CPS_OWNED] = 1, /* io finds existing cached page */ cl_page_state_set0() 528 [CPS_CACHED] = 1, /* io completion */ cl_page_state_set0() 535 [CPS_CACHED] = 1, /* io completion */ cl_page_state_set0() 770 struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoke() 773 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); cl_page_invoke() 777 io); cl_page_invoke() 781 struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoid() 784 PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj)); cl_page_invoid() 787 const struct cl_page_slice *, struct cl_io *), io); cl_page_invoid() 811 struct cl_io *io, struct cl_page *pg) cl_page_disown0() 830 io); cl_page_disown0() 834 * returns true, iff page is owned by the given io. 836 int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io) cl_page_is_owned() argument 838 LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj)); cl_page_is_owned() 839 return pg->cp_state == CPS_OWNED && pg->cp_owner == io; cl_page_is_owned() 849 * \pre !cl_page_is_owned(pg, io) 850 * \post result == 0 iff cl_page_is_owned(pg, io) 863 static int cl_page_own0(const struct lu_env *env, struct cl_io *io, cl_page_own0() argument 868 PINVRNT(env, pg, !cl_page_is_owned(pg, io)); cl_page_own0() 871 io = cl_io_top(io); cl_page_own0() 880 io, nonblock); cl_page_own0() 884 pg->cp_owner = io; cl_page_own0() 890 cl_page_disown0(env, io, pg); cl_page_own0() 904 int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_own() argument 906 return cl_page_own0(env, io, pg, 0); cl_page_own() 915 int cl_page_own_try(const struct lu_env *env, struct cl_io *io, cl_page_own_try() argument 918 return cl_page_own0(env, io, pg, 1); cl_page_own_try() 928 * \pre !cl_page_is_owned(pg, io) 929 * \post cl_page_is_owned(pg, io) 934 struct cl_io *io, struct cl_page *pg) cl_page_assume() 936 PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj)); cl_page_assume() 939 io = cl_io_top(io); cl_page_assume() 941 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); cl_page_assume() 943 pg->cp_owner = io; cl_page_assume() 956 * \pre cl_page_is_owned(pg, io) 957 * \post !cl_page_is_owned(pg, io) 962 struct cl_io *io, struct cl_page *pg) cl_page_unassume() 964 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_unassume() 968 io = cl_io_top(io); cl_page_unassume() 974 io); cl_page_unassume() 983 * \pre cl_page_is_owned(pg, io) 984 * \post !cl_page_is_owned(pg, io) 990 struct cl_io *io, struct cl_page *pg) cl_page_disown() 992 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_disown() 995 io = cl_io_top(io); cl_page_disown() 996 cl_page_disown0(env, io, pg); cl_page_disown() 1006 * \pre cl_page_is_owned(pg, io) 1011 struct cl_io *io, struct cl_page *pg) cl_page_discard() 1013 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_discard() 1016 cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard)); cl_page_discard() 1117 struct cl_io *io, struct cl_page *pg) cl_page_unmap() 1119 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_unmap() 1122 return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap)); cl_page_unmap() 1190 int cl_page_prep(const struct lu_env *env, struct cl_io *io, cl_page_prep() argument 1195 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_prep() 1206 result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep)); cl_page_prep() 1252 CL_PAGE_INVOID_REVERSE(env, pg, 
CL_PAGE_OP(io[crt].cpo_completion), cl_page_completion() 1290 result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready), cl_page_make_ready() 1303 * Notify layers that high level io decided to place this page into a cache 1309 * \pre cl_page_is_owned(pg, io) 1310 * \post cl_page_is_owned(pg, io) 1314 int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, cl_page_cache_add() argument 1321 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_cache_add() 1328 if (scan->cpl_ops->io[crt].cpo_cache_add == NULL) cl_page_cache_add() 1331 result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io); cl_page_cache_add() 1343 * \pre cl_page_is_owned(pg, io) 1348 int cl_page_flush(const struct lu_env *env, struct cl_io *io, cl_page_flush() argument 1353 PINVRNT(env, pg, cl_page_is_owned(pg, io)); cl_page_flush() 1356 result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush)); cl_page_flush() 1370 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, cl_page_is_under_lock() argument 1380 io); cl_page_is_under_lock() 1386 static int page_prune_cb(const struct lu_env *env, struct cl_io *io, page_prune_cb() argument 1389 cl_page_own(env, io, page); page_prune_cb() 1390 cl_page_unmap(env, io, page); page_prune_cb() 1391 cl_page_discard(env, io, page); page_prune_cb() 1392 cl_page_disown(env, io, page); page_prune_cb() 1403 struct cl_io *io; cl_pages_prune() local 1407 io = &info->clt_io; cl_pages_prune() 1410 * initialize the io. This is ugly since we never do IO in this cl_pages_prune() 1413 io->ci_obj = obj; cl_pages_prune() 1414 io->ci_ignore_layout = 1; cl_pages_prune() 1415 result = cl_io_init(env, io, CIT_MISC, obj); cl_pages_prune() 1417 cl_io_fini(env, io); cl_pages_prune() 1418 return io->ci_result; cl_pages_prune() 1422 result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, cl_pages_prune() 1428 cl_io_fini(env, io); cl_pages_prune() 154 cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, struct cl_io *io, pgoff_t start, pgoff_t end, cl_page_gang_cb_t cb, void *cbdata) cl_page_gang_lookup() argument 769 cl_page_invoke(const struct lu_env *env, struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoke() argument 780 cl_page_invoid(const struct lu_env *env, struct cl_io *io, struct cl_page *page, ptrdiff_t op) cl_page_invoid() argument 810 cl_page_disown0(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_disown0() argument 933 cl_page_assume(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_assume() argument 961 cl_page_unassume(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_unassume() argument 989 cl_page_disown(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_disown() argument 1010 cl_page_discard(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_discard() argument 1116 cl_page_unmap(const struct lu_env *env, struct cl_io *io, struct cl_page *pg) cl_page_unmap() argument
|
/linux-4.1.27/arch/parisc/lib/ |
H A D | Makefile | 5 lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
|
/linux-4.1.27/drivers/watchdog/ |
H A D | wd501p.h | 23 #define WDT_COUNT0 (io+0) 24 #define WDT_COUNT1 (io+1) 25 #define WDT_COUNT2 (io+2) 26 #define WDT_CR (io+3) 27 #define WDT_SR (io+4) /* Start buzzer on PCI write */ 28 #define WDT_RT (io+5) /* Stop buzzer on PCI write */ 29 #define WDT_BUZZER (io+6) /* PCI only: rd=disable, wr=enable */ 30 #define WDT_DC (io+7) 34 #define WDT_CLOCK (io+12) /* COUNT2: rd=16.67MHz, wr=2.0833MHz */ 36 #define WDT_OPTONOTRST (io+13) /* wr=enable, rd=disable */ 38 #define WDT_OPTORST (io+14) /* wr=enable, rd=disable */ 40 #define WDT_PROGOUT (io+15) /* wr=enable, rd=disable */
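
Everything in this header is an offset from a driver-wide io base, so the code that includes it can talk to the card with plain outb()/inb() against named registers. A short, hedged usage sketch; the helper and the repeated defines are illustrative only:

#include <linux/io.h>			/* outb(), inb() */

static int io;				/* card base port, set at probe time */

#define WDT_COUNT0	(io + 0)	/* repeated from the header so the */
#define WDT_CR		(io + 3)	/* fragment is self-contained */

/* hypothetical helper, not part of the driver */
static void wdt_load_count0(unsigned int count)
{
	outb(count & 0xff, WDT_COUNT0);		/* low byte first */
	outb((count >> 8) & 0xff, WDT_COUNT0);	/* then high byte */
}
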
|
H A D | sc1200wdt.c | 48 #include <linux/io.h> 55 #define PMIR (io) /* Power Management Index Register */ 56 #define PMDR (io+1) /* Power Management Data Register */ 76 static int io = -1; variable 80 static DEFINE_SPINLOCK(sc1200wdt_lock); /* io port access serialisation */ 91 module_param(io, int, 0); 92 MODULE_PARM_DESC(io, "io port"); 332 * but we don't have access to those io regions. sc1200wdt_probe() 359 io = pnp_port_start(wdt_dev, 0); scl200wdt_pnp_probe() 362 if (!request_region(io, io_len, SC1200_MODULE_NAME)) { scl200wdt_pnp_probe() 363 pr_err("Unable to register IO port %#x\n", io); scl200wdt_pnp_probe() 367 pr_info("PnP device found at io port %#x/%d\n", io, io_len); scl200wdt_pnp_probe() 374 release_region(io, io_len); scl200wdt_pnp_remove() 403 if (io == -1) { sc1200wdt_init() 404 pr_err("io parameter must be specified\n"); sc1200wdt_init() 417 if (!request_region(io, io_len, SC1200_MODULE_NAME)) { sc1200wdt_init() 418 pr_err("Unable to register IO port %#x\n", io); sc1200wdt_init() 449 release_region(io, io_len); sc1200wdt_init() 470 release_region(io, io_len); sc1200wdt_exit()
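
The pattern visible above recurs across the ISA watchdog drivers: take the base port from the io module parameter (or from PnP), reserve it with request_region() before touching the hardware, and release it again on failure and on exit. A condensed, hedged sketch of that skeleton; my_wdt_* and IO_LEN are illustrative names:

#include <linux/ioport.h>
#include <linux/module.h>

#define IO_LEN 2			/* this device decodes two ports */

static int io = -1;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "base I/O port of the watchdog");

static int __init my_wdt_init(void)
{
	if (io == -1) {
		pr_err("io parameter must be specified\n");
		return -EINVAL;
	}
	if (!request_region(io, IO_LEN, "my_wdt"))
		return -EBUSY;		/* someone else owns the ports */

	/* ... program the device with inb()/outb() on io .. io + IO_LEN - 1 ... */
	return 0;
}

static void __exit my_wdt_exit(void)
{
	release_region(io, IO_LEN);
}

module_init(my_wdt_init);
module_exit(my_wdt_exit);
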
|
H A D | eurotechwdt.c | 61 #include <linux/io.h> 74 static int io = 0x3f0; variable 100 module_param(io, int, 0); 101 MODULE_PARM_DESC(io, "Eurotech WDT io port (default=0x3f0)"); 114 outb(index, io); eurwdt_write_reg() 115 outb(data, io+1); eurwdt_write_reg() 120 outb(0xaa, io); eurwdt_lock_chip() 125 outb(0x55, io); eurwdt_unlock_chip() 414 release_region(io, 2); eurwdt_exit() 436 if (!request_region(io, 2, "eurwdt")) { eurwdt_init() 437 pr_err("IO %X is not free\n", io); eurwdt_init() 458 io, irq, (!strcmp("int", ev) ? "int" : "reboot")); eurwdt_init() 467 release_region(io, 2); eurwdt_init()
|
/linux-4.1.27/arch/mn10300/kernel/ |
H A D | Makefile | 10 ptrace.o setup.o time.o sys_mn10300.o io.o \ 21 obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o 22 obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
|
/linux-4.1.27/drivers/input/serio/ |
H A D | ambakmi.c | 25 #include <asm/io.h> 31 struct serio *io; member in struct:amba_kmi_port 46 serio_interrupt(kmi->io, readb(KMIDATA), 0); amba_kmi_int() 54 static int amba_kmi_write(struct serio *io, unsigned char val) amba_kmi_write() argument 56 struct amba_kmi_port *kmi = io->port_data; amba_kmi_write() 68 static int amba_kmi_open(struct serio *io) amba_kmi_open() argument 70 struct amba_kmi_port *kmi = io->port_data; amba_kmi_open() 100 static void amba_kmi_close(struct serio *io) amba_kmi_close() argument 102 struct amba_kmi_port *kmi = io->port_data; amba_kmi_close() 114 struct serio *io; amba_kmi_probe() local 122 io = kzalloc(sizeof(struct serio), GFP_KERNEL); amba_kmi_probe() 123 if (!kmi || !io) { amba_kmi_probe() 129 io->id.type = SERIO_8042; amba_kmi_probe() 130 io->write = amba_kmi_write; amba_kmi_probe() 131 io->open = amba_kmi_open; amba_kmi_probe() 132 io->close = amba_kmi_close; amba_kmi_probe() 133 strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name)); amba_kmi_probe() 134 strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys)); amba_kmi_probe() 135 io->port_data = kmi; amba_kmi_probe() 136 io->dev.parent = &dev->dev; amba_kmi_probe() 138 kmi->io = io; amba_kmi_probe() 154 serio_register_port(kmi->io); amba_kmi_probe() 161 kfree(io); amba_kmi_probe() 170 serio_unregister_port(kmi->io); amba_kmi_remove() 183 serio_reconnect(kmi->io); amba_kmi_resume()
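
Each driver in this directory follows the registration shape shown above: allocate a struct serio, fill in the id and the open/close/write hooks, point port_data back at the driver state, and hand the port to serio_register_port(); received bytes are then pushed up with serio_interrupt() from the ISR. A condensed, hedged sketch; the my_* names are illustrative:

#include <linux/serio.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_kmi {				/* stands in for the per-port driver state */
	struct serio *io;
	void __iomem *base;
};

static int my_write(struct serio *io, unsigned char val)
{
	struct my_kmi *kmi = io->port_data;	/* back to the driver state */

	/* a real driver would push 'val' to the hardware via kmi->base */
	(void)kmi;
	return 0;
}

static int my_open(struct serio *io)
{
	return 0;			/* a real driver enables the rx path here */
}

static void my_close(struct serio *io)
{
	/* a real driver quiesces the port here */
}

static int my_register_port(struct my_kmi *kmi, struct device *parent)
{
	struct serio *io = kzalloc(sizeof(*io), GFP_KERNEL);

	if (!io)
		return -ENOMEM;

	io->id.type    = SERIO_8042;
	io->write      = my_write;
	io->open       = my_open;
	io->close      = my_close;
	io->port_data  = kmi;		/* recovered in the hooks above */
	io->dev.parent = parent;
	strlcpy(io->name, "example PS/2 port", sizeof(io->name));
	strlcpy(io->phys, "example/serio0", sizeof(io->phys));

	kmi->io = io;
	serio_register_port(io);
	return 0;
}
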
|
H A D | arc_ps2.c | 18 #include <linux/io.h> 38 struct serio *io; member in struct:arc_ps2_port 73 serio_interrupt(port->io, data, flag); arc_ps2_check_rx() 76 dev_err(&port->io->dev, "PS/2 hardware stuck\n"); arc_ps2_check_rx() 90 static int arc_ps2_write(struct serio *io, unsigned char val) arc_ps2_write() argument 93 struct arc_ps2_port *port = io->port_data; arc_ps2_write() 107 dev_err(&io->dev, "write timeout\n"); arc_ps2_write() 111 static int arc_ps2_open(struct serio *io) arc_ps2_open() argument 113 struct arc_ps2_port *port = io->port_data; arc_ps2_open() 120 static void arc_ps2_close(struct serio *io) arc_ps2_close() argument 122 struct arc_ps2_port *port = io->port_data; arc_ps2_close() 159 struct serio *io; arc_ps2_create_port() local 161 io = kzalloc(sizeof(struct serio), GFP_KERNEL); arc_ps2_create_port() 162 if (!io) arc_ps2_create_port() 165 io->id.type = SERIO_8042; arc_ps2_create_port() 166 io->write = arc_ps2_write; arc_ps2_create_port() 167 io->open = arc_ps2_open; arc_ps2_create_port() 168 io->close = arc_ps2_close; arc_ps2_create_port() 169 snprintf(io->name, sizeof(io->name), "ARC PS/2 port%d", index); arc_ps2_create_port() 170 snprintf(io->phys, sizeof(io->phys), "arc/serio%d", index); arc_ps2_create_port() 171 io->port_data = port; arc_ps2_create_port() 173 port->io = io; arc_ps2_create_port() 181 serio_register_port(port->io); arc_ps2_create_port() 232 serio_unregister_port(arc_ps2->port[i].io); arc_ps2_probe() 248 serio_unregister_port(arc_ps2->port[i].io); arc_ps2_remove()
|
H A D | apbps2.c | 33 #include <linux/io.h> 57 struct serio *io; member in struct:apbps2_priv 78 serio_interrupt(priv->io, data, rxflags); apbps2_isr() 86 static int apbps2_write(struct serio *io, unsigned char val) apbps2_write() argument 88 struct apbps2_priv *priv = io->port_data; apbps2_write() 106 static int apbps2_open(struct serio *io) apbps2_open() argument 108 struct apbps2_priv *priv = io->port_data; apbps2_open() 126 static void apbps2_close(struct serio *io) apbps2_close() argument 128 struct apbps2_priv *priv = io->port_data; apbps2_close() 175 priv->io = kzalloc(sizeof(struct serio), GFP_KERNEL); apbps2_of_probe() 176 if (!priv->io) apbps2_of_probe() 179 priv->io->id.type = SERIO_8042; apbps2_of_probe() 180 priv->io->open = apbps2_open; apbps2_of_probe() 181 priv->io->close = apbps2_close; apbps2_of_probe() 182 priv->io->write = apbps2_write; apbps2_of_probe() 183 priv->io->port_data = priv; apbps2_of_probe() 184 strlcpy(priv->io->name, "APBPS2 PS/2", sizeof(priv->io->name)); apbps2_of_probe() 185 snprintf(priv->io->phys, sizeof(priv->io->phys), apbps2_of_probe() 190 serio_register_port(priv->io); apbps2_of_probe() 201 serio_unregister_port(priv->io); apbps2_of_remove()
|
H A D | altera_ps2.c | 19 #include <linux/io.h> 26 struct serio *io; member in struct:ps2if 41 serio_interrupt(ps2if->io, status & 0xff, 0); altera_ps2_rxint() 51 static int altera_ps2_write(struct serio *io, unsigned char val) altera_ps2_write() argument 53 struct ps2if *ps2if = io->port_data; altera_ps2_write() 59 static int altera_ps2_open(struct serio *io) altera_ps2_open() argument 61 struct ps2if *ps2if = io->port_data; altera_ps2_open() 71 static void altera_ps2_close(struct serio *io) altera_ps2_close() argument 73 struct ps2if *ps2if = io->port_data; altera_ps2_close() 120 ps2if->io = serio; altera_ps2_probe() 124 serio_register_port(ps2if->io); altera_ps2_probe() 137 serio_unregister_port(ps2if->io); altera_ps2_remove()
|
H A D | at32psif.c | 18 #include <linux/io.h> 98 struct serio *io; member in struct:psif 123 serio_interrupt(psif->io, val, io_flags); psif_interrupt() 131 static int psif_write(struct serio *io, unsigned char val) psif_write() argument 133 struct psif *psif = io->port_data; psif_write() 155 static int psif_open(struct serio *io) psif_open() argument 157 struct psif *psif = io->port_data; psif_open() 172 static void psif_close(struct serio *io) psif_close() argument 174 struct psif *psif = io->port_data; psif_close() 207 struct serio *io; psif_probe() local 220 io = kzalloc(sizeof(struct serio), GFP_KERNEL); psif_probe() 221 if (!io) { psif_probe() 226 psif->io = io; psif_probe() 272 io->id.type = SERIO_8042; psif_probe() 273 io->write = psif_write; psif_probe() 274 io->open = psif_open; psif_probe() 275 io->close = psif_close; psif_probe() 276 snprintf(io->name, sizeof(io->name), "AVR32 PS/2 port%d", pdev->id); psif_probe() 277 snprintf(io->phys, sizeof(io->phys), "at32psif/serio%d", pdev->id); psif_probe() 278 io->port_data = psif; psif_probe() 279 io->dev.parent = &pdev->dev; psif_probe() 284 serio_register_port(psif->io); psif_probe() 297 kfree(io); psif_probe() 311 serio_unregister_port(psif->io); psif_remove()
|
H A D | pcips2.c | 21 #include <asm/io.h> 41 struct serio *io; member in struct:pcips2_data 46 static int pcips2_write(struct serio *io, unsigned char val) pcips2_write() argument 48 struct pcips2_data *ps2if = io->port_data; pcips2_write() 83 serio_interrupt(ps2if->io, scancode, flag); pcips2_interrupt() 102 static int pcips2_open(struct serio *io) pcips2_open() argument 104 struct pcips2_data *ps2if = io->port_data; pcips2_open() 120 static void pcips2_close(struct serio *io) pcips2_close() argument 122 struct pcips2_data *ps2if = io->port_data; pcips2_close() 159 ps2if->io = serio; pcips2_probe() 165 serio_register_port(ps2if->io); pcips2_probe() 182 serio_unregister_port(ps2if->io); pcips2_remove()
|
H A D | sa1111ps2.c | 22 #include <asm/io.h> 47 struct serio *io; member in struct:ps2if 80 serio_interrupt(ps2if->io, scancode, flag); ps2_rxint() 114 static int ps2_write(struct serio *io, unsigned char val) ps2_write() argument 116 struct ps2if *ps2if = io->port_data; ps2_write() 141 static int ps2_open(struct serio *io) ps2_open() argument 143 struct ps2if *ps2if = io->port_data; ps2_open() 177 static void ps2_close(struct serio *io) ps2_close() argument 179 struct ps2if *ps2if = io->port_data; ps2_close() 276 ps2if->io = serio; ps2_probe() 321 serio_register_port(ps2if->io); ps2_probe() 341 serio_unregister_port(ps2if->io); ps2_remove()
|
/linux-4.1.27/drivers/md/ |
H A D | dm-io.c | 18 #include <linux/dm-io.h> 20 #define DM_MSG_PREFIX "io" 30 * Aligning 'struct io' reduces the number of bits required to store 33 struct io { struct 84 * We need to keep track of which region a bio is doing io for. 86 * ensure the 'struct io' pointer is aligned so enough low bits are 90 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, store_io_and_region_in_bio() argument 93 if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) { store_io_and_region_in_bio() 94 DMCRIT("Unaligned struct io pointer %p", io); store_io_and_region_in_bio() 98 bio->bi_private = (void *)((unsigned long)io | region); store_io_and_region_in_bio() 101 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, retrieve_io_and_region_from_bio() argument 106 *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS); retrieve_io_and_region_from_bio() 111 * We need an io object to keep track of the number of bios that 112 * have been dispatched for a particular io. 114 static void complete_io(struct io *io) complete_io() argument 116 unsigned long error_bits = io->error_bits; complete_io() 117 io_notify_fn fn = io->callback; complete_io() 118 void *context = io->context; complete_io() 120 if (io->vma_invalidate_size) complete_io() 121 invalidate_kernel_vmap_range(io->vma_invalidate_address, complete_io() 122 io->vma_invalidate_size); complete_io() 124 mempool_free(io, io->client->pool); complete_io() 128 static void dec_count(struct io *io, unsigned int region, int error) dec_count() argument 131 set_bit(region, &io->error_bits); dec_count() 133 if (atomic_dec_and_test(&io->count)) dec_count() 134 complete_io(io); dec_count() 139 struct io *io; endio() local 146 * The bio destructor in bio_put() may use the io object. endio() 148 retrieve_io_and_region_from_bio(bio, &io, ®ion); endio() 152 dec_count(io, region, error); endio() 157 * destination page for io. 281 struct dpages *dp, struct io *io) do_region() 302 dec_count(io, region, -EOPNOTSUPP); do_region() 320 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); do_region() 324 store_io_and_region_in_bio(bio, io, region); do_region() 356 atomic_inc(&io->count); do_region() 363 struct io *io, int sync) dispatch_io() 380 do_region(rw, i, where + i, dp, io); dispatch_io() 385 * the io being completed too early. 
dispatch_io() 387 dec_count(io, 0, 0); dispatch_io() 407 struct io *io; sync_io() local 417 io = mempool_alloc(client->pool, GFP_NOIO); sync_io() 418 io->error_bits = 0; sync_io() 419 atomic_set(&io->count, 1); /* see dispatch_io() */ sync_io() 420 io->client = client; sync_io() 421 io->callback = sync_io_complete; sync_io() 422 io->context = &sio; sync_io() 424 io->vma_invalidate_address = dp->vma_invalidate_address; sync_io() 425 io->vma_invalidate_size = dp->vma_invalidate_size; sync_io() 427 dispatch_io(rw, num_regions, where, dp, io, 1); sync_io() 441 struct io *io; async_io() local 449 io = mempool_alloc(client->pool, GFP_NOIO); async_io() 450 io->error_bits = 0; async_io() 451 atomic_set(&io->count, 1); /* see dispatch_io() */ async_io() 452 io->client = client; async_io() 453 io->callback = fn; async_io() 454 io->context = context; async_io() 456 io->vma_invalidate_address = dp->vma_invalidate_address; async_io() 457 io->vma_invalidate_size = dp->vma_invalidate_size; async_io() 459 dispatch_io(rw, num_regions, where, dp, io, 0); async_io() 529 _dm_io_cache = KMEM_CACHE(io, 0); dm_io_init() 280 do_region(int rw, unsigned region, struct dm_io_region *where, struct dpages *dp, struct io *io) do_region() argument 361 dispatch_io(int rw, unsigned int num_regions, struct dm_io_region *where, struct dpages *dp, struct io *io, int sync) dispatch_io() argument
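
The store_io_and_region_in_bio()/retrieve_io_and_region_from_bio() pair above works because each struct io is allocated DM_IO_MAX_REGIONS-aligned, so the region number fits in the low bits of the pointer kept in bio->bi_private. A self-contained userspace illustration of that low-bit tagging; MAX_REGIONS and the struct layout are illustrative, not the kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_REGIONS 8			/* must be a power of two */

struct io { unsigned long error_bits; };

static void *pack(struct io *io, unsigned region)
{
	assert((uintptr_t)io % MAX_REGIONS == 0 && region < MAX_REGIONS);
	return (void *)((uintptr_t)io | region);	/* tag the low bits */
}

static struct io *unpack(void *priv, unsigned *region)
{
	*region = (uintptr_t)priv & (MAX_REGIONS - 1);
	return (struct io *)((uintptr_t)priv & ~(uintptr_t)(MAX_REGIONS - 1));
}

int main(void)
{
	struct io *io = aligned_alloc(MAX_REGIONS, sizeof(*io));
	void *priv = pack(io, 5);	/* what would land in bio->bi_private */
	unsigned region;
	struct io *back = unpack(priv, &region);

	printf("pointer recovered: %d, region = %u\n", back == io, region);
	free(io);
	return 0;
}
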
|
H A D | dm-verity.c | 119 static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) io_hash_desc() argument 121 return (struct shash_desc *)(io + 1); io_hash_desc() 124 static u8 *io_real_digest(struct dm_verity *v, struct dm_verity_io *io) io_real_digest() argument 126 return (u8 *)(io + 1) + v->shash_descsize; io_real_digest() 129 static u8 *io_want_digest(struct dm_verity *v, struct dm_verity_io *io) io_want_digest() argument 131 return (u8 *)(io + 1) + v->shash_descsize + v->digest_size; io_want_digest() 253 * On successful return, io_want_digest(v, io) contains the hash value for 258 * against current value of io_want_digest(v, io). 260 static int verity_verify_level(struct dm_verity_io *io, sector_t block, verity_verify_level() argument 263 struct dm_verity *v = io->v; verity_verify_level() 288 desc = io_hash_desc(v, io); verity_verify_level() 319 result = io_real_digest(v, io); verity_verify_level() 325 if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) { verity_verify_level() 337 memcpy(io_want_digest(v, io), data, v->digest_size); verity_verify_level() 351 static int verity_verify_io(struct dm_verity_io *io) verity_verify_io() argument 353 struct dm_verity *v = io->v; verity_verify_io() 354 struct bio *bio = dm_bio_from_per_bio_data(io, verity_verify_io() 359 for (b = 0; b < io->n_blocks; b++) { verity_verify_io() 373 int r = verity_verify_level(io, io->block + b, 0, true); verity_verify_io() 380 memcpy(io_want_digest(v, io), v->root_digest, v->digest_size); verity_verify_io() 383 int r = verity_verify_level(io, io->block + b, i, false); verity_verify_io() 389 desc = io_hash_desc(v, io); verity_verify_io() 409 struct bio_vec bv = bio_iter_iovec(bio, io->iter); verity_verify_io() 423 bio_advance_iter(bio, &io->iter, len); verity_verify_io() 435 result = io_real_digest(v, io); verity_verify_io() 441 if (unlikely(memcmp(result, io_want_digest(v, io), v->digest_size))) { verity_verify_io() 443 io->block + b)) verity_verify_io() 452 * End one "io" structure with a given error. 454 static void verity_finish_io(struct dm_verity_io *io, int error) verity_finish_io() argument 456 struct dm_verity *v = io->v; verity_finish_io() 457 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); verity_finish_io() 459 bio->bi_end_io = io->orig_bi_end_io; verity_finish_io() 460 bio->bi_private = io->orig_bi_private; verity_finish_io() 467 struct dm_verity_io *io = container_of(w, struct dm_verity_io, work); verity_work() local 469 verity_finish_io(io, verity_verify_io(io)); verity_work() 474 struct dm_verity_io *io = bio->bi_private; verity_end_io() local 477 verity_finish_io(io, error); verity_end_io() 481 INIT_WORK(&io->work, verity_work); verity_end_io() 482 queue_work(io->v->verify_wq, &io->work); verity_end_io() 486 * Prefetch buffers for the specified io. 
525 static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) verity_submit_prefetch() argument 537 pw->block = io->block; verity_submit_prefetch() 538 pw->n_blocks = io->n_blocks; verity_submit_prefetch() 549 struct dm_verity_io *io; verity_map() local 556 DMERR_LIMIT("unaligned io"); verity_map() 562 DMERR_LIMIT("io out of range"); verity_map() 569 io = dm_per_bio_data(bio, ti->per_bio_data_size); verity_map() 570 io->v = v; verity_map() 571 io->orig_bi_end_io = bio->bi_end_io; verity_map() 572 io->orig_bi_private = bio->bi_private; verity_map() 573 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); verity_map() 574 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; verity_map() 577 bio->bi_private = io; verity_map() 578 io->iter = bio->bi_iter; verity_map() 580 verity_submit_prefetch(v, io); verity_map()
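
io_hash_desc(), io_real_digest() and io_want_digest() above imply a single per-bio allocation: the dm_verity_io struct comes first and the hash descriptor and the two digests live right behind it at fixed offsets (the kernel reserves that space via ti->per_bio_data_size). A self-contained userspace illustration of the same trailing-layout trick; the sizes and names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vio { uint64_t block; };		/* stand-in for struct dm_verity_io */

#define DESC_SIZE   64			/* plays the role of v->shash_descsize */
#define DIGEST_SIZE 32			/* plays the role of v->digest_size */

static uint8_t *real_digest(struct vio *io)
{
	return (uint8_t *)(io + 1) + DESC_SIZE;
}

static uint8_t *want_digest(struct vio *io)
{
	return (uint8_t *)(io + 1) + DESC_SIZE + DIGEST_SIZE;
}

int main(void)
{
	/* one allocation holds the struct plus all of its trailing work areas */
	struct vio *io = calloc(1, sizeof(*io) + DESC_SIZE + 2 * DIGEST_SIZE);

	if (!io)
		return 1;

	printf("desc at +%zu, real digest at +%zu, wanted digest at +%zu\n",
	       sizeof(*io),
	       (size_t)(real_digest(io) - (uint8_t *)io),
	       (size_t)(want_digest(io) - (uint8_t *)io));
	free(io);
	return 0;
}
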
|
H A D | dm-crypt.c | 185 static void kcryptd_queue_crypt(struct dm_crypt_io *io); 902 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); crypt_free_req() local 904 if ((struct ablkcipher_request *)(io + 1) != req) crypt_free_req() 973 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) crypt_alloc_buffer() argument 975 struct crypt_config *cc = io->cc; crypt_alloc_buffer() 991 clone_init(io, clone); crypt_alloc_buffer() 1035 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, crypt_io_init() argument 1038 io->cc = cc; crypt_io_init() 1039 io->base_bio = bio; crypt_io_init() 1040 io->sector = sector; crypt_io_init() 1041 io->error = 0; crypt_io_init() 1042 io->ctx.req = NULL; crypt_io_init() 1043 atomic_set(&io->io_pending, 0); crypt_io_init() 1046 static void crypt_inc_pending(struct dm_crypt_io *io) crypt_inc_pending() argument 1048 atomic_inc(&io->io_pending); crypt_inc_pending() 1055 static void crypt_dec_pending(struct dm_crypt_io *io) crypt_dec_pending() argument 1057 struct crypt_config *cc = io->cc; crypt_dec_pending() 1058 struct bio *base_bio = io->base_bio; crypt_dec_pending() 1059 int error = io->error; crypt_dec_pending() 1061 if (!atomic_dec_and_test(&io->io_pending)) crypt_dec_pending() 1064 if (io->ctx.req) crypt_dec_pending() 1065 crypt_free_req(cc, io->ctx.req, base_bio); crypt_dec_pending() 1089 struct dm_crypt_io *io = clone->bi_private; crypt_endio() local 1090 struct crypt_config *cc = io->cc; crypt_endio() 1105 kcryptd_queue_crypt(io); crypt_endio() 1110 io->error = error; crypt_endio() 1112 crypt_dec_pending(io); crypt_endio() 1115 static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone_init() argument 1117 struct crypt_config *cc = io->cc; clone_init() 1119 clone->bi_private = io; clone_init() 1122 clone->bi_rw = io->base_bio->bi_rw; clone_init() 1125 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) kcryptd_io_read() argument 1127 struct crypt_config *cc = io->cc; kcryptd_io_read() 1136 clone = bio_clone_fast(io->base_bio, gfp, cc->bs); kcryptd_io_read() 1140 crypt_inc_pending(io); kcryptd_io_read() 1142 clone_init(io, clone); kcryptd_io_read() 1143 clone->bi_iter.bi_sector = cc->start + io->sector; kcryptd_io_read() 1151 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); kcryptd_io_read_work() local 1153 crypt_inc_pending(io); kcryptd_io_read_work() 1154 if (kcryptd_io_read(io, GFP_NOIO)) kcryptd_io_read_work() 1155 io->error = -ENOMEM; kcryptd_io_read_work() 1156 crypt_dec_pending(io); kcryptd_io_read_work() 1159 static void kcryptd_queue_read(struct dm_crypt_io *io) kcryptd_queue_read() argument 1161 struct crypt_config *cc = io->cc; kcryptd_queue_read() 1163 INIT_WORK(&io->work, kcryptd_io_read_work); kcryptd_queue_read() 1164 queue_work(cc->io_queue, &io->work); kcryptd_queue_read() 1167 static void kcryptd_io_write(struct dm_crypt_io *io) kcryptd_io_write() argument 1169 struct bio *clone = io->ctx.bio_out; kcryptd_io_write() 1179 struct dm_crypt_io *io; dmcrypt_write() local 1224 io = crypt_io_from_node(rb_first(&write_tree)); dmcrypt_write() 1225 rb_erase(&io->rb_node, &write_tree); dmcrypt_write() 1226 kcryptd_io_write(io); dmcrypt_write() 1233 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) kcryptd_crypt_write_io_submit() argument 1235 struct bio *clone = io->ctx.bio_out; kcryptd_crypt_write_io_submit() 1236 struct crypt_config *cc = io->cc; kcryptd_crypt_write_io_submit() 1241 if (unlikely(io->error < 0)) { 
kcryptd_crypt_write_io_submit() 1244 crypt_dec_pending(io); kcryptd_crypt_write_io_submit() 1249 BUG_ON(io->ctx.iter_out.bi_size); kcryptd_crypt_write_io_submit() 1251 clone->bi_iter.bi_sector = cc->start + io->sector; kcryptd_crypt_write_io_submit() 1261 sector = io->sector; kcryptd_crypt_write_io_submit() 1269 rb_link_node(&io->rb_node, parent, rbp); kcryptd_crypt_write_io_submit() 1270 rb_insert_color(&io->rb_node, &cc->write_tree); kcryptd_crypt_write_io_submit() 1276 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) kcryptd_crypt_write_convert() argument 1278 struct crypt_config *cc = io->cc; kcryptd_crypt_write_convert() 1281 sector_t sector = io->sector; kcryptd_crypt_write_convert() 1285 * Prevent io from disappearing until this function completes. kcryptd_crypt_write_convert() 1287 crypt_inc_pending(io); kcryptd_crypt_write_convert() 1288 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); kcryptd_crypt_write_convert() 1290 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); kcryptd_crypt_write_convert() 1292 io->error = -EIO; kcryptd_crypt_write_convert() 1296 io->ctx.bio_out = clone; kcryptd_crypt_write_convert() 1297 io->ctx.iter_out = clone->bi_iter; kcryptd_crypt_write_convert() 1301 crypt_inc_pending(io); kcryptd_crypt_write_convert() 1302 r = crypt_convert(cc, &io->ctx); kcryptd_crypt_write_convert() 1304 io->error = -EIO; kcryptd_crypt_write_convert() 1305 crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); kcryptd_crypt_write_convert() 1307 /* Encryption was already finished, submit io now */ kcryptd_crypt_write_convert() 1309 kcryptd_crypt_write_io_submit(io, 0); kcryptd_crypt_write_convert() 1310 io->sector = sector; kcryptd_crypt_write_convert() 1314 crypt_dec_pending(io); kcryptd_crypt_write_convert() 1317 static void kcryptd_crypt_read_done(struct dm_crypt_io *io) kcryptd_crypt_read_done() argument 1319 crypt_dec_pending(io); kcryptd_crypt_read_done() 1322 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) kcryptd_crypt_read_convert() argument 1324 struct crypt_config *cc = io->cc; kcryptd_crypt_read_convert() 1327 crypt_inc_pending(io); kcryptd_crypt_read_convert() 1329 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, kcryptd_crypt_read_convert() 1330 io->sector); kcryptd_crypt_read_convert() 1332 r = crypt_convert(cc, &io->ctx); kcryptd_crypt_read_convert() 1334 io->error = -EIO; kcryptd_crypt_read_convert() 1336 if (atomic_dec_and_test(&io->ctx.cc_pending)) kcryptd_crypt_read_convert() 1337 kcryptd_crypt_read_done(io); kcryptd_crypt_read_convert() 1339 crypt_dec_pending(io); kcryptd_crypt_read_convert() 1347 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); kcryptd_async_done() local 1348 struct crypt_config *cc = io->cc; kcryptd_async_done() 1359 io->error = -EIO; kcryptd_async_done() 1361 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); kcryptd_async_done() 1366 if (bio_data_dir(io->base_bio) == READ) kcryptd_async_done() 1367 kcryptd_crypt_read_done(io); kcryptd_async_done() 1369 kcryptd_crypt_write_io_submit(io, 1); kcryptd_async_done() 1374 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); kcryptd_crypt() local 1376 if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt() 1377 kcryptd_crypt_read_convert(io); kcryptd_crypt() 1379 kcryptd_crypt_write_convert(io); kcryptd_crypt() 1382 static void kcryptd_queue_crypt(struct dm_crypt_io *io) kcryptd_queue_crypt() argument 1384 struct crypt_config *cc = io->cc; kcryptd_queue_crypt() 1386 
INIT_WORK(&io->work, kcryptd_crypt); kcryptd_queue_crypt() 1387 queue_work(cc->crypt_queue, &io->work); kcryptd_queue_crypt() 1847 ti->error = "Couldn't create kcryptd io queue"; crypt_ctr() 1885 struct dm_crypt_io *io; crypt_map() local 1901 io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_map() 1902 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); crypt_map() 1903 io->ctx.req = (struct ablkcipher_request *)(io + 1); crypt_map() 1905 if (bio_data_dir(io->base_bio) == READ) { crypt_map() 1906 if (kcryptd_io_read(io, GFP_NOWAIT)) crypt_map() 1907 kcryptd_queue_read(io); crypt_map() 1909 kcryptd_queue_crypt(io); crypt_map()
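
crypt_inc_pending()/crypt_dec_pending() above implement a plain reference count on the per-request io: every clone or asynchronous step takes a reference, and whichever path drops the count to zero completes the base bio. A self-contained userspace sketch of that pattern using C11 atomics; the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	atomic_int pending;
	int error;
};

static void req_get(struct req *r)
{
	atomic_fetch_add(&r->pending, 1);
}

static void req_put(struct req *r)
{
	/* the last reference completes the request, as crypt_dec_pending() does */
	if (atomic_fetch_sub(&r->pending, 1) == 1) {
		printf("complete, error=%d\n", r->error);
		free(r);
	}
}

int main(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	atomic_init(&r->pending, 1);	/* reference held by the submitter */
	req_get(r);			/* reference for an in-flight clone */
	req_put(r);			/* the clone finishes */
	req_put(r);			/* submitter drops its reference: count hits zero */
	return 0;
}
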
|
/linux-4.1.27/drivers/media/radio/ |
H A D | radio-rtrack2.c | 21 #include <linux/io.h> /* outb, outb_p */ 38 static int io[RTRACK2_MAX] = { [0] = CONFIG_RADIO_RTRACK2_PORT, variable 42 module_param_array(io, int, NULL, 0444); 43 MODULE_PARM_DESC(io, "I/O addresses of the RadioTrack card (0x20f or 0x30f)"); 54 outb_p(1, isa->io); zero() 55 outb_p(3, isa->io); zero() 56 outb_p(1, isa->io); zero() 61 outb_p(5, isa->io); one() 62 outb_p(7, isa->io); one() 63 outb_p(5, isa->io); one() 72 outb_p(0xc8, isa->io); rtrack2_s_frequency() 73 outb_p(0xc9, isa->io); rtrack2_s_frequency() 74 outb_p(0xc9, isa->io); rtrack2_s_frequency() 85 outb_p(0xc8, isa->io); rtrack2_s_frequency() 86 outb_p(v4l2_ctrl_g_ctrl(isa->mute), isa->io); rtrack2_s_frequency() 93 return (inb(isa->io) & 2) ? 0 : 0xffff; rtrack2_g_signal() 98 outb(mute, isa->io); rtrack2_s_mute_volume() 120 .io_params = io,
|
H A D | radio-cadet.c | 41 #include <linux/io.h> /* outb, outb_p */ 53 static int io = -1; /* default to isapnp activation */ variable 56 module_param(io, int, 0); 57 MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)"); 68 int io; member in struct:cadet 121 outb(7, dev->io); /* Select tuner control */ cadet_getstereo() 122 if ((inb(dev->io + 1) & 0x40) == 0) cadet_getstereo() 136 outb(7, dev->io); /* Select tuner control */ cadet_gettune() 137 curvol = inb(dev->io + 1); /* Save current volume/mute setting */ cadet_gettune() 138 outb(0x00, dev->io + 1); /* Ensure WRITE-ENABLE is LOW */ cadet_gettune() 145 fifo = (fifo << 1) | ((inb(dev->io + 1) >> 7) & 0x01); cadet_gettune() 147 outb(0x01, dev->io + 1); cadet_gettune() 148 dev->tunestat &= inb(dev->io + 1); cadet_gettune() 149 outb(0x00, dev->io + 1); cadet_gettune() 156 outb(curvol, dev->io + 1); cadet_gettune() 193 outb(7, dev->io); /* Select tuner control */ cadet_settune() 200 outb(7, dev->io); /* Select tuner control */ cadet_settune() 201 outb(test, dev->io + 1); /* Initialize for write */ cadet_settune() 204 outb(test, dev->io + 1); cadet_settune() 206 outb(test, dev->io + 1); cadet_settune() 209 outb(test, dev->io + 1); cadet_settune() 247 outb(7, dev->io); /* Select tuner control */ cadet_setfreq() 248 curvol = inb(dev->io + 1); cadet_setfreq() 256 outb(7, dev->io); /* Select tuner control */ cadet_setfreq() 257 outb(curvol, dev->io + 1); cadet_setfreq() 269 outb(3, dev->io); cadet_setfreq() 270 outb(inb(dev->io + 1) & 0x7f, dev->io + 1); cadet_setfreq() 290 outb(0x3, dev->io); /* Select RDS Decoder Control */ cadet_handler() 291 if ((inb(dev->io + 1) & 0x20) != 0) cadet_handler() 293 outb(0x80, dev->io); /* Select RDS fifo */ cadet_handler() 295 while ((inb(dev->io) & 0x80) != 0) { cadet_handler() 296 dev->rdsbuf[dev->rdsin] = inb(dev->io + 1); cadet_handler() 322 outb(0x80, dev->io); /* Select RDS fifo */ cadet_start_rds() 384 outb(3, dev->io); vidioc_g_tuner() 385 outb(inb(dev->io + 1) & 0x7f, dev->io + 1); vidioc_g_tuner() 387 outb(3, dev->io); vidioc_g_tuner() 388 if (inb(dev->io + 1) & 0x80) vidioc_g_tuner() 449 outb(7, dev->io); /* Select tuner control */ cadet_s_ctrl() 451 outb(0x00, dev->io + 1); cadet_s_ctrl() 453 outb(0x20, dev->io + 1); cadet_s_ctrl() 548 if (io > 0) cadet_pnp_probe() 554 io = pnp_port_start(dev, 0); cadet_pnp_probe() 556 printk(KERN_INFO "radio-cadet: PnP reports device at %#x\n", io); cadet_pnp_probe() 558 return io; cadet_pnp_probe() 578 dev->io = iovals[i]; cadet_probe() 579 if (request_region(dev->io, 2, "cadet-probe")) { cadet_probe() 582 release_region(dev->io, 2); cadet_probe() 585 release_region(dev->io, 2); cadet_probe() 588 dev->io = -1; cadet_probe() 592 * io should only be set if the user has used something like 607 if (io < 0) cadet_init() 609 dev->io = io; cadet_init() 612 if (dev->io < 0) cadet_init() 616 if (dev->io < 0) { cadet_init() 618 v4l2_err(v4l2_dev, "you must set an I/O address with io=0x330, 0x332, 0x334,\n"); cadet_init() 623 if (!request_region(dev->io, 2, "cadet")) cadet_init() 628 release_region(dev->io, 2); cadet_init() 658 v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io); cadet_init() 663 release_region(dev->io, 2); cadet_init() 676 outb(7, dev->io); /* Mute */ cadet_exit() 677 outb(0x00, dev->io + 1); cadet_exit() 678 release_region(dev->io, 2); cadet_exit()
|
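Most of the cadet accesses above are an index/data pair: a selector byte goes to the base port, then data moves through base + 1 (outb(7, dev->io) followed by inb(dev->io + 1), for example). A hedged sketch of that access style, with the selector treated as a generic parameter rather than the card's documented register map:

#include <linux/io.h>

static u8 demo_read_indexed(unsigned long io, u8 index)
{
	outb(index, io);	/* select the register */
	return inb(io + 1);	/* read it back through the data latch */
}

static void demo_write_indexed(unsigned long io, u8 index, u8 val)
{
	outb(index, io);	/* select the register */
	outb(val, io + 1);	/* write through the data latch */
}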
H A D | radio-sf16fmi.c | 27 #include <linux/io.h> /* outb, outb_p */ 39 static int io = -1; variable 42 module_param(io, int, 0); 43 MODULE_PARM_DESC(io, "I/O address of the SF16-FMI/SF16-FMP/SF16-FMD card (0x284 or 0x384)"); 51 int io; member in struct:fmi 86 outb_p(bits, fmi->io); fmi_set_pins() 93 outb(0x00, fmi->io); fmi_mute() 100 outb(0x08, fmi->io); fmi_unmute() 111 outb(val, fmi->io); fmi_getsigstr() 112 outb(val | 0x10, fmi->io); fmi_getsigstr() 114 res = (int)inb(fmi->io + 1); fmi_getsigstr() 115 outb(val, fmi->io); fmi_getsigstr() 281 if (io < 0) { fmi_init() 283 io = probe_ports[i]; fmi_init() 284 if (io == 0) { fmi_init() 285 io = isapnp_fmi_probe(); fmi_init() 286 if (io < 0) fmi_init() 290 if (!request_region(io, 2, "radio-sf16fmi")) { fmi_init() 293 io = -1; fmi_init() 297 ((inb(io) & 0xf9) == 0xf9 && (inb(io) & 0x4) == 0)) fmi_init() 299 release_region(io, 2); fmi_init() 300 io = -1; fmi_init() 303 if (!request_region(io, 2, "radio-sf16fmi")) { fmi_init() 304 printk(KERN_ERR "radio-sf16fmi: port %#x already in use\n", io); fmi_init() 307 if (inb(io) == 0xff) { fmi_init() 308 printk(KERN_ERR "radio-sf16fmi: card not present at %#x\n", io); fmi_init() 309 release_region(io, 2); fmi_init() 313 if (io < 0) { fmi_init() 319 fmi->io = io; fmi_init() 323 release_region(fmi->io, 2); fmi_init() 359 release_region(fmi->io, 2); fmi_init() 365 v4l2_info(v4l2_dev, "card driver at 0x%x\n", fmi->io); fmi_init() 376 release_region(fmi->io, 2); fmi_exit()
|
H A D | radio-zoltrix.c | 47 #include <linux/io.h> /* outb, outb_p */ 64 static int io[ZOLTRIX_MAX] = { [0] = CONFIG_RADIO_ZOLTRIX_PORT, variable 68 module_param_array(io, int, NULL, 0444); 69 MODULE_PARM_DESC(io, "I/O addresses of the Zoltrix Radio Plus card (0x20c or 0x30c)"); 93 outb(0, isa->io); zoltrix_s_mute_volume() 94 outb(0, isa->io); zoltrix_s_mute_volume() 95 inb(isa->io + 3); /* Zoltrix needs to be read to confirm */ zoltrix_s_mute_volume() 99 outb(vol - 1, isa->io); zoltrix_s_mute_volume() 101 inb(isa->io + 2); zoltrix_s_mute_volume() 125 outb(0, isa->io); zoltrix_s_frequency() 126 outb(0, isa->io); zoltrix_s_frequency() 127 inb(isa->io + 3); /* Zoltrix needs to be read to confirm */ zoltrix_s_frequency() 129 outb(0x40, isa->io); zoltrix_s_frequency() 130 outb(0xc0, isa->io); zoltrix_s_frequency() 135 outb(0x80, isa->io); zoltrix_s_frequency() 137 outb(0x00, isa->io); zoltrix_s_frequency() 139 outb(0x80, isa->io); zoltrix_s_frequency() 142 outb(0xc0, isa->io); zoltrix_s_frequency() 144 outb(0x40, isa->io); zoltrix_s_frequency() 146 outb(0xc0, isa->io); zoltrix_s_frequency() 152 outb(0x80, isa->io); zoltrix_s_frequency() 153 outb(0xc0, isa->io); zoltrix_s_frequency() 154 outb(0x40, isa->io); zoltrix_s_frequency() 156 inb(isa->io + 2); zoltrix_s_frequency() 168 outb(0x00, isa->io); /* This stuff I found to do nothing */ zoltrix_g_rxsubchans() 169 outb(zol->curvol, isa->io); zoltrix_g_rxsubchans() 172 a = inb(isa->io); zoltrix_g_rxsubchans() 174 b = inb(isa->io); zoltrix_g_rxsubchans() 185 outb(0x00, isa->io); /* This stuff I found to do nothing */ zoltrix_g_signal() 186 outb(zol->curvol, isa->io); zoltrix_g_signal() 189 a = inb(isa->io); zoltrix_g_signal() 191 b = inb(isa->io); zoltrix_g_signal() 196 /* I found this out by playing with a binary scanner on the card io */ zoltrix_g_signal() 225 .io_params = io,
|
H A D | radio-trust.c | 23 #include <linux/io.h> 42 static int io[TRUST_MAX] = { [0] = CONFIG_RADIO_TRUST_PORT, variable 46 module_param_array(io, int, NULL, 0444); 47 MODULE_PARM_DESC(io, "I/O addresses of the Trust FM Radio card (0x350 or 0x358)"); 67 #define TR_DELAY do { inb(tr->isa.io); inb(tr->isa.io); inb(tr->isa.io); } while (0) 68 #define TR_SET_SCL outb(tr->ioval |= 2, tr->isa.io) 69 #define TR_CLR_SCL outb(tr->ioval &= 0xfd, tr->isa.io) 70 #define TR_SET_SDA outb(tr->ioval |= 1, tr->isa.io) 71 #define TR_CLR_SDA outb(tr->ioval &= 0xfe, tr->isa.io) 124 outb(tr->ioval, isa->io); trust_s_mute_volume() 134 outb(tr->ioval, isa->io); trust_s_stereo() 143 v |= inb(isa->io); trust_g_signal() 221 .io_params = io,
|
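The TR_SET_SCL/TR_CLR_SDA macros above keep a software shadow of the output port (tr->ioval) and rewrite the whole byte for every clock or data transition. A condensed sketch of shifting one byte out with that shadow-register style; bit 0 as data and bit 1 as clock follow the excerpt, while the MSB-first ordering and timing are assumptions for illustration only:

#include <linux/io.h>

static void demo_bitbang_byte(unsigned long io, u8 *shadow, u8 byte)
{
	int i;

	for (i = 7; i >= 0; i--) {
		if (byte & (1 << i))
			*shadow |= 0x01;	/* data high */
		else
			*shadow &= ~0x01;	/* data low */
		outb(*shadow, io);

		outb(*shadow |= 0x02, io);	/* clock high: latch the bit */
		outb(*shadow &= ~0x02, io);	/* clock low */
	}
}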
H A D | radio-aimslab.c | 34 #include <linux/io.h> /* outb, outb_p */ 53 static int io[RTRACK_MAX] = { [0] = CONFIG_RADIO_RTRACK_PORT, variable 57 module_param_array(io, int, NULL, 0444); 58 MODULE_PARM_DESC(io, "I/O addresses of the RadioTrack card (0x20f or 0x30f)"); 101 outb_p(bits, rt->isa.io); rtrack_set_pins() 114 return 0xffff * !(inb(isa->io) & 2); rtrack_g_signal() 123 outb(0xd0, isa->io); /* volume steady + sigstr + off */ rtrack_s_mute_volume() 127 outb(0x48, isa->io); /* volume down but still "on" */ rtrack_s_mute_volume() 130 outb(0x98, isa->io); /* volume up + sigstr + on */ rtrack_s_mute_volume() 134 outb(0x58, isa->io); /* volume down + sigstr + on */ rtrack_s_mute_volume() 138 outb(0xd8, isa->io); /* volume steady + sigstr + on */ rtrack_s_mute_volume() 147 outb(0x90, isa->io); /* volume up but still "on" */ rtrack_initialize() 149 outb(0xc0, isa->io); /* steady volume, mute card */ rtrack_initialize() 172 .io_params = io,
|
H A D | radio-aztech.c | 23 #include <linux/io.h> /* outb, outb_p */ 43 static int io[AZTECH_MAX] = { [0] = CONFIG_RADIO_AZTECH_PORT, variable 48 module_param_array(io, int, NULL, 0444); 49 MODULE_PARM_DESC(io, "I/O addresses of the Aztech card (0x350 or 0x358)"); 80 outb_p(bits, az->isa.io); aztech_set_pins() 99 if (inb(isa->io) & AZTECH_BIT_MONO) aztech_g_rxsubchans() 106 return (inb(isa->io) & AZTECH_BIT_NOT_TUNED) ? 0 : 0xffff; aztech_g_signal() 116 outb(az->curvol, isa->io); aztech_s_mute_volume() 139 .io_params = io,
|
H A D | radio-terratec.c | 28 #include <linux/io.h> /* outb, outb_p */ 42 static int io = 0x590; variable 70 outb(0x80, isa->io + 1); terratec_s_mute_volume() 72 outb(0x00, isa->io + 1); terratec_s_mute_volume() 111 outb(WRT_EN | DATA, isa->io); terratec_s_frequency() 112 outb(WRT_EN | DATA | CLK_ON, isa->io); terratec_s_frequency() 113 outb(WRT_EN | DATA, isa->io); terratec_s_frequency() 115 outb(WRT_EN | 0x00, isa->io); terratec_s_frequency() 116 outb(WRT_EN | 0x00 | CLK_ON, isa->io); terratec_s_frequency() 119 outb(0x00, isa->io); terratec_s_frequency() 126 return (inb(isa->io) & 2) ? 0 : 0xffff; terratec_g_signal() 147 .io_params = &io,
|
H A D | radio-sf16fmr2.c | 14 #include <linux/io.h> /* outb, outb_p */ 31 int io; member in struct:fmr2 69 outb(bits, fmr2->io); fmr2_tea575x_set_pins() 75 u8 bits = inb(fmr2->io); fmr2_tea575x_get_pins() 120 outb(pins, fmr2->io); tc9154a_set_pins() 188 if (!fmr2->is_fmd2 && inb(fmr2->io) & FMR2_HASVOL) { fmr2_tea_ext_init() 206 static int fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io) fmr2_probe() argument 213 if (io == fmr2_cards[i]->io) fmr2_probe() 218 fmr2->io = io; fmr2_probe() 220 if (!request_region(fmr2->io, 2, fmr2->v4l2_dev.name)) { fmr2_probe() 221 printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io); fmr2_probe() 229 release_region(fmr2->io, 2); fmr2_probe() 243 release_region(fmr2->io, 2); fmr2_probe() 248 card_name, fmr2->io); fmr2_probe() 290 release_region(fmr2->io, 2); fmr2_remove()
|
H A D | radio-typhoon.c | 35 #include <linux/io.h> /* outb, outb_p */ 58 static int io[TYPHOON_MAX] = { [0] = CONFIG_RADIO_TYPHOON_PORT, variable 63 module_param_array(io, int, NULL, 0444); 64 MODULE_PARM_DESC(io, "I/O addresses of the Typhoon card (0x316 or 0x336)"); 104 outb_p((outval >> 8) & 0x01, isa->io + 4); typhoon_s_frequency() 105 outb_p(outval >> 9, isa->io + 6); typhoon_s_frequency() 106 outb_p(outval & 0xff, isa->io + 8); typhoon_s_frequency() 118 outb_p(vol / 2, isa->io); /* Set the volume, high bit. */ typhoon_s_mute_volume() 119 outb_p(vol % 2, isa->io + 2); /* Set the volume, low bit. */ typhoon_s_mute_volume() 149 .io_params = io,
|
H A D | radio-isa.c | 28 #include <linux/io.h> 149 v4l2_info(&isa->v4l2_dev, "I/O Port = 0x%03x\n", isa->io); radio_isa_log_status() 185 static bool radio_isa_valid_io(const struct radio_isa_driver *drv, int io) radio_isa_valid_io() argument 190 if (drv->io_ports[i] == io) radio_isa_valid_io() 220 if (!request_region(isa->io, region_size, v4l2_dev->name)) { radio_isa_common_probe() 221 v4l2_err(v4l2_dev, "port 0x%x already in use\n", isa->io); radio_isa_common_probe() 280 drv->card, isa->io); radio_isa_common_probe() 286 release_region(isa->io, region_size); radio_isa_common_probe() 300 release_region(isa->io, region_size); radio_isa_common_remove() 316 isa->io = drv->io_params[dev]; radio_isa_probe() 323 int io = drv->io_ports[i]; radio_isa_probe() local 325 if (request_region(io, drv->region_size, v4l2_dev->name)) { radio_isa_probe() 326 bool found = ops->probe(isa, io); radio_isa_probe() 328 release_region(io, drv->region_size); radio_isa_probe() 330 isa->io = io; radio_isa_probe() 337 if (!radio_isa_valid_io(drv, isa->io)) { radio_isa_probe() 340 if (isa->io < 0) radio_isa_probe() 342 v4l2_err(v4l2_dev, "you must set an I/O address with io=0x%03x", radio_isa_probe() 379 isa->io = pnp_port_start(dev, 0); radio_isa_pnp_probe()
|
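radio_isa_probe() above walks a table of candidate ports, reserving each one just long enough to run the card-specific probe callback. A minimal sketch of that claim/probe/release loop (port table and callback are placeholders; as in the excerpt, the region is released again and re-requested later by the common probe path):

#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_find_port(const int *ports, int nports, int region_size,
			  bool (*probe)(int io))
{
	int i;

	for (i = 0; i < nports; i++) {
		int io = ports[i];
		bool found;

		if (!request_region(io, region_size, "demo-isa"))
			continue;		/* port busy, try the next one */
		found = probe(io);
		release_region(io, region_size);
		if (found)
			return io;		/* a card answered at this port */
	}
	return -ENODEV;
}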
H A D | radio-gemtek.c | 31 #include <linux/io.h> /* outb, outb_p */ 62 static int io[GEMTEK_MAX] = { [0] = CONFIG_RADIO_GEMTEK_PORT, variable 73 module_param_array(io, int, NULL, 0444); 74 MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic " 160 outb_p(mute | GEMTEK_CE | GEMTEK_DA | GEMTEK_CK, isa->io); gemtek_bu2614_transmit() 165 outb_p(mute | GEMTEK_CE | bit, isa->io); gemtek_bu2614_transmit() 167 outb_p(mute | GEMTEK_CE | bit | GEMTEK_CK, isa->io); gemtek_bu2614_transmit() 171 outb_p(mute | GEMTEK_DA | GEMTEK_CK, isa->io); gemtek_bu2614_transmit() 241 i = inb_p(isa->io); gemtek_s_mute_volume() 243 outb_p((i >> 5) | (mute ? GEMTEK_MT : 0), isa->io); gemtek_s_mute_volume() 250 if (inb_p(isa->io) & GEMTEK_NS) gemtek_g_rxsubchans() 258 static bool gemtek_probe(struct radio_isa_card *isa, int io) gemtek_probe() argument 262 q = inb_p(io); /* Read bus contents before probing. */ gemtek_probe() 266 outb_p(1 << i, io); gemtek_probe() 269 if ((inb_p(io) & ~GEMTEK_NS) != (0x17 | (1 << (i + 5)))) gemtek_probe() 272 outb_p(q >> 5, io); /* Write bus contents back. */ gemtek_probe() 314 .io_params = io,
|
H A D | radio-maxiradio.c | 43 #include <linux/io.h> 75 u16 io; /* base of radio io */ member in struct:maxiradio 93 outb(bits, dev->io); maxiradio_tea575x_set_pins() 101 u8 bits = inb(dev->io); maxiradio_tea575x_get_pins() 160 dev->io = pci_resource_start(pdev, 0); maxiradio_probe() 183 outb(0, dev->io); maxiradio_remove()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
H A D | vvp_io.c | 54 * True, if \a io is a normal io, False for splice_{read,write} 56 int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) cl_is_normalio() argument 60 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); cl_is_normalio() 71 static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, can_populate_pages() argument 78 switch (io->ci_type) { can_populate_pages() 84 io->ci_need_restart = 1; can_populate_pages() 86 io->ci_continue = 0; can_populate_pages() 100 * io operations. 118 struct cl_io *io = ios->cis_io; vvp_io_fini() local 119 struct cl_object *obj = io->ci_obj; vvp_io_fini() 127 io->ci_ignore_layout, io->ci_verify_layout, vvp_io_fini() 128 cio->cui_layout_gen, io->ci_restore_needed); vvp_io_fini() 130 if (io->ci_restore_needed == 1) { vvp_io_fini() 134 * before finishing the io vvp_io_fini() 146 io->ci_restore_needed = 0; vvp_io_fini() 147 io->ci_need_restart = 1; vvp_io_fini() 148 io->ci_verify_layout = 1; vvp_io_fini() 150 io->ci_restore_needed = 1; vvp_io_fini() 151 io->ci_need_restart = 0; vvp_io_fini() 152 io->ci_verify_layout = 0; vvp_io_fini() 153 io->ci_result = rc; vvp_io_fini() 157 if (!io->ci_ignore_layout && io->ci_verify_layout) { vvp_io_fini() 162 io->ci_need_restart = cio->cui_layout_gen != gen; vvp_io_fini() 163 if (io->ci_need_restart) { vvp_io_fini() 180 struct cl_io *io = ios->cis_io; vvp_io_fault_fini() local 181 struct cl_page *page = io->u.ci_fault.ft_page; vvp_io_fault_fini() 183 CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); vvp_io_fault_fini() 186 lu_ref_del(&page->cp_reference, "fault", io); vvp_io_fault_fini() 188 io->u.ci_fault.ft_page = NULL; vvp_io_fault_fini() 206 struct ccc_io *vio, struct cl_io *io) vvp_mmap_locks() 219 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); vvp_mmap_locks() 221 if (!cl_is_normalio(env, io)) vvp_mmap_locks() 255 * io only ever reads user level buffer, and CIT_READ vvp_mmap_locks() 266 result = cl_io_lock_alloc_add(env, io, descr); vvp_mmap_locks() 288 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io, vvp_io_rw_lock() argument 295 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); vvp_io_rw_lock() 297 ccc_io_update_iov(env, cio, io); vvp_io_rw_lock() 299 if (io->u.ci_rw.crw_nonblock) vvp_io_rw_lock() 301 result = vvp_mmap_locks(env, cio, io); vvp_io_rw_lock() 303 result = ccc_io_one_lock(env, io, ast_flags, mode, start, end); vvp_io_rw_lock() 310 struct cl_io *io = ios->cis_io; vvp_io_read_lock() local 311 struct cl_io_rw_common *rd = &io->u.ci_rd.rd; vvp_io_read_lock() 314 result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos, vvp_io_read_lock() 323 struct cl_io *io = ios->cis_io; vvp_io_fault_lock() local 329 (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma), vvp_io_fault_lock() 330 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index); vvp_io_fault_lock() 336 struct cl_io *io = ios->cis_io; vvp_io_write_lock() local 340 if (io->u.ci_wr.wr_append) { vvp_io_write_lock() 344 start = io->u.ci_wr.wr.crw_pos; vvp_io_write_lock() 345 end = start + io->u.ci_wr.wr.crw_count - 1; vvp_io_write_lock() 347 return vvp_io_rw_lock(env, io, CLM_WRITE, start, end); vvp_io_write_lock() 357 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io. 359 * Handles "lockless io" mode when extent locking is done by server. 
365 struct cl_io *io = ios->cis_io; vvp_io_setattr_lock() local 369 if (cl_io_is_trunc(io)) { vvp_io_setattr_lock() 370 new_size = io->u.ci_setattr.sa_attr.lvb_size; vvp_io_setattr_lock() 374 if ((io->u.ci_setattr.sa_attr.lvb_mtime >= vvp_io_setattr_lock() 375 io->u.ci_setattr.sa_attr.lvb_ctime) || vvp_io_setattr_lock() 376 (io->u.ci_setattr.sa_attr.lvb_atime >= vvp_io_setattr_lock() 377 io->u.ci_setattr.sa_attr.lvb_ctime)) vvp_io_setattr_lock() 382 return ccc_io_one_lock(env, io, enqflags, CLM_WRITE, vvp_io_setattr_lock() 414 struct cl_io *io = ios->cis_io; vvp_io_setattr_time() local 415 struct cl_object *obj = io->ci_obj; vvp_io_setattr_time() 421 attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime; vvp_io_setattr_time() 422 if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) { vvp_io_setattr_time() 423 attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime; vvp_io_setattr_time() 426 if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) { vvp_io_setattr_time() 427 attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime; vvp_io_setattr_time() 439 struct cl_io *io = ios->cis_io; vvp_io_setattr_start() local 440 struct inode *inode = ccc_object_inode(io->ci_obj); vvp_io_setattr_start() 444 if (cl_io_is_trunc(io)) vvp_io_setattr_start() 446 io->u.ci_setattr.sa_attr.lvb_size); vvp_io_setattr_start() 455 struct cl_io *io = ios->cis_io; vvp_io_setattr_end() local 456 struct inode *inode = ccc_object_inode(io->ci_obj); vvp_io_setattr_end() 458 if (cl_io_is_trunc(io)) { vvp_io_setattr_end() 461 vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); vvp_io_setattr_end() 478 struct cl_io *io = ios->cis_io; vvp_io_read_start() local 479 struct cl_object *obj = io->ci_obj; vvp_io_read_start() 485 loff_t pos = io->u.ci_rd.rd.crw_pos; vvp_io_read_start() 486 long cnt = io->u.ci_rd.rd.crw_count; vvp_io_read_start() 494 if (!can_populate_pages(env, io, inode)) vvp_io_read_start() 497 result = ccc_prep_size(env, obj, io, pos, tot, &exceed); vvp_io_read_start() 535 io->ci_continue = 0; vvp_io_read_start() 545 io->ci_continue = 0; vvp_io_read_start() 546 io->ci_nob += result; vvp_io_read_start() 569 struct cl_io *io = ios->cis_io; vvp_io_write_start() local 570 struct cl_object *obj = io->ci_obj; vvp_io_write_start() 573 loff_t pos = io->u.ci_wr.wr.crw_pos; vvp_io_write_start() 574 size_t cnt = io->u.ci_wr.wr.crw_count; vvp_io_write_start() 576 if (!can_populate_pages(env, io, inode)) vvp_io_write_start() 579 if (cl_io_is_append(io)) { vvp_io_write_start() 584 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode); vvp_io_write_start() 592 if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). 
*/ vvp_io_write_start() 599 io->ci_continue = 0; vvp_io_write_start() 600 io->ci_nob += result; vvp_io_write_start() 652 struct cl_io *io = ios->cis_io; vvp_io_fault_start() local 653 struct cl_object *obj = io->ci_obj; vvp_io_fault_start() 655 struct cl_fault_io *fio = &io->u.ci_fault; vvp_io_fault_start() 673 result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL); vvp_io_fault_start() 750 cl_page_assume(env, io, page); vvp_io_fault_start() 758 result = cl_page_cache_add(env, io, page, CRT_WRITE); vvp_io_fault_start() 759 LASSERT(cl_page_is_owned(page, io)); vvp_io_fault_start() 763 cl_page_unmap(env, io, page); vvp_io_fault_start() 764 cl_page_discard(env, io, page); vvp_io_fault_start() 765 cl_page_disown(env, io, page); vvp_io_fault_start() 774 cl_page_disown(env, io, page); vvp_io_fault_start() 794 lu_ref_add(&page->cp_reference, "fault", io); vvp_io_fault_start() 818 struct cl_io *io = ios->cis_io; vvp_io_read_page() local 827 struct cl_2queue *queue = &io->ci_queue; vvp_io_read_page() 839 rc = cl_page_is_under_lock(env, io, page); vvp_io_read_page() 859 ll_readahead(env, io, ras, vvp_io_read_page() 865 static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io, vvp_page_sync_io() argument 872 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); vvp_page_sync_io() 874 queue = &io->ci_queue; vvp_page_sync_io() 877 result = cl_io_submit_sync(env, io, crt, queue, 0); vvp_page_sync_io() 878 LASSERT(cl_page_is_owned(page, io)); vvp_page_sync_io() 885 cl_page_list_disown(env, io, &queue->c2_qin); vvp_page_sync_io() 894 static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io, vvp_io_prepare_partial() argument 920 result = vvp_page_sync_io(env, io, pg, cp, CRT_READ); vvp_io_prepare_partial() 975 struct cl_io *io = ios->cis_io; vvp_io_commit_write() local 1005 * (2) large compute jobs generally want compute-only then io-only vvp_io_commit_write() 1015 result = cl_page_cache_add(env, io, pg, CRT_WRITE); vvp_io_commit_write() 1033 * (b) is a part of "parallel io" design that is the vvp_io_commit_write() 1048 result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE); vvp_io_commit_write() 1086 cl_page_discard(env, io, pg); vvp_io_commit_write() 1134 struct cl_io *io) vvp_io_init() 1146 io->ci_ignore_layout, io->ci_verify_layout, vvp_io_init() 1147 cio->cui_layout_gen, io->ci_restore_needed); vvp_io_init() 1150 cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops); vvp_io_init() 1153 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) { vvp_io_init() 1157 count = io->u.ci_rw.crw_count; vvp_io_init() 1172 } else if (io->ci_type == CIT_SETATTR) { vvp_io_init() 1173 if (!cl_io_is_trunc(io)) vvp_io_init() 1174 io->ci_lockreq = CILR_MANDATORY; vvp_io_init() 1178 * io context for glimpse must set ci_verify_layout to true, vvp_io_init() 1180 if (io->ci_type == CIT_MISC && !io->ci_verify_layout) vvp_io_init() 1181 io->ci_ignore_layout = 1; vvp_io_init() 1186 if (result == 0 && !io->ci_ignore_layout) { vvp_io_init() 205 vvp_mmap_locks(const struct lu_env *env, struct ccc_io *vio, struct cl_io *io) vvp_mmap_locks() argument 1133 vvp_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) vvp_io_init() argument
|
H A D | llite_mmap.c | 104 struct cl_io *io; ll_fault_io_init() local 126 io = ccc_env_thread_io(env); ll_fault_io_init() 127 io->ci_obj = ll_i2info(inode)->lli_clob; ll_fault_io_init() 128 LASSERT(io->ci_obj != NULL); ll_fault_io_init() 130 fio = &io->u.ci_fault; ll_fault_io_init() 147 rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); ll_fault_io_init() 152 LASSERT(cio->cui_cl.cis_io == io); ll_fault_io_init() 156 io->ci_lockreq = CILR_MANDATORY; ll_fault_io_init() 160 cl_io_fini(env, io); ll_fault_io_init() 162 io = ERR_PTR(rc); ll_fault_io_init() 165 return io; ll_fault_io_init() 173 struct cl_io *io; ll_page_mkwrite0() local 183 io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); ll_page_mkwrite0() 184 if (IS_ERR(io)) { ll_page_mkwrite0() 185 result = PTR_ERR(io); ll_page_mkwrite0() 189 result = io->ci_result; ll_page_mkwrite0() 193 io->u.ci_fault.ft_mkwrite = 1; ll_page_mkwrite0() 194 io->u.ci_fault.ft_writable = 1; ll_page_mkwrite0() 205 inode = ccc_object_inode(io->ci_obj); ll_page_mkwrite0() 209 result = cl_io_loop(env, io); ll_page_mkwrite0() 252 cl_io_fini(env, io); ll_page_mkwrite0() 296 struct cl_io *io; ll_fault0() local 304 io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); ll_fault0() 305 if (IS_ERR(io)) ll_fault0() 306 return to_fault_error(PTR_ERR(io)); ll_fault0() 308 result = io->ci_result; ll_fault0() 317 result = cl_io_loop(env, io); ll_fault0() 330 cl_io_fini(env, io); ll_fault0()
|
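Both ll_page_mkwrite0() and ll_fault0() above drive a page fault through the same client-IO lifecycle: obtain an io object, cl_io_init() it for CIT_FAULT, run cl_io_loop(), then cl_io_fini(). A skeleton of that flow built only from calls visible in the excerpt; environment setup via ll_fault_io_init() and all error paths are omitted:

static int demo_fault(const struct lu_env *env, struct cl_io *io,
		      struct cl_object *obj)
{
	int rc;

	rc = cl_io_init(env, io, CIT_FAULT, obj);
	if (rc == 0) {
		io->ci_lockreq = CILR_MANDATORY;	/* as in ll_fault_io_init() */
		rc = cl_io_loop(env, io);
	}
	cl_io_fini(env, io);
	return rc;
}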
H A D | rw.c | 69 struct cl_io *io = lcc->lcc_io; ll_cl_fini() local 76 lu_ref_del(&page->cp_reference, "cl_io", io); ll_cl_fini() 92 struct cl_io *io; ll_cl_init() local 113 io = cio->cui_cl.cis_io; ll_cl_init() 114 if (io == NULL && create) { ll_cl_init() 136 io = ccc_env_thread_io(env); ll_cl_init() 137 ll_io_init(io, file, 1); ll_cl_init() 146 io->ci_lockreq = CILR_NEVER; ll_cl_init() 151 result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); ll_cl_init() 155 result = cl_io_iter_init(env, io); ll_cl_init() 157 result = cl_io_lock(env, io); ll_cl_init() 159 result = cl_io_start(env, io); ll_cl_init() 162 result = io->ci_result; ll_cl_init() 165 lcc->lcc_io = io; ll_cl_init() 166 if (io == NULL) ll_cl_init() 171 LASSERT(io != NULL); ll_cl_init() 172 LASSERT(io->ci_state == CIS_IO_GOING); ll_cl_init() 178 lu_ref_add(&page->cp_reference, "cl_io", io); ll_cl_init() 190 env, io); ll_cl_init() 224 struct cl_io *io = lcc->lcc_io; ll_prepare_write() local 227 cl_page_assume(env, io, page); ll_prepare_write() 229 result = cl_io_prepare_write(env, io, page, from, to); ll_prepare_write() 239 cl_page_unassume(env, io, page); ll_prepare_write() 255 struct cl_io *io; ll_commit_write() local 262 io = lcc->lcc_io; ll_commit_write() 264 LASSERT(cl_page_is_owned(page, io)); ll_commit_write() 267 result = cl_io_commit_write(env, io, page, from, to); ll_commit_write() 268 if (cl_page_is_owned(page, io)) ll_commit_write() 269 cl_page_unassume(env, io, page); ll_commit_write() 452 static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io, cl_read_ahead_page() argument 460 cl_page_assume(env, io, page); cl_read_ahead_page() 464 rc = cl_page_is_under_lock(env, io, page); cl_read_ahead_page() 476 cl_page_unassume(env, io, page); cl_read_ahead_page() 493 static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, ll_read_ahead_page() argument 511 rc = cl_read_ahead_page(env, io, queue, ll_read_ahead_page() 638 struct cl_io *io, struct cl_page_list *queue, ll_read_ahead_pages() 655 rc = ll_read_ahead_page(env, io, queue, ll_read_ahead_pages() 688 int ll_readahead(const struct lu_env *env, struct cl_io *io, ll_readahead() argument 792 ret = ll_read_ahead_pages(env, io, queue, ll_readahead() 1127 struct cl_io *io; ll_writepage() local 1149 io = ccc_env_thread_io(env); ll_writepage() 1150 io->ci_obj = clob; ll_writepage() 1151 io->ci_ignore_layout = 1; ll_writepage() 1152 result = cl_io_init(env, io, CIT_MISC, clob); ll_writepage() 1159 cl_page_assume(env, io, page); ll_writepage() 1160 result = cl_page_flush(env, io, page); ll_writepage() 1173 cl_page_disown(env, io, page); ll_writepage() 1182 cl_io_fini(env, io); ll_writepage() 1271 struct cl_io *io = lcc->lcc_io; ll_readpage() local 1276 cl_page_assume(env, io, page); ll_readpage() 1277 result = cl_io_read_page(env, io, page); ll_readpage() 637 ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, struct ra_io_arg *ria, unsigned long *reserved_pages, struct address_space *mapping, unsigned long *ra_end) ll_read_ahead_pages() argument
|
/linux-4.1.27/arch/m32r/include/asm/ |
H A D | dma.h | 4 #include <asm/io.h>
|
H A D | mc146818rtc.h | 7 #include <asm/io.h>
|
/linux-4.1.27/arch/m68k/sun3/ |
H A D | leds.c | 3 #include <asm/io.h>
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | vga.h | 4 #include <linux/io.h>
|
H A D | mc146818rtc.h | 7 #include <linux/io.h>
|
/linux-4.1.27/fs/befs/ |
H A D | Makefile | 7 befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o
|
/linux-4.1.27/sound/soc/sh/ |
H A D | fsi.c | 18 #include <linux/io.h> 273 int (*init)(struct fsi_priv *fsi, struct fsi_stream *io); 274 int (*quit)(struct fsi_priv *fsi, struct fsi_stream *io); 275 int (*probe)(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev); 276 int (*transfer)(struct fsi_priv *fsi, struct fsi_stream *io); 277 int (*remove)(struct fsi_priv *fsi, struct fsi_stream *io); 278 int (*start_stop)(struct fsi_priv *fsi, struct fsi_stream *io, 281 #define fsi_stream_handler_call(io, func, args...) \ 282 (!(io) ? -ENODEV : \ 283 !((io)->handler->func) ? 0 : \ 284 (io)->handler->func(args)) 304 static int fsi_stream_is_play(struct fsi_priv *fsi, struct fsi_stream *io); 428 static u32 fsi_get_port_shift(struct fsi_priv *fsi, struct fsi_stream *io) fsi_get_port_shift() argument 430 int is_play = fsi_stream_is_play(fsi, io); fsi_get_port_shift() 453 struct fsi_stream *io) fsi_get_current_fifo_samples() 455 int is_play = fsi_stream_is_play(fsi, io); fsi_get_current_fifo_samples() 493 struct fsi_stream *io) fsi_stream_is_play() 495 return &fsi->playback == io; fsi_stream_is_play() 505 struct fsi_stream *io) fsi_stream_is_working() 512 ret = !!(io->substream && io->substream->runtime); fsi_stream_is_working() 518 static struct fsi_priv *fsi_stream_to_priv(struct fsi_stream *io) fsi_stream_to_priv() argument 520 return io->priv; fsi_stream_to_priv() 524 struct fsi_stream *io, fsi_stream_init() 532 io->substream = substream; fsi_stream_init() 533 io->buff_sample_capa = fsi_frame2sample(fsi, runtime->buffer_size); fsi_stream_init() 534 io->buff_sample_pos = 0; fsi_stream_init() 535 io->period_samples = fsi_frame2sample(fsi, runtime->period_size); fsi_stream_init() 536 io->period_pos = 0; fsi_stream_init() 537 io->sample_width = samples_to_bytes(runtime, 1); fsi_stream_init() 538 io->bus_option = 0; fsi_stream_init() 539 io->oerr_num = -1; /* ignore 1st err */ fsi_stream_init() 540 io->uerr_num = -1; /* ignore 1st err */ fsi_stream_init() 541 fsi_stream_handler_call(io, init, fsi, io); fsi_stream_init() 545 static void fsi_stream_quit(struct fsi_priv *fsi, struct fsi_stream *io) fsi_stream_quit() argument 547 struct snd_soc_dai *dai = fsi_get_dai(io->substream); fsi_stream_quit() 553 if (io->oerr_num > 0) fsi_stream_quit() 554 dev_err(dai->dev, "over_run = %d\n", io->oerr_num); fsi_stream_quit() 556 if (io->uerr_num > 0) fsi_stream_quit() 557 dev_err(dai->dev, "under_run = %d\n", io->uerr_num); fsi_stream_quit() 559 fsi_stream_handler_call(io, quit, fsi, io); fsi_stream_quit() 560 io->substream = NULL; fsi_stream_quit() 561 io->buff_sample_capa = 0; fsi_stream_quit() 562 io->buff_sample_pos = 0; fsi_stream_quit() 563 io->period_samples = 0; fsi_stream_quit() 564 io->period_pos = 0; fsi_stream_quit() 565 io->sample_width = 0; fsi_stream_quit() 566 io->bus_option = 0; fsi_stream_quit() 567 io->oerr_num = 0; fsi_stream_quit() 568 io->uerr_num = 0; fsi_stream_quit() 572 static int fsi_stream_transfer(struct fsi_stream *io) fsi_stream_transfer() argument 574 struct fsi_priv *fsi = fsi_stream_to_priv(io); fsi_stream_transfer() 578 return fsi_stream_handler_call(io, transfer, fsi, io); fsi_stream_transfer() 581 #define fsi_stream_start(fsi, io)\ 582 fsi_stream_handler_call(io, start_stop, fsi, io, 1) 584 #define fsi_stream_stop(fsi, io)\ 585 fsi_stream_handler_call(io, start_stop, fsi, io, 0) 589 struct fsi_stream *io; fsi_stream_probe() local 592 io = &fsi->playback; fsi_stream_probe() 593 ret1 = fsi_stream_handler_call(io, probe, fsi, io, dev); fsi_stream_probe() 595 io = &fsi->capture; fsi_stream_probe() 596 
ret2 = fsi_stream_handler_call(io, probe, fsi, io, dev); fsi_stream_probe() 608 struct fsi_stream *io; fsi_stream_remove() local 611 io = &fsi->playback; fsi_stream_remove() 612 ret1 = fsi_stream_handler_call(io, remove, fsi, io); fsi_stream_remove() 614 io = &fsi->capture; fsi_stream_remove() 615 ret2 = fsi_stream_handler_call(io, remove, fsi, io); fsi_stream_remove() 628 static void fsi_format_bus_setup(struct fsi_priv *fsi, struct fsi_stream *io, fsi_format_bus_setup() argument 632 int is_play = fsi_stream_is_play(fsi, io); fsi_format_bus_setup() 676 static void fsi_irq_enable(struct fsi_priv *fsi, struct fsi_stream *io) fsi_irq_enable() argument 678 u32 data = AB_IO(1, fsi_get_port_shift(fsi, io)); fsi_irq_enable() 685 static void fsi_irq_disable(struct fsi_priv *fsi, struct fsi_stream *io) fsi_irq_disable() argument 687 u32 data = AB_IO(1, fsi_get_port_shift(fsi, io)); fsi_irq_disable() 1036 static void fsi_pointer_update(struct fsi_stream *io, int size) fsi_pointer_update() argument 1038 io->buff_sample_pos += size; fsi_pointer_update() 1040 if (io->buff_sample_pos >= fsi_pointer_update() 1041 io->period_samples * (io->period_pos + 1)) { fsi_pointer_update() 1042 struct snd_pcm_substream *substream = io->substream; fsi_pointer_update() 1045 io->period_pos++; fsi_pointer_update() 1047 if (io->period_pos >= runtime->periods) { fsi_pointer_update() 1048 io->buff_sample_pos = 0; fsi_pointer_update() 1049 io->period_pos = 0; fsi_pointer_update() 1109 static u8 *fsi_pio_get_area(struct fsi_priv *fsi, struct fsi_stream *io) fsi_pio_get_area() argument 1111 struct snd_pcm_runtime *runtime = io->substream->runtime; fsi_pio_get_area() 1114 samples_to_bytes(runtime, io->buff_sample_pos); fsi_pio_get_area() 1117 static int fsi_pio_transfer(struct fsi_priv *fsi, struct fsi_stream *io, fsi_pio_transfer() argument 1124 if (!fsi_stream_is_working(fsi, io)) fsi_pio_transfer() 1127 buf = fsi_pio_get_area(fsi, io); fsi_pio_transfer() 1129 switch (io->sample_width) { fsi_pio_transfer() 1140 fsi_pointer_update(io, samples); fsi_pio_transfer() 1145 static int fsi_pio_pop(struct fsi_priv *fsi, struct fsi_stream *io) fsi_pio_pop() argument 1151 sample_residues = fsi_get_current_fifo_samples(fsi, io); fsi_pio_pop() 1152 sample_space = io->buff_sample_capa - io->buff_sample_pos; fsi_pio_pop() 1156 return fsi_pio_transfer(fsi, io, fsi_pio_pop() 1162 static int fsi_pio_push(struct fsi_priv *fsi, struct fsi_stream *io) fsi_pio_push() argument 1168 sample_residues = io->buff_sample_capa - io->buff_sample_pos; fsi_pio_push() 1169 sample_space = io->fifo_sample_capa - fsi_pio_push() 1170 fsi_get_current_fifo_samples(fsi, io); fsi_pio_push() 1174 return fsi_pio_transfer(fsi, io, fsi_pio_push() 1180 static int fsi_pio_start_stop(struct fsi_priv *fsi, struct fsi_stream *io, fsi_pio_start_stop() argument 1187 fsi_irq_enable(fsi, io); fsi_pio_start_stop() 1189 fsi_irq_disable(fsi, io); fsi_pio_start_stop() 1197 static int fsi_pio_push_init(struct fsi_priv *fsi, struct fsi_stream *io) fsi_pio_push_init() argument 1207 io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) | fsi_pio_push_init() 1210 io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) | fsi_pio_push_init() 1215 static int fsi_pio_pop_init(struct fsi_priv *fsi, struct fsi_stream *io) fsi_pio_pop_init() argument 1220 io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) | fsi_pio_pop_init() 1267 static int fsi_dma_init(struct fsi_priv *fsi, struct fsi_stream *io) fsi_dma_init() argument 1273 io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) | 
fsi_dma_init() 1281 struct fsi_stream *io = (struct fsi_stream *)data; fsi_dma_complete() local 1282 struct fsi_priv *fsi = fsi_stream_to_priv(io); fsi_dma_complete() 1284 fsi_pointer_update(io, io->period_samples); fsi_dma_complete() 1289 static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io) fsi_dma_transfer() argument 1291 struct snd_soc_dai *dai = fsi_get_dai(io->substream); fsi_dma_transfer() 1292 struct snd_pcm_substream *substream = io->substream; fsi_dma_transfer() 1294 int is_play = fsi_stream_is_play(fsi, io); fsi_dma_transfer() 1303 desc = dmaengine_prep_dma_cyclic(io->chan, fsi_dma_transfer() 1315 desc->callback_param = io; fsi_dma_transfer() 1322 dma_async_issue_pending(io->chan); fsi_dma_transfer() 1346 static int fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io, fsi_dma_push_start_stop() argument 1355 dmaengine_terminate_all(io->chan); fsi_dma_push_start_stop() 1363 static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev) fsi_dma_probe() argument 1366 int is_play = fsi_stream_is_play(fsi, io); fsi_dma_probe() 1371 io->chan = dma_request_slave_channel_compat(mask, fsi_dma_probe() 1372 shdma_chan_filter, (void *)io->dma_id, fsi_dma_probe() 1374 if (io->chan) { fsi_dma_probe() 1388 ret = dmaengine_slave_config(io->chan, &cfg); fsi_dma_probe() 1390 dma_release_channel(io->chan); fsi_dma_probe() 1391 io->chan = NULL; fsi_dma_probe() 1395 if (!io->chan) { fsi_dma_probe() 1412 static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io) fsi_dma_remove() argument 1414 fsi_stream_stop(fsi, io); fsi_dma_remove() 1416 if (io->chan) fsi_dma_remove() 1417 dma_release_channel(io->chan); fsi_dma_remove() 1419 io->chan = NULL; fsi_dma_remove() 1435 struct fsi_stream *io, fsi_fifo_init() 1439 int is_play = fsi_stream_is_play(fsi, io); fsi_fifo_init() 1445 shift >>= fsi_get_port_shift(fsi, io); fsi_fifo_init() 1474 io->fifo_sample_capa = fsi_frame2sample(fsi, frame_capa); fsi_fifo_init() 1490 struct fsi_stream *io, fsi_hw_startup() 1521 switch (io->sample_width) { fsi_hw_startup() 1523 data = BUSOP_GET(16, io->bus_option); fsi_hw_startup() 1526 data = BUSOP_GET(24, io->bus_option); fsi_hw_startup() 1529 fsi_format_bus_setup(fsi, io, data, dev); fsi_hw_startup() 1532 fsi_irq_disable(fsi, io); fsi_hw_startup() 1536 fsi_fifo_init(fsi, io, dev); fsi_hw_startup() 1577 struct fsi_stream *io = fsi_stream_get(fsi, substream); fsi_dai_trigger() local 1582 fsi_stream_init(fsi, io, substream); fsi_dai_trigger() 1584 ret = fsi_hw_startup(fsi, io, dai->dev); fsi_dai_trigger() 1586 ret = fsi_stream_start(fsi, io); fsi_dai_trigger() 1588 ret = fsi_stream_transfer(io); fsi_dai_trigger() 1593 fsi_stream_stop(fsi, io); fsi_dai_trigger() 1594 fsi_stream_quit(fsi, io); fsi_dai_trigger() 1751 struct fsi_stream *io = fsi_stream_get(fsi, substream); fsi_pointer() local 1753 return fsi_sample2frame(fsi, io->buff_sample_pos); fsi_pointer() 2060 struct fsi_stream *io, __fsi_suspend() 2063 if (!fsi_stream_is_working(fsi, io)) __fsi_suspend() 2066 fsi_stream_stop(fsi, io); __fsi_suspend() 2071 struct fsi_stream *io, __fsi_resume() 2074 if (!fsi_stream_is_working(fsi, io)) __fsi_resume() 2077 fsi_hw_startup(fsi, io, dev); __fsi_resume() 2078 fsi_stream_start(fsi, io); __fsi_resume() 452 fsi_get_current_fifo_samples(struct fsi_priv *fsi, struct fsi_stream *io) fsi_get_current_fifo_samples() argument 492 fsi_stream_is_play(struct fsi_priv *fsi, struct fsi_stream *io) fsi_stream_is_play() argument 504 fsi_stream_is_working(struct fsi_priv *fsi, 
struct fsi_stream *io) fsi_stream_is_working() argument 523 fsi_stream_init(struct fsi_priv *fsi, struct fsi_stream *io, struct snd_pcm_substream *substream) fsi_stream_init() argument 1434 fsi_fifo_init(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev) fsi_fifo_init() argument 1489 fsi_hw_startup(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev) fsi_hw_startup() argument 2059 __fsi_suspend(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev) __fsi_suspend() argument 2070 __fsi_resume(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev) __fsi_resume() argument
|
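fsi_stream_handler_call() above is a guard macro: a missing stream yields -ENODEV, an unset hook is treated as success, and otherwise the call is forwarded to the handler. A self-contained restatement of that idiom (structure and hook names are invented for the example):

#include <linux/errno.h>

struct demo_handler {
	int (*transfer)(void *ctx);
};

struct demo_stream {
	const struct demo_handler *handler;
	void *ctx;
};

#define demo_stream_call(io, func, args...)		\
	(!(io) ? -ENODEV :				\
	 !((io)->handler->func) ? 0 :			\
	 (io)->handler->func(args))

/* Usage: ret = demo_stream_call(stream, transfer, stream->ctx); */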
/linux-4.1.27/fs/ext4/ |
H A D | page-io.c | 2 * linux/fs/ext4/page-io.c 98 * to avoid races with other end io clearing async_write flags bio_for_each_segment_all() 162 static int ext4_end_io(ext4_io_end_t *io) ext4_end_io() argument 164 struct inode *inode = io->inode; ext4_end_io() 165 loff_t offset = io->offset; ext4_end_io() 166 ssize_t size = io->size; ext4_end_io() 167 handle_t *handle = io->handle; ext4_end_io() 170 ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," ext4_end_io() 172 io, inode->i_ino, io->list.next, io->list.prev); ext4_end_io() 174 io->handle = NULL; /* Following call will use up the handle */ ext4_end_io() 183 ext4_clear_io_unwritten_flag(io); ext4_end_io() 184 ext4_release_io_end(io); ext4_end_io() 192 ext4_io_end_t *io, *io0, *io1; dump_completed_IO() local 197 ext4_debug("Dump inode %lu completed io list\n", inode->i_ino); list_for_each_entry() 198 list_for_each_entry(io, head, list) { list_for_each_entry() 199 cur = &io->list; list_for_each_entry() 205 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n", list_for_each_entry() 206 io, inode->i_ino, io0, io1); list_for_each_entry() 233 ext4_io_end_t *io; ext4_do_flush_completed_IO() local 245 io = list_entry(unwritten.next, ext4_io_end_t, list); ext4_do_flush_completed_IO() 246 BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN)); ext4_do_flush_completed_IO() 247 list_del_init(&io->list); ext4_do_flush_completed_IO() 249 err = ext4_end_io(io); ext4_do_flush_completed_IO() 268 ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags); ext4_init_io_end() local 269 if (io) { ext4_init_io_end() 271 io->inode = inode; ext4_init_io_end() 272 INIT_LIST_HEAD(&io->list); ext4_init_io_end() 273 atomic_set(&io->count, 1); ext4_init_io_end() 275 return io; ext4_init_io_end() 355 void ext4_io_submit(struct ext4_io_submit *io) ext4_io_submit() argument 357 struct bio *bio = io->io_bio; ext4_io_submit() 360 bio_get(io->io_bio); ext4_io_submit() 361 submit_bio(io->io_op, io->io_bio); ext4_io_submit() 362 BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP)); ext4_io_submit() 363 bio_put(io->io_bio); ext4_io_submit() 365 io->io_bio = NULL; ext4_io_submit() 368 void ext4_io_submit_init(struct ext4_io_submit *io, ext4_io_submit_init() argument 371 io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); ext4_io_submit_init() 372 io->io_bio = NULL; ext4_io_submit_init() 373 io->io_end = NULL; ext4_io_submit_init() 376 static int io_submit_init_bio(struct ext4_io_submit *io, io_submit_init_bio() argument 388 bio->bi_private = ext4_get_io_end(io->io_end); io_submit_init_bio() 389 io->io_bio = bio; io_submit_init_bio() 390 io->io_next_block = bh->b_blocknr; io_submit_init_bio() 394 static int io_submit_add_bh(struct ext4_io_submit *io, io_submit_add_bh() argument 401 if (io->io_bio && bh->b_blocknr != io->io_next_block) { io_submit_add_bh() 403 ext4_io_submit(io); io_submit_add_bh() 405 if (io->io_bio == NULL) { io_submit_add_bh() 406 ret = io_submit_init_bio(io, bh); io_submit_add_bh() 410 ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); io_submit_add_bh() 413 io->io_next_block++; io_submit_add_bh() 417 int ext4_bio_write_page(struct ext4_io_submit *io, ext4_bio_write_page() argument 473 if (io->io_bio) ext4_bio_write_page() 474 ext4_io_submit(io); ext4_bio_write_page() 501 ret = io_submit_add_bh(io, inode, ext4_bio_write_page()
|
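io_submit_add_bh() above batches buffer heads into a single bio for as long as their block numbers stay contiguous, flushing whenever the next block does not follow the previous one or bio_add_page() refuses the page. A compact restatement of that batching rule; demo_new_bio() and demo_flush() are hypothetical stand-ins for io_submit_init_bio() and ext4_io_submit():

#include <linux/bio.h>

struct demo_submit {
	struct bio	*bio;
	sector_t	next_block;
};

static int demo_new_bio(struct demo_submit *s, sector_t block);	/* allocate, set start sector */
static void demo_flush(struct demo_submit *s);				/* submit and forget the bio */

static int demo_add_page_block(struct demo_submit *s, struct page *page,
			       sector_t block, unsigned int len,
			       unsigned int off)
{
	int ret;

	if (s->bio && block != s->next_block)
		demo_flush(s);			/* discontiguous: submit what we have */
retry:
	if (!s->bio) {
		ret = demo_new_bio(s, block);
		if (ret)
			return ret;
	}
	if (bio_add_page(s->bio, page, len, off) != len) {
		demo_flush(s);			/* bio full: submit and retry */
		goto retry;
	}
	s->next_block++;
	return 0;
}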
H A D | Makefile | 7 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
|
/linux-4.1.27/drivers/input/gameport/ |
H A D | ns558.c | 30 #include <asm/io.h> 49 int io; member in struct:ns558 64 static int ns558_isa_probe(int io) ns558_isa_probe() argument 75 if (!request_region(io, 1, "ns558-isa")) ns558_isa_probe() 83 c = inb(io); ns558_isa_probe() 84 outb(~c & ~3, io); ns558_isa_probe() 85 if (~(u = v = inb(io)) & 3) { ns558_isa_probe() 86 outb(c, io); ns558_isa_probe() 87 release_region(io, 1); ns558_isa_probe() 94 for (i = 0; i < 1000; i++) v &= inb(io); ns558_isa_probe() 97 outb(c, io); ns558_isa_probe() 98 release_region(io, 1); ns558_isa_probe() 106 u = inb(io); ns558_isa_probe() 108 if ((u ^ inb(io)) & 0xf) { ns558_isa_probe() 109 outb(c, io); ns558_isa_probe() 110 release_region(io, 1); ns558_isa_probe() 119 release_region(io & (-1 << (i - 1)), (1 << (i - 1))); ns558_isa_probe() 121 if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) ns558_isa_probe() 124 outb(0xff, io & (-1 << i)); ns558_isa_probe() 126 if (inb(io & (-1 << i)) != inb((io & (-1 << i)) + (1 << i) - 1)) b++; ns558_isa_probe() 130 release_region(io & (-1 << i), (1 << i)); ns558_isa_probe() 138 if (!request_region(io & (-1 << i), (1 << i), "ns558-isa")) ns558_isa_probe() 146 release_region(io & (-1 << i), (1 << i)); ns558_isa_probe() 152 ns558->io = io; ns558_isa_probe() 156 port->io = io; ns558_isa_probe() 158 gameport_set_phys(port, "isa%04x/gameport0", io & (-1 << i)); ns558_isa_probe() 223 ns558->io = ioport; ns558_pnp_probe() 231 port->io = ioport; ns558_pnp_probe() 278 release_region(ns558->io & ~(ns558->size - 1), ns558->size); ns558_exit()
|
H A D | fm801-gp.c | 23 #include <asm/io.h> 48 w = inw(gameport->io + 2); fm801_gp_cooked_read() 51 w = inw(gameport->io + 4); fm801_gp_cooked_read() 53 w = inw(gameport->io + 6); fm801_gp_cooked_read() 56 w = inw(gameport->io + 8); fm801_gp_cooked_read() 58 outw(0xff, gameport->io); /* reset */ fm801_gp_cooked_read() 105 port->io = pci_resource_start(pci, 0); fm801_gp_probe() 108 gp->res_port = request_region(port->io, 0x10, "FM801 GP"); fm801_gp_probe() 111 port->io, port->io + 0x0f); fm801_gp_probe() 118 outb(0x60, port->io + 0x0d); /* enable joystick 1 and 2 */ fm801_gp_probe()
|
H A D | emu10k1-gp.c | 29 #include <asm/io.h> 44 int io; member in struct:emu 77 emu->io = pci_resource_start(pdev, 0); emu_probe() 86 port->io = emu->io; emu_probe() 88 if (!request_region(emu->io, emu->size, "emu10k1-gp")) { emu_probe() 90 emu->io, emu->io + emu->size - 1); emu_probe() 114 release_region(emu->io, emu->size); emu_remove()
|
/linux-4.1.27/arch/arm/mach-pxa/include/mach/ |
H A D | io.h | 2 * arch/arm/mach-pxa/include/mach/io.h 4 * Copied from asm/arch/sa1100/io.h
|
/linux-4.1.27/drivers/mtd/nand/ |
H A D | sharpsl.c | 27 #include <asm/io.h> 35 void __iomem *io; member in struct:sharpsl_nand 78 writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL); sharpsl_nand_hwcontrol() 88 return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0); sharpsl_nand_dev_ready() 94 writeb(0, sharpsl->io + ECCCLRR); sharpsl_nand_enable_hwecc() 100 ecc_code[0] = ~readb(sharpsl->io + ECCLPUB); sharpsl_nand_calculate_ecc() 101 ecc_code[1] = ~readb(sharpsl->io + ECCLPLB); sharpsl_nand_calculate_ecc() 102 ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03; sharpsl_nand_calculate_ecc() 103 return readb(sharpsl->io + ECCCNTR) != 0; sharpsl_nand_calculate_ecc() 129 dev_err(&pdev->dev, "no io memory resource defined!\n"); sharpsl_nand_probe() 135 sharpsl->io = ioremap(r->start, resource_size(r)); sharpsl_nand_probe() 136 if (!sharpsl->io) { sharpsl_nand_probe() 154 writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL); sharpsl_nand_probe() 157 this->IO_ADDR_R = sharpsl->io + FLASHIO; sharpsl_nand_probe() 158 this->IO_ADDR_W = sharpsl->io + FLASHIO; sharpsl_nand_probe() 195 iounmap(sharpsl->io); sharpsl_nand_probe() 212 iounmap(sharpsl->io); sharpsl_nand_remove()
|
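sharpsl_nand_hwcontrol() above is a straightforward MMIO read-modify-write: the controller is mapped once with ioremap() and individual FLASHCTL bits are updated through readb()/writeb(). A small sketch of that pattern; the register offset is illustrative, and the base pointer is assumed to come from ioremap() as in sharpsl_nand_probe():

#include <linux/io.h>

#define DEMO_CTL	0x00	/* offset of the control register in the mapped window */

static void demo_update_ctl(void __iomem *base, u8 clear, u8 set)
{
	u8 v = readb(base + DEMO_CTL);

	writeb((v & ~clear) | set, base + DEMO_CTL);
}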
/linux-4.1.27/sound/soc/kirkwood/ |
H A D | kirkwood-i2s.c | 16 #include <linux/io.h> 65 value = readl(priv->io+KIRKWOOD_I2S_PLAYCTL); kirkwood_i2s_set_fmt() 68 writel(value, priv->io+KIRKWOOD_I2S_PLAYCTL); kirkwood_i2s_set_fmt() 70 value = readl(priv->io+KIRKWOOD_I2S_RECCTL); kirkwood_i2s_set_fmt() 73 writel(value, priv->io+KIRKWOOD_I2S_RECCTL); kirkwood_i2s_set_fmt() 78 static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate) kirkwood_set_dco() argument 95 writel(value, io + KIRKWOOD_DCO_CTL); kirkwood_set_dco() 100 value = readl(io + KIRKWOOD_DCO_SPCR_STATUS); kirkwood_set_dco() 115 kirkwood_set_dco(priv->io, rate); kirkwood_set_rate() 127 writel(clks_ctrl, priv->io + KIRKWOOD_CLOCKS_CTRL); kirkwood_set_rate() 156 i2s_value = readl(priv->io+i2s_reg); kirkwood_i2s_hw_params() 220 writel(i2s_value, priv->io+i2s_reg); kirkwood_i2s_hw_params() 241 ctl = readl(priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 251 ctl = readl(priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 272 writel(value, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 276 value = readl(priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_play_trigger() 278 writel(value, priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_play_trigger() 282 writel(ctl, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 289 writel(ctl, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 291 value = readl(priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_play_trigger() 293 writel(value, priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_play_trigger() 297 writel(ctl, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 304 writel(ctl, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 312 writel(ctl, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_play_trigger() 328 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 340 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 343 value = readl(priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_rec_trigger() 345 writel(value, priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_rec_trigger() 348 writel(ctl, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 353 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 355 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 357 value = readl(priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_rec_trigger() 359 writel(value, priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_rec_trigger() 362 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 364 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 369 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 371 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 376 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 378 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_rec_trigger() 406 writel(0xffffffff, priv->io + KIRKWOOD_INT_CAUSE); kirkwood_i2s_init() 407 writel(0, priv->io + KIRKWOOD_INT_MASK); kirkwood_i2s_init() 409 reg_data = readl(priv->io + 0x1200); kirkwood_i2s_init() 412 writel(reg_data, priv->io + 0x1200); kirkwood_i2s_init() 416 reg_data = readl(priv->io + 0x1200); kirkwood_i2s_init() 419 writel(reg_data, priv->io + 0x1200); kirkwood_i2s_init() 422 value = readl(priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_init() 424 writel(value, priv->io + KIRKWOOD_PLAYCTL); kirkwood_i2s_init() 426 value = readl(priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_init() 428 writel(value, priv->io + KIRKWOOD_RECCTL); kirkwood_i2s_init() 548 priv->io = 
devm_ioremap_resource(&pdev->dev, mem); kirkwood_i2s_dev_probe() 549 if (IS_ERR(priv->io)) kirkwood_i2s_dev_probe() 550 return PTR_ERR(priv->io); kirkwood_i2s_dev_probe()
|
H A D | kirkwood-dma.c | 16 #include <linux/io.h> 50 mask = readl(priv->io + KIRKWOOD_INT_MASK); kirkwood_dma_irq() 51 status = readl(priv->io + KIRKWOOD_INT_CAUSE) & mask; kirkwood_dma_irq() 53 cause = readl(priv->io + KIRKWOOD_ERR_CAUSE); kirkwood_dma_irq() 57 writel(cause, priv->io + KIRKWOOD_ERR_CAUSE); kirkwood_dma_irq() 69 writel(status, priv->io + KIRKWOOD_INT_CAUSE); kirkwood_dma_irq() 145 writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK); kirkwood_dma_open() 152 kirkwood_dma_conf_mbus_windows(priv->io, kirkwood_dma_open() 156 kirkwood_dma_conf_mbus_windows(priv->io, kirkwood_dma_open() 176 writel(0, priv->io + KIRKWOOD_ERR_MASK); kirkwood_dma_close() 212 writel(count, priv->io + KIRKWOOD_PLAY_BYTE_INT_COUNT); kirkwood_dma_prepare() 213 writel(runtime->dma_addr, priv->io + KIRKWOOD_PLAY_BUF_ADDR); kirkwood_dma_prepare() 214 writel(size, priv->io + KIRKWOOD_PLAY_BUF_SIZE); kirkwood_dma_prepare() 216 writel(count, priv->io + KIRKWOOD_REC_BYTE_INT_COUNT); kirkwood_dma_prepare() 217 writel(runtime->dma_addr, priv->io + KIRKWOOD_REC_BUF_ADDR); kirkwood_dma_prepare() 218 writel(size, priv->io + KIRKWOOD_REC_BUF_SIZE); kirkwood_dma_prepare() 233 readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT)); kirkwood_dma_pointer() 236 readl(priv->io + KIRKWOOD_REC_BYTE_COUNT)); kirkwood_dma_pointer()
|
/linux-4.1.27/sound/soc/sh/rcar/ |
H A D | core.c | 124 #define rsnd_is_enable_path(io, name) \ 125 ((io)->info ? (io)->info->name : NULL) 126 #define rsnd_info_id(priv, io, name) \ 127 ((io)->info->name - priv->info->name##_info) 179 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_get_adinr() local 180 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_get_adinr() 223 #define rsnd_dai_call(fn, io, param...) \ 228 mod = (io)->mod[i]; \ 239 struct rsnd_dai_stream *io) rsnd_dai_connect() 244 if (io->mod[mod->type]) { rsnd_dai_connect() 254 io->mod[mod->type] = mod; rsnd_dai_connect() 255 mod->io = io; rsnd_dai_connect() 261 struct rsnd_dai_stream *io) rsnd_dai_disconnect() 263 mod->io = NULL; rsnd_dai_disconnect() 264 io->mod[mod->type] = NULL; rsnd_dai_disconnect() 285 int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional) rsnd_dai_pointer_offset() argument 287 struct snd_pcm_substream *substream = io->substream; rsnd_dai_pointer_offset() 289 int pos = io->byte_pos + additional; rsnd_dai_pointer_offset() 291 pos %= (runtime->periods * io->byte_per_period); rsnd_dai_pointer_offset() 296 void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int byte) rsnd_dai_pointer_update() argument 298 io->byte_pos += byte; rsnd_dai_pointer_update() 300 if (io->byte_pos >= io->next_period_byte) { rsnd_dai_pointer_update() 301 struct snd_pcm_substream *substream = io->substream; rsnd_dai_pointer_update() 304 io->period_pos++; rsnd_dai_pointer_update() 305 io->next_period_byte += io->byte_per_period; rsnd_dai_pointer_update() 307 if (io->period_pos >= runtime->periods) { rsnd_dai_pointer_update() 308 io->byte_pos = 0; rsnd_dai_pointer_update() 309 io->period_pos = 0; rsnd_dai_pointer_update() 310 io->next_period_byte = io->byte_per_period; rsnd_dai_pointer_update() 317 static int rsnd_dai_stream_init(struct rsnd_dai_stream *io, rsnd_dai_stream_init() argument 322 io->substream = substream; rsnd_dai_stream_init() 323 io->byte_pos = 0; rsnd_dai_stream_init() 324 io->period_pos = 0; rsnd_dai_stream_init() 325 io->byte_per_period = runtime->period_size * rsnd_dai_stream_init() 328 io->next_period_byte = io->byte_per_period; rsnd_dai_stream_init() 356 struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); rsnd_soc_dai_trigger() local 357 int ssi_id = rsnd_mod_id(rsnd_io_to_mod_ssi(io)); rsnd_soc_dai_trigger() 365 ret = rsnd_dai_stream_init(io, substream); rsnd_soc_dai_trigger() 373 ret = rsnd_dai_call(init, io, priv); rsnd_soc_dai_trigger() 377 ret = rsnd_dai_call(start, io, priv); rsnd_soc_dai_trigger() 382 ret = rsnd_dai_call(stop, io, priv); rsnd_soc_dai_trigger() 386 ret = rsnd_dai_call(quit, io, priv); rsnd_soc_dai_trigger() 466 #define rsnd_path_parse(priv, io, type) \ 472 if (rsnd_is_enable_path(io, type)) { \ 473 id = rsnd_info_id(priv, io, type); \ 476 ret = rsnd_dai_connect(mod, io); \ 482 #define rsnd_path_break(priv, io, type) \ 487 if (rsnd_is_enable_path(io, type)) { \ 488 id = rsnd_info_id(priv, io, type); \ 491 rsnd_dai_disconnect(mod, io); \ 498 struct rsnd_dai_stream *io) rsnd_path_init() 514 ret = rsnd_path_parse(priv, io, src); rsnd_path_init() 519 ret = rsnd_path_parse(priv, io, ssi); rsnd_path_init() 524 ret = rsnd_path_parse(priv, io, dvc); rsnd_path_init() 733 struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); rsnd_hw_params() local 736 ret = rsnd_dai_call(hw_params, io, substream, hw_params); rsnd_hw_params() 749 struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream); rsnd_pointer() local 751 return bytes_to_frames(runtime, io->byte_pos); rsnd_pointer() 949 
struct rsnd_dai_stream *io) rsnd_rdai_continuance_probe() 953 ret = rsnd_dai_call(probe, io, priv); rsnd_rdai_continuance_probe() 966 rsnd_dai_call(remove, io, priv); rsnd_rdai_continuance_probe() 971 rsnd_path_break(priv, io, src); rsnd_rdai_continuance_probe() 972 rsnd_path_break(priv, io, dvc); rsnd_rdai_continuance_probe() 977 rsnd_dai_call(fallback, io, priv); rsnd_rdai_continuance_probe() 983 ret = rsnd_dai_call(probe, io, priv); rsnd_rdai_continuance_probe() 238 rsnd_dai_connect(struct rsnd_mod *mod, struct rsnd_dai_stream *io) rsnd_dai_connect() argument 260 rsnd_dai_disconnect(struct rsnd_mod *mod, struct rsnd_dai_stream *io) rsnd_dai_disconnect() argument 496 rsnd_path_init(struct rsnd_priv *priv, struct rsnd_dai *rdai, struct rsnd_dai_stream *io) rsnd_path_init() argument 948 rsnd_rdai_continuance_probe(struct rsnd_priv *priv, struct rsnd_dai_stream *io) rsnd_rdai_continuance_probe() argument
|
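rsnd_dai_pointer_update() above advances a byte cursor, fires a period notification each time the cursor crosses the next period boundary, and wraps once all periods have been consumed. The same bookkeeping as a self-contained C sketch, with snd_pcm_period_elapsed() replaced by a boolean return value:

#include <stdbool.h>

struct demo_stream {
	unsigned int byte_pos;
	unsigned int period_pos;
	unsigned int byte_per_period;
	unsigned int next_period_byte;
	unsigned int periods;
};

/* Returns true when a full period completed; the driver above calls
 * snd_pcm_period_elapsed() at that point. */
static bool demo_pointer_update(struct demo_stream *io, unsigned int bytes)
{
	io->byte_pos += bytes;

	if (io->byte_pos < io->next_period_byte)
		return false;

	io->period_pos++;
	io->next_period_byte += io->byte_per_period;

	if (io->period_pos >= io->periods) {
		io->byte_pos = 0;
		io->period_pos = 0;
		io->next_period_byte = io->byte_per_period;
	}
	return true;
}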
H A D | ssi.c | 92 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_ssi_use_busif() local 100 if (rsnd_io_to_mod_src(io)) rsnd_ssi_use_busif() 126 struct rsnd_dai_stream *io) rsnd_ssi_master_clk_start() 128 struct rsnd_priv *priv = rsnd_io_to_priv(io); rsnd_ssi_master_clk_start() 129 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_ssi_master_clk_start() 139 unsigned int rate = rsnd_src_get_ssi_rate(priv, io, runtime); rsnd_ssi_master_clk_start() 180 struct rsnd_dai_stream *io) rsnd_ssi_hw_start() 182 struct rsnd_priv *priv = rsnd_io_to_priv(io); rsnd_ssi_hw_start() 183 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); rsnd_ssi_hw_start() 193 rsnd_ssi_hw_start(ssi->parent, io); rsnd_ssi_hw_start() 195 rsnd_ssi_master_clk_start(ssi, io); rsnd_ssi_hw_start() 227 struct rsnd_dai_stream *io = rsnd_mod_to_io(&ssi->mod); rsnd_ssi_hw_stop() local 228 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); rsnd_ssi_hw_stop() 276 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_ssi_init() local 277 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); rsnd_ssi_init() 278 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_ssi_init() 311 if (rsnd_io_is_play(io)) rsnd_ssi_init() 354 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_ssi_start() local 358 rsnd_ssi_hw_start(ssi, io); rsnd_ssi_start() 386 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_ssi_interrupt() local 390 if (!io) rsnd_ssi_interrupt() 395 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_ssi_interrupt() 397 rsnd_dai_pointer_offset(io, 0)); rsnd_ssi_interrupt() 404 if (rsnd_io_is_play(io)) rsnd_ssi_interrupt() 409 rsnd_dai_pointer_update(io, sizeof(*buf)); rsnd_ssi_interrupt() 545 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_ssi_dma_req() local 546 int is_play = rsnd_io_is_play(io); rsnd_ssi_dma_req() 125 rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi, struct rsnd_dai_stream *io) rsnd_ssi_master_clk_start() argument 179 rsnd_ssi_hw_start(struct rsnd_ssi *ssi, struct rsnd_dai_stream *io) rsnd_ssi_hw_start() argument
|
H A D | rsnd.h | 17 #include <linux/io.h> 255 struct rsnd_dai_stream *io; member in struct:rsnd_mod 294 #define rsnd_mod_to_io(mod) ((mod)->io) 323 #define rsnd_io_to_mod_ssi(io) ((io)->mod[RSND_MOD_SSI]) 324 #define rsnd_io_to_mod_src(io) ((io)->mod[RSND_MOD_SRC]) 325 #define rsnd_io_to_mod_dvc(io) ((io)->mod[RSND_MOD_DVC]) 326 #define rsnd_io_to_rdai(io) ((io)->rdai) 327 #define rsnd_io_to_priv(io) (rsnd_rdai_to_priv(rsnd_io_to_rdai(io))) 328 #define rsnd_io_is_play(io) (&rsnd_io_to_rdai(io)->playback == io) 329 #define rsnd_io_to_runtime(io) ((io)->substream ? \ 330 (io)->substream->runtime : NULL) 357 void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt); 358 int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional); 387 struct rsnd_dai_stream *io, 391 struct rsnd_dai_stream *io); 393 struct rsnd_dai_stream *io); 512 struct rsnd_dai_stream *io,
|
H A D | dma.c | 39 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_dmaen_complete() local 50 * ant it will breaks io->byte_pos rsnd_dmaen_complete() 53 rsnd_dai_pointer_update(io, io->byte_per_period); rsnd_dmaen_complete() 68 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_dmaen_start() local 69 struct snd_pcm_substream *substream = io->substream; rsnd_dmaen_start() 72 int is_play = rsnd_io_is_play(io); rsnd_dmaen_start() 138 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_dmaen_init() local 139 int is_play = rsnd_io_is_play(io); rsnd_dmaen_init() 243 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_dmapp_get_id() local 244 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io); rsnd_dmapp_get_id() 245 struct rsnd_mod *src = rsnd_io_to_mod_src(io); rsnd_dmapp_get_id() 246 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io); rsnd_dmapp_get_id() 394 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_gen2_dma_addr() local 397 int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod); rsnd_gen2_dma_addr() 398 int use_src = !!rsnd_io_to_mod_src(io); rsnd_gen2_dma_addr() 399 int use_dvc = !!rsnd_io_to_mod_dvc(io); rsnd_gen2_dma_addr() 472 struct rsnd_dai_stream *io = rsnd_mod_to_io(this); rsnd_dma_of_path() local 473 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io); rsnd_dma_of_path() 474 struct rsnd_mod *src = rsnd_io_to_mod_src(io); rsnd_dma_of_path() 475 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io); rsnd_dma_of_path() 554 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_dma_init() local 556 int is_play = rsnd_io_is_play(io); rsnd_dma_init()
|
H A D | src.c | 123 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_dma_req() local 124 int is_play = rsnd_io_is_play(io); rsnd_src_dma_req() 134 struct rsnd_dai_stream *io = rsnd_mod_to_io(ssi_mod); rsnd_src_ssiu_start() local 135 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); rsnd_src_ssiu_start() 136 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_ssiu_start() 241 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_convert_rate() local 242 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_convert_rate() 263 struct rsnd_dai_stream *io, rsnd_src_get_ssi_rate() 266 struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io); rsnd_src_get_ssi_rate() 288 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_set_convert_rate() local 289 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_set_convert_rate() 416 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_set_route_gen1() local 442 val = rsnd_io_is_play(io) ? 0x1 : 0x2; rsnd_src_set_route_gen1() 453 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_set_convert_timing_gen1() local 456 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_set_convert_timing_gen1() 648 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); _rsnd_src_start_gen2() local 649 u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11; _rsnd_src_start_gen2() 676 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_interrupt_gen2() local 678 if (!io) rsnd_src_interrupt_gen2() 703 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_set_convert_rate_gen2() local 704 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_set_convert_rate_gen2() 735 route |= rsnd_io_is_play(io) ? rsnd_src_set_convert_rate_gen2() 762 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_set_convert_timing_gen2() local 763 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_set_convert_timing_gen2() 769 ret = rsnd_adg_set_convert_clk_gen2(mod, io, rsnd_src_set_convert_timing_gen2() 773 ret = rsnd_adg_set_convert_timing_gen2(mod, io); rsnd_src_set_convert_timing_gen2() 857 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_reconvert_update() local 858 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); rsnd_src_reconvert_update() 879 struct rsnd_dai_stream *io = rsnd_mod_to_io(mod); rsnd_src_pcm_new() local 880 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); rsnd_src_pcm_new() 904 if (rsnd_io_to_mod_dvc(io)) rsnd_src_pcm_new() 911 rsnd_io_is_play(io) ? rsnd_src_pcm_new() 920 rsnd_io_is_play(io) ? rsnd_src_pcm_new() 262 rsnd_src_get_ssi_rate(struct rsnd_priv *priv, struct rsnd_dai_stream *io, struct snd_pcm_runtime *runtime) rsnd_src_get_ssi_rate() argument
|
H A D | adg.c | 35 static u32 rsnd_adg_ssi_ws_timing_gen2(struct rsnd_dai_stream *io) rsnd_adg_ssi_ws_timing_gen2() argument 37 struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io); rsnd_adg_ssi_ws_timing_gen2() 61 struct rsnd_dai_stream *io) rsnd_adg_set_cmd_timsel_gen2() 67 val = rsnd_adg_ssi_ws_timing_gen2(io); rsnd_adg_set_cmd_timsel_gen2() 78 struct rsnd_dai_stream *io, rsnd_adg_set_src_timsel_gen2() 81 int is_play = rsnd_io_is_play(io); rsnd_adg_set_src_timsel_gen2() 87 ws = rsnd_adg_ssi_ws_timing_gen2(io); rsnd_adg_set_src_timsel_gen2() 123 struct rsnd_dai_stream *io, rsnd_adg_set_convert_clk_gen2() 178 ret = rsnd_adg_set_src_timsel_gen2(mod, io, val); rsnd_adg_set_convert_clk_gen2() 192 struct rsnd_dai_stream *io) rsnd_adg_set_convert_timing_gen2() 194 u32 val = rsnd_adg_ssi_ws_timing_gen2(io); rsnd_adg_set_convert_timing_gen2() 196 return rsnd_adg_set_src_timsel_gen2(mod, io, val); rsnd_adg_set_convert_timing_gen2() 60 rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io) rsnd_adg_set_cmd_timsel_gen2() argument 77 rsnd_adg_set_src_timsel_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io, u32 timsel) rsnd_adg_set_src_timsel_gen2() argument 122 rsnd_adg_set_convert_clk_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io, unsigned int src_rate, unsigned int dst_rate) rsnd_adg_set_convert_clk_gen2() argument 191 rsnd_adg_set_convert_timing_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io) rsnd_adg_set_convert_timing_gen2() argument
|
/linux-4.1.27/drivers/vfio/pci/ |
H A D | vfio_pci_rdwr.c | 19 #include <linux/io.h> 30 static ssize_t do_io_rw(void __iomem *io, char __user *buf, do_io_rw() argument 53 iowrite32(le32_to_cpu(val), io + off); do_io_rw() 55 val = cpu_to_le32(ioread32(io + off)); do_io_rw() 69 iowrite16(le16_to_cpu(val), io + off); do_io_rw() 71 val = cpu_to_le16(ioread16(io + off)); do_io_rw() 85 iowrite8(val, io + off); do_io_rw() 87 val = ioread8(io + off); do_io_rw() 124 void __iomem *io; vfio_pci_bar_rw() local 143 io = pci_map_rom(pdev, &x_start); vfio_pci_bar_rw() 144 if (!io) vfio_pci_bar_rw() 154 io = pci_iomap(pdev, bar, 0); vfio_pci_bar_rw() 155 if (!io) { vfio_pci_bar_rw() 160 vdev->barmap[bar] = io; vfio_pci_bar_rw() 162 io = vdev->barmap[bar]; vfio_pci_bar_rw() 169 done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite); vfio_pci_bar_rw() 175 pci_unmap_rom(pdev, io); vfio_pci_bar_rw()
|
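do_io_rw() in the vfio_pci_rdwr.c entry above walks a BAR in the largest naturally aligned units it can, falling back from 32-bit to 16-bit to byte accesses. The helper below is a hedged user-space sketch of just that width-selection rule; the snippet only shows the three access branches, so treat the exact alignment test as an illustrative reconstruction, and note that the real function additionally copies the data to or from user space with ioread/iowrite accessors. chunk_size() is a made-up name.

/* Sketch of the access-width selection used by do_io_rw(): use the widest
 * access that is naturally aligned at 'off' and still fits in 'count'.
 * This only models the width choice; the real code then performs the
 * ioread32/iowrite32 (etc.) access and the user-space copy. */
#include <stddef.h>
#include <stdio.h>

static size_t chunk_size(size_t off, size_t count)
{
	if (count >= 4 && !(off % 4))
		return 4;               /* 32-bit access */
	if (count >= 2 && !(off % 2))
		return 2;               /* 16-bit access */
	return 1;                       /* byte access   */
}

int main(void)
{
	size_t off = 1, count = 11;     /* deliberately unaligned start */

	while (count) {
		size_t n = chunk_size(off, count);

		printf("access %zu byte(s) at offset %zu\n", n, off);
		off += n;
		count -= n;
	}
	return 0;
}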
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
H A D | Makefile | 2 acx.o boot.o init.o debugfs.o io.o
|
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
H A D | Makefile | 1 wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
|
/linux-4.1.27/arch/mips/include/asm/mach-rc32434/ |
H A D | rc32434.h | 9 #include <linux/io.h>
|
/linux-4.1.27/arch/cris/arch-v10/kernel/ |
H A D | crisksyms.c | 2 #include <asm/io.h>
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | dmi.h | 5 #include <asm/io.h>
|
H A D | dma.h | 10 #include <asm/io.h> /* need byte IO */
|
/linux-4.1.27/sound/core/oss/ |
H A D | Makefile | 10 io.o copy.o linear.o mulaw.o route.o rate.o
|
/linux-4.1.27/sound/oss/ |
H A D | msnd.c | 34 #include <asm/io.h> 245 register unsigned int io = dev->io; msnd_wait_TXDE() local 249 if (msnd_inb(io + HP_ISR) & HPISR_TXDE) msnd_wait_TXDE() 257 register unsigned int io = dev->io; msnd_wait_HC0() local 261 if (!(msnd_inb(io + HP_CVR) & HPCVR_HC)) msnd_wait_HC0() 273 msnd_outb(cmd, dev->io + HP_CVR); msnd_send_dsp_cmd() 287 register unsigned int io = dev->io; msnd_send_word() local 290 msnd_outb(high, io + HP_TXH); msnd_send_word() 291 msnd_outb(mid, io + HP_TXM); msnd_send_word() 292 msnd_outb(low, io + HP_TXL); msnd_send_word() 314 msnd_inb(dev->io + HP_RXL); msnd_upload_host() 315 msnd_inb(dev->io + HP_CVR); msnd_upload_host() 331 msnd_outb(msnd_inb(dev->io + HP_ICR) | HPICR_TREQ, dev->io + HP_ICR); msnd_enable_irq() 333 msnd_outb(dev->irqid, dev->io + HP_IRQM); msnd_enable_irq() 334 msnd_outb(msnd_inb(dev->io + HP_ICR) & ~HPICR_TREQ, dev->io + HP_ICR); msnd_enable_irq() 335 msnd_outb(msnd_inb(dev->io + HP_ICR) | HPICR_RREQ, dev->io + HP_ICR); msnd_enable_irq() 362 msnd_outb(msnd_inb(dev->io + HP_ICR) & ~HPICR_RREQ, dev->io + HP_ICR); msnd_disable_irq() 364 msnd_outb(HPIRQ_NONE, dev->io + HP_IRQM); msnd_disable_irq()
|
H A D | kahlua.c | 46 static u8 mixer_read(unsigned long io, u8 reg) mixer_read() argument 48 outb(reg, io + 4); mixer_read() 50 reg = inb(io + 5); mixer_read() 60 unsigned long io; probe_one() local 85 io = 0x220 + 0x20 * (map & 3); probe_one() 88 printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io); probe_one() 97 irq = mixer_read(io, 0x80) & 0x0F; probe_one() 98 dma8 = mixer_read(io, 0x81); probe_one() 152 hw_config->io_base = io; probe_one() 159 if (!request_region(io, 16, "soundblaster")) probe_one() 165 release_region(io, 16); probe_one()
|
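The kahlua.c hits read the XpressAudio configuration indirectly: probe_one() derives the SoundBlaster base from a two-bit map field (0x220 + 0x20 * (map & 3)) and then pulls the IRQ out of the low nibble of mixer register 0x80 and the 8-bit DMA channel from register 0x81. A small sketch of that decode follows, using hard-coded stand-in values instead of the real inb/outb accesses on io+4/io+5.

/* Sketch of the address/IRQ decoding seen in kahlua.c's probe_one().
 * Real code reads 'map' from PCI config space and mixer registers
 * 0x80/0x81 through port I/O; here the raw values are simply
 * hard-coded to show the arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned char map = 0x02;        /* pretend PCI config value      */
	unsigned char mix80 = 0x45;      /* pretend mixer register 0x80   */
	unsigned char mix81 = 0x21;      /* pretend mixer register 0x81   */

	unsigned long io = 0x220 + 0x20 * (map & 3); /* SB base address   */
	unsigned int irq = mix80 & 0x0F;             /* low nibble = IRQ  */
	unsigned int dma8 = mix81;                   /* 8-bit DMA channel */

	printf("XpressAudio at 0x%lx, irq %u, dma8 %u\n", io, irq, dma8);
	return 0;
}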
H A D | msnd_pinnacle.c | 45 #include <asm/io.h> 134 msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS); reset_record_queue() 136 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); reset_record_queue() 840 msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS); pack_DARQ_to_DARF() 845 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); pack_DARQ_to_DARF() 1103 msnd_inb(dev.io + HP_RXL); intr() 1133 msnd_outb(HPDSPRESET_ON, dev.io + HP_DSPR); reset_dsp() 1136 dev.info = msnd_inb(dev.io + HP_INFO); reset_dsp() 1138 msnd_outb(HPDSPRESET_OFF, dev.io + HP_DSPR); reset_dsp() 1141 if (msnd_inb(dev.io + HP_CVR) == HP_CVR_DEF) reset_dsp() 1158 if (!request_region(dev.io, dev.numio, "probing")) { probe_multisound() 1164 release_region(dev.io, dev.numio); probe_multisound() 1200 dev.io, dev.io + dev.numio - 1, probe_multisound() 1204 release_region(dev.io, dev.numio); probe_multisound() 1215 msnd_outb(dev.memid, dev.io + HP_MEMM); init_sma() 1217 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); init_sma() 1227 msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS); init_sma() 1229 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); init_sma() 1304 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); upload_dsp_code() 1343 msnd_outb(HPPRORESET_ON, dev.io + HP_PROR); reset_proteus() 1345 msnd_outb(HPPRORESET_OFF, dev.io + HP_PROR); reset_proteus() 1355 msnd_outb(HPWAITSTATE_0, dev.io + HP_WAIT); initialize() 1356 msnd_outb(HPBITMODE_16, dev.io + HP_BITM); initialize() 1414 if (request_region(dev.io, dev.numio, dev.name) == NULL) { attach_multisound() 1421 release_region(dev.io, dev.numio); attach_multisound() 1428 release_region(dev.io, dev.numio); attach_multisound() 1436 release_region(dev.io, dev.numio); attach_multisound() 1445 release_region(dev.io, dev.numio); attach_multisound() 1463 release_region(dev.io, dev.numio); unload_multisound() 1485 static int __init msnd_write_cfg_io0(int cfg, int num, WORD io) msnd_write_cfg_io0() argument 1489 if (msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io))) msnd_write_cfg_io0() 1491 if (msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io))) msnd_write_cfg_io0() 1496 static int __init msnd_write_cfg_io1(int cfg, int num, WORD io) msnd_write_cfg_io1() argument 1500 if (msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io))) msnd_write_cfg_io1() 1502 if (msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io))) msnd_write_cfg_io1() 1624 static int io __initdata = -1; 1654 static int io __initdata = CONFIG_MSNDCLAS_IO; 1659 static int io __initdata = CONFIG_MSNDPIN_IO; 1729 module_param (io, int, 0); 1757 if (io == -1 || irq == -1 || mem == -1) msnd_init() 1758 printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n"); msnd_init() 1761 if (io == -1 || msnd_init() 1762 !(io == 0x290 || msnd_init() 1763 io == 0x260 || msnd_init() 1764 io == 0x250 || msnd_init() 1765 io == 0x240 || msnd_init() 1766 io == 0x230 || msnd_init() 1767 io == 0x220 || msnd_init() 1768 io == 0x210 || msnd_init() 1769 io == 0x3e0)) { msnd_init() 1770 printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set to 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, or 0x3E0\n"); msnd_init() 1774 if (io == -1 || msnd_init() 1775 io < 0x100 || msnd_init() 1776 io > 0x3e0 || msnd_init() 1777 (io % 0x10) != 0) { msnd_init() 1778 printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must within the range 0x100 to 0x3E0 and must be evenly divisible by 0x10\n"); msnd_init() 1834 pinnacle_devs[0].io0 = io; msnd_init() 1878 dev.io = io; msnd_init()
|
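msnd_init() in the msnd_pinnacle.c entry above sanity-checks the user-supplied io parameter before touching hardware: the Classic variant only accepts a fixed list of DSP bases, while the Pinnacle variant accepts anything in 0x100 to 0x3E0 that is a multiple of 0x10. A self-contained sketch of those two checks follows; is_valid_classic_io() and is_valid_pinnacle_io() are hypothetical helper names, and the kernel code prints KERN_ERR messages where this sketch just returns 0 or 1.

/* Sketch of the "io" module-parameter validation done in msnd_init(). */
#include <stddef.h>
#include <stdio.h>

static int is_valid_classic_io(int io)
{
	static const int valid[] = {
		0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, 0x3e0
	};
	size_t i;

	for (i = 0; i < sizeof(valid) / sizeof(valid[0]); i++)
		if (io == valid[i])
			return 1;
	return 0;
}

static int is_valid_pinnacle_io(int io)
{
	/* 0x100..0x3E0, evenly divisible by 0x10 */
	return io >= 0x100 && io <= 0x3e0 && (io % 0x10) == 0;
}

int main(void)
{
	printf("0x290 classic:  %d\n", is_valid_classic_io(0x290));   /* 1 */
	printf("0x2a0 classic:  %d\n", is_valid_classic_io(0x2a0));   /* 0 */
	printf("0x2a0 pinnacle: %d\n", is_valid_pinnacle_io(0x2a0));  /* 1 */
	printf("0x2a8 pinnacle: %d\n", is_valid_pinnacle_io(0x2a8));  /* 0 */
	return 0;
}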
/linux-4.1.27/drivers/net/wan/ |
H A D | n2.c | 35 #include <asm/io.h> 120 u16 io; /* IO Base address */ member in struct:card_s 135 #define sca_reg(reg, card) (0x8000 | (card)->io | \ 153 return inb(card->io + N2_PSR) & PSR_PAGEBITS; sca_get_page() 159 u8 psr = inb(card->io + N2_PSR); openwin() 160 outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR); openwin() 170 int io = card->io; n2_set_iface() local 171 u8 mcr = inb(io + N2_MCR); n2_set_iface() 201 outb(mcr, io + N2_MCR); n2_set_iface() 214 int io = port->card->io; n2_open() local 215 u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0); n2_open() 223 outb(mcr, io + N2_MCR); n2_open() 225 outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */ n2_open() 226 outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */ n2_open() 237 int io = port->card->io; n2_close() local 238 u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0); n2_close() 242 outb(mcr, io + N2_MCR); n2_close() 321 if (card->io) n2_destroy_card() 322 release_region(card->io, N2_IOPORTS); n2_destroy_card() 338 static int __init n2_run(unsigned long io, unsigned long irq, n2_run() argument 345 if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) { n2_run() 372 if (!request_region(io, N2_IOPORTS, devname)) { n2_run() 377 card->io = io; n2_run() 399 outb(0, io + N2_PCR); n2_run() 400 outb(winbase >> 12, io + N2_BAR); n2_run() 404 outb(WIN16K, io + N2_PSR); n2_run() 408 outb(WIN32K, io + N2_PSR); n2_run() 412 outb(WIN64K, io + N2_PSR); n2_run() 422 outb(pcr, io + N2_PCR); n2_run() 447 outb(pcr, io + N2_PCR); n2_run() 448 outb(0, io + N2_MCR); n2_run() 507 unsigned long io, irq, ram; n2_init() local 510 io = simple_strtoul(hw, &hw, 0); n2_init() 536 n2_run(io, irq, ram, valid[0], valid[1]); n2_init() 566 MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
|
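The n2.c entry shows how the driver banks its shared RAM window: sca_get_page() reads the page bits out of the PSR register and openwin() does a read-modify-write that replaces only those bits while preserving the rest. A user-space sketch of that masking follows, operating on a plain variable instead of the real inb/outb on card->io + N2_PSR; the value of PSR_PAGEBITS is assumed to be the low three bits purely for illustration and may differ from the driver's actual definition.

/* Sketch of the PSR page-window manipulation from n2.c: keep every bit
 * of the register except the page-select field, then OR in the new page.
 * A plain variable stands in for the hardware register. */
#include <stdio.h>

#define PSR_PAGEBITS 0x07       /* assumed page-select mask */

static unsigned char psr = 0xA5;        /* simulated N2_PSR register */

static unsigned char get_page(void)
{
	return psr & PSR_PAGEBITS;              /* like sca_get_page() */
}

static void openwin(unsigned char page)
{
	/* read-modify-write: preserve non-page bits, replace page bits */
	psr = (psr & ~PSR_PAGEBITS) | (page & PSR_PAGEBITS);
}

int main(void)
{
	printf("page before: %u (psr=0x%02X)\n", get_page(), psr);
	openwin(3);
	printf("page after:  %u (psr=0x%02X)\n", get_page(), psr);
	return 0;
}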
/linux-4.1.27/drivers/clk/ |
H A D | clk-nspire.c | 13 #include <linux/io.h> 71 void __iomem *io; nspire_ahbdiv_setup() local 77 io = of_iomap(node, 0); nspire_ahbdiv_setup() 78 if (!io) nspire_ahbdiv_setup() 80 val = readl(io); nspire_ahbdiv_setup() 81 iounmap(io); nspire_ahbdiv_setup() 113 void __iomem *io; nspire_clk_setup() local 118 io = of_iomap(node, 0); nspire_clk_setup() 119 if (!io) nspire_clk_setup() 121 val = readl(io); nspire_clk_setup() 122 iounmap(io); nspire_clk_setup()
|
/linux-4.1.27/drivers/staging/lustre/lustre/lclient/ |
H A D | glimpse.c | 86 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, cl_glimpse_lock() argument 128 lock = cl_lock_request(env, io, descr, "glimpse", cl_glimpse_lock() 168 struct cl_io *io; cl_io_get() local 176 io = ccc_env_thread_io(env); cl_io_get() 177 io->ci_obj = clob; cl_io_get() 179 *ioout = io; cl_io_get() 201 struct cl_io *io = NULL; cl_glimpse_size0() local 205 result = cl_io_get(inode, &env, &io, &refcheck); cl_glimpse_size0() 208 io->ci_verify_layout = 1; cl_glimpse_size0() 209 result = cl_io_init(env, io, CIT_MISC, io->ci_obj); cl_glimpse_size0() 212 * nothing to do for this io. This currently happens cl_glimpse_size0() 215 result = io->ci_result; cl_glimpse_size0() 217 result = cl_glimpse_lock(env, io, inode, io->ci_obj, cl_glimpse_size0() 221 cl_io_fini(env, io); cl_glimpse_size0() 222 if (unlikely(io->ci_need_restart)) cl_glimpse_size0() 232 struct cl_io *io = NULL; cl_local_size() local 243 result = cl_io_get(inode, &env, &io, &refcheck); cl_local_size() 247 clob = io->ci_obj; cl_local_size() 248 result = cl_io_init(env, io, CIT_MISC, clob); cl_local_size() 250 result = io->ci_result; cl_local_size() 257 lock = cl_lock_peek(env, io, descr, "localsize", current); cl_local_size() 266 cl_io_fini(env, io); cl_local_size()
|
H A D | lcommon_misc.c | 131 struct cl_io *io; cl_get_grouplock() local 142 io = ccc_env_thread_io(env); cl_get_grouplock() 143 io->ci_obj = obj; cl_get_grouplock() 144 io->ci_ignore_layout = 1; cl_get_grouplock() 146 rc = cl_io_init(env, io, CIT_MISC, io->ci_obj); cl_get_grouplock() 165 lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current); cl_get_grouplock() 167 cl_io_fini(env, io); cl_get_grouplock() 173 cg->cg_io = io; cl_get_grouplock() 185 struct cl_io *io = cg->cg_io; cl_put_grouplock() local 197 cl_io_fini(env, io); cl_put_grouplock()
|
H A D | lcommon_cl.c | 472 struct cl_io *io) ccc_page_is_under_lock() 480 if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || ccc_page_is_under_lock() 481 io->ci_type == CIT_FAULT) { ccc_page_is_under_lock() 489 result = cl_queue_match(&io->ci_lockset.cls_done, ccc_page_is_under_lock() 609 * layer. This function is executed every time io finds an existing lock in 611 * cached lock "fits" into io. 614 * \param io IO that wants a lock. 621 const struct cl_io *io) ccc_lock_fits_into() 689 * io operations. 695 struct cl_io *io = ios->cis_io; ccc_io_fini() local 697 CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); ccc_io_fini() 700 int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, ccc_io_one_lock_index() argument 706 struct cl_object *obj = io->ci_obj; ccc_io_one_lock_index() 725 cl_io_lock_add(env, io, &cio->cui_link); ccc_io_one_lock_index() 730 struct ccc_io *cio, struct cl_io *io) ccc_io_update_iov() 732 size_t size = io->u.ci_rw.crw_count; ccc_io_update_iov() 734 if (!cl_is_normalio(env, io) || cio->cui_iter == NULL) ccc_io_update_iov() 740 int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, ccc_io_one_lock() argument 744 struct cl_object *obj = io->ci_obj; ccc_io_one_lock() 746 return ccc_io_one_lock_index(env, io, enqflags, mode, ccc_io_one_lock() 761 struct cl_io *io = ios->cis_io; ccc_io_advance() local 766 if (!cl_is_normalio(env, io)) ccc_io_advance() 784 struct cl_io *io, loff_t start, size_t count, int *exceed) ccc_prep_size() 822 result = cl_glimpse_lock(env, io, inode, obj, 0); ccc_prep_size() 944 struct cl_io *io; cl_setattr_ost() local 952 io = ccc_env_thread_io(env); cl_setattr_ost() 953 io->ci_obj = cl_i2info(inode)->lli_clob; cl_setattr_ost() 955 io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime); cl_setattr_ost() 956 io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime); cl_setattr_ost() 957 io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime); cl_setattr_ost() 958 io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size; cl_setattr_ost() 959 io->u.ci_setattr.sa_valid = attr->ia_valid; cl_setattr_ost() 960 io->u.ci_setattr.sa_capa = capa; cl_setattr_ost() 963 if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { cl_setattr_ost() 971 result = cl_io_loop(env, io); cl_setattr_ost() 973 result = io->ci_result; cl_setattr_ost() 975 cl_io_fini(env, io); cl_setattr_ost() 976 if (unlikely(io->ci_need_restart)) cl_setattr_ost() 981 if (result == -ENODATA && io->ci_restore_needed && cl_setattr_ost() 982 io->ci_result != -ENODATA) cl_setattr_ost() 470 ccc_page_is_under_lock(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *io) ccc_page_is_under_lock() argument 618 ccc_lock_fits_into(const struct lu_env *env, const struct cl_lock_slice *slice, const struct cl_lock_descr *need, const struct cl_io *io) ccc_lock_fits_into() argument 729 ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio, struct cl_io *io) ccc_io_update_iov() argument 783 ccc_prep_size(const struct lu_env *env, struct cl_object *obj, struct cl_io *io, loff_t start, size_t count, int *exceed) ccc_prep_size() argument
|
/linux-4.1.27/arch/alpha/kernel/ |
H A D | err_marvel.c | 12 #include <asm/io.h> 330 marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) marvel_print_po7_err_sum() argument 370 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { marvel_print_po7_err_sum() 373 (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) marvel_print_po7_err_sum() 375 marvel_print_po7_crrct_sym(io->po7_crrct_sym); marvel_print_po7_err_sum() 381 if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) marvel_print_po7_err_sum() 383 if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { marvel_print_po7_err_sum() 388 if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) marvel_print_po7_err_sum() 391 if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { marvel_print_po7_err_sum() 395 if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { marvel_print_po7_err_sum() 404 if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) marvel_print_po7_err_sum() 415 if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | marvel_print_po7_err_sum() 422 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) marvel_print_po7_err_sum() 424 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) marvel_print_po7_err_sum() 426 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) marvel_print_po7_err_sum() 429 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) marvel_print_po7_err_sum() 432 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) marvel_print_po7_err_sum() 434 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) marvel_print_po7_err_sum() 436 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) marvel_print_po7_err_sum() 439 if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) marvel_print_po7_err_sum() 442 if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { marvel_print_po7_err_sum() 445 marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); marvel_print_po7_err_sum() 447 if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) marvel_print_po7_err_sum() 450 if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) marvel_print_po7_err_sum() 452 if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) marvel_print_po7_err_sum() 454 if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) marvel_print_po7_err_sum() 461 if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) marvel_print_po7_err_sum() 464 if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) marvel_print_po7_err_sum() 466 if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) marvel_print_po7_err_sum() 469 if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) marvel_print_po7_err_sum() 472 if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) marvel_print_po7_err_sum() 475 if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) marvel_print_po7_err_sum() 479 if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) marvel_print_po7_err_sum() 486 err_print_prefix, io->po7_err_pkt0, marvel_print_po7_err_sum() 487 err_print_prefix, io->po7_err_pkt1); marvel_print_po7_err_sum() 494 marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); marvel_print_po7_err_sum() 799 struct ev7_pal_io_subpacket *io = lf_subpackets->io; marvel_find_io7_with_error() local 806 if (!io) marvel_find_io7_with_error() 812 memset(io, 0x55, sizeof(*io)); marvel_find_io7_with_error() 842 io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr; marvel_find_io7_with_error() 843 io->io_sys_rev = io7->csrs->IO_SYS_REV.csr; marvel_find_io7_with_error() 844 io->io7_uph = io7->csrs->IO7_UPH.csr; marvel_find_io7_with_error() 845 io->hpi_ctl = io7->csrs->HPI_CTL.csr; marvel_find_io7_with_error() 846 io->crd_ctl = io7->csrs->CRD_CTL.csr; marvel_find_io7_with_error() 847 io->hei_ctl = 
io7->csrs->HEI_CTL.csr; marvel_find_io7_with_error() 848 io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr; marvel_find_io7_with_error() 849 io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr; marvel_find_io7_with_error() 850 io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr; marvel_find_io7_with_error() 851 io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr; marvel_find_io7_with_error() 852 io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr; marvel_find_io7_with_error() 853 io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr; marvel_find_io7_with_error() 861 io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr; marvel_find_io7_with_error() 862 io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr; marvel_find_io7_with_error() 863 io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr; marvel_find_io7_with_error() 864 io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr; marvel_find_io7_with_error() 865 io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr; marvel_find_io7_with_error() 866 io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr; marvel_find_io7_with_error() 867 io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr; marvel_find_io7_with_error() 868 io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr; marvel_find_io7_with_error() 869 io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr; marvel_find_io7_with_error() 870 io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr; marvel_find_io7_with_error() 880 csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err; marvel_find_io7_with_error() 881 csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum; marvel_find_io7_with_error() 889 io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum; marvel_find_io7_with_error() 898 return io; marvel_find_io7_with_error() 907 struct ev7_pal_io_subpacket *io = lf_subpackets->io; marvel_process_io_error() local 913 if (!lf_subpackets->logout || !lf_subpackets->io) marvel_process_io_error() 932 if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) || marvel_process_io_error() 933 ((lf_subpackets->io->po7_error_sum | marvel_process_io_error() 934 lf_subpackets->io->ports[0].pox_err_sum | marvel_process_io_error() 935 lf_subpackets->io->ports[1].pox_err_sum | marvel_process_io_error() 936 lf_subpackets->io->ports[2].pox_err_sum | marvel_process_io_error() 937 lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) { marvel_process_io_error() 963 if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) { marvel_process_io_error() 964 marvel_print_po7_err_sum(io); marvel_process_io_error() 975 err_print_prefix, io->po7_error_sum, marvel_process_io_error() 976 err_print_prefix, io->po7_uncrr_sym, marvel_process_io_error() 977 err_print_prefix, io->po7_crrct_sym, marvel_process_io_error() 978 err_print_prefix, io->po7_ugbge_sym, marvel_process_io_error() 979 err_print_prefix, io->po7_err_pkt0, marvel_process_io_error() 980 err_print_prefix, io->po7_err_pkt1); marvel_process_io_error() 988 if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) marvel_process_io_error() 993 lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_process_io_error() 994 marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); marvel_process_io_error() 997 err_print_prefix, io->ports[i].pox_first_err); marvel_process_io_error() 998 marvel_print_pox_err(io->ports[i].pox_first_err, marvel_process_io_error() 999 &io->ports[i]); marvel_process_io_error() 1105 * If we don't have one, point the io subpacket in marvel_machine_check() 1110 if (!lf_subpackets->io) marvel_machine_check() 1111 lf_subpackets->io = &scratch_io_packet; marvel_machine_check()
|
/linux-4.1.27/drivers/pcmcia/ |
H A D | rsrc_iodyn.c | 98 if (!s->io[i].res) iodyn_find_io() 104 if ((s->io[i].res->start & (align-1)) == *base) iodyn_find_io() 109 struct resource *res = s->io[i].res; iodyn_find_io() 120 res = s->io[i].res = __iodyn_find_io_region(s, *base, iodyn_find_io() 126 s->io[i].res->flags = iodyn_find_io() 129 s->io[i].InUse = num; iodyn_find_io() 137 if (adjust_resource(s->io[i].res, res->start, iodyn_find_io() 141 s->io[i].InUse += num; iodyn_find_io() 149 if (adjust_resource(s->io[i].res, iodyn_find_io() 154 s->io[i].InUse += num; iodyn_find_io()
|
H A D | pd6729.h | 18 unsigned long io_base; /* base io address of the socket */
|
H A D | at91_cf.c | 21 #include <linux/io.h> 148 static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io) at91_cf_set_io_map() argument 154 io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ); at91_cf_set_io_map() 171 if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) { at91_cf_set_io_map() 180 io->start = cf->socket.io_offset; at91_cf_set_io_map() 181 io->stop = io->start + SZ_2K - 1; at91_cf_set_io_map() 252 struct resource *io; at91_cf_probe() local 266 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); at91_cf_probe() 267 if (!io) at91_cf_probe() 276 cf->phys_baseaddr = io->start; at91_cf_probe() 330 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), "at91_cf")) { at91_cf_probe() 335 dev_info(&pdev->dev, "irqs det #%d, io #%d\n", at91_cf_probe() 345 cf->socket.io[0].res = io; at91_cf_probe()
|
H A D | pcmcia_resource.c | 72 if (!s->io[i].res) release_io_space() 74 if ((s->io[i].res->start <= res->start) && release_io_space() 75 (s->io[i].res->end >= res->end)) { release_io_space() 76 s->io[i].InUse -= num; release_io_space() 82 if (s->io[i].InUse == 0) { release_io_space() 83 release_resource(s->io[i].res); release_io_space() 84 kfree(s->io[i].res); release_io_space() 85 s->io[i].res = NULL; release_io_space() 250 * pcmcia_fixup_iowidth() - reduce io width to 8bit 277 if (!s->io[i].res) pcmcia_fixup_iowidth() 283 io_on.start = s->io[i].res->start; pcmcia_fixup_iowidth() 284 io_on.stop = s->io[i].res->end; pcmcia_fixup_iowidth() 352 pccard_io_map io = { 0, 0, 0, 0, 1 }; pcmcia_release_configuration() local 372 if (!s->io[i].res) pcmcia_release_configuration() 374 s->io[i].Config--; pcmcia_release_configuration() 375 if (s->io[i].Config != 0) pcmcia_release_configuration() 377 io.map = i; pcmcia_release_configuration() 378 s->ops->set_io_map(s, &io); pcmcia_release_configuration() 409 release_io_space(s, &c->io[0]); pcmcia_release_io() 411 if (c->io[1].end) pcmcia_release_io() 412 release_io_space(s, &c->io[1]); pcmcia_release_io() 581 u8 b = c->io[0].start & 0xff; pcmcia_enable_device() 583 b = (c->io[0].start >> 8) & 0xff; pcmcia_enable_device() 587 u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1; pcmcia_enable_device() 595 if (s->io[i].res) { pcmcia_enable_device() 598 switch (s->io[i].res->flags & IO_DATA_PATH_WIDTH) { pcmcia_enable_device() 606 iomap.start = s->io[i].res->start; pcmcia_enable_device() 607 iomap.stop = s->io[i].res->end; pcmcia_enable_device() 609 s->io[i].Config++; pcmcia_enable_device() 639 &c->io[0], &c->io[1]); pcmcia_request_io() 655 ret = alloc_io_space(s, &c->io[0], p_dev->io_lines); pcmcia_request_io() 659 if (c->io[1].end) { pcmcia_request_io() 660 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); pcmcia_request_io() 662 struct resource tmp = c->io[0]; pcmcia_request_io() 664 release_io_space(s, &c->io[0]); pcmcia_request_io() 666 c->io[0].end = resource_size(&tmp); pcmcia_request_io() 667 c->io[0].start = tmp.start; pcmcia_request_io() 668 c->io[0].flags = tmp.flags; pcmcia_request_io() 672 c->io[1].start = 0; pcmcia_request_io() 678 &c->io[0], &c->io[1]); pcmcia_request_io()
|
H A D | i82092.c | 20 #include <asm/io.h> 59 unsigned int io_base; /* base io address of the socket */ 399 pccard_io_map io = { 0, 0, 0, 0, 1 }; i82092aa_init() local 405 io.map = i; i82092aa_init() 406 i82092aa_set_io_map(sock, &io); i82092aa_init() 557 static int i82092aa_set_io_map(struct pcmcia_socket *socket, struct pccard_io_map *io) i82092aa_set_io_map() argument 564 map = io->map; i82092aa_set_io_map() 571 if ((io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)){ i82092aa_set_io_map() 572 leave("i82092aa_set_io_map with invalid io"); i82092aa_set_io_map() 580 /* printk("set_io_map: Setting range to %x - %x \n",io->start,io->stop); */ i82092aa_set_io_map() 583 indirect_write16(sock,I365_IO(map)+I365_W_START,io->start); i82092aa_set_io_map() 584 indirect_write16(sock,I365_IO(map)+I365_W_STOP,io->stop); i82092aa_set_io_map() 588 if (io->flags & (MAP_16BIT|MAP_AUTOSZ)) i82092aa_set_io_map() 594 if (io->flags & MAP_ACTIVE) i82092aa_set_io_map()
|
/linux-4.1.27/drivers/isdn/hisax/ |
H A D | hisax_fcpcipnp.c | 37 #include <asm/io.h> 162 outb(idx, adapter->io + AVM_INDEX); fcpci_read_isac() 163 val = inb(adapter->io + AVM_DATA + (offset & 0xf)); fcpci_read_isac() 181 outb(idx, adapter->io + AVM_INDEX); fcpci_write_isac() 182 outb(value, adapter->io + AVM_DATA + (offset & 0xf)); fcpci_write_isac() 193 outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX); fcpci_read_isac_fifo() 194 insb(adapter->io + AVM_DATA, data, size); fcpci_read_isac_fifo() 205 outb(AVM_IDX_ISAC_FIFO, adapter->io + AVM_INDEX); fcpci_write_isac_fifo() 206 outsb(adapter->io + AVM_DATA, data, size); fcpci_write_isac_fifo() 217 outl(idx, adapter->io + AVM_INDEX); fcpci_read_hdlc_status() 218 val = inl(adapter->io + AVM_DATA + HDLC_STATUS); fcpci_read_hdlc_status() 231 outl(idx, adapter->io + AVM_INDEX); __fcpci_write_ctrl() 232 outl(bcs->ctrl.ctrl, adapter->io + AVM_DATA + HDLC_CTRL); __fcpci_write_ctrl() 255 outl(offset, adapter->io + AVM_ISACSX_INDEX); fcpci2_read_isac() 256 val = inl(adapter->io + AVM_ISACSX_DATA); fcpci2_read_isac() 273 outl(offset, adapter->io + AVM_ISACSX_INDEX); fcpci2_write_isac() 274 outl(value, adapter->io + AVM_ISACSX_DATA); fcpci2_write_isac() 286 outl(0, adapter->io + AVM_ISACSX_INDEX); fcpci2_read_isac_fifo() 288 data[i] = inl(adapter->io + AVM_ISACSX_DATA); fcpci2_read_isac_fifo() 300 outl(0, adapter->io + AVM_ISACSX_INDEX); fcpci2_write_isac_fifo() 302 outl(data[i], adapter->io + AVM_ISACSX_DATA); fcpci2_write_isac_fifo() 310 return inl(adapter->io + offset); fcpci2_read_hdlc_status() 321 outl(bcs->ctrl.ctrl, adapter->io + offset); fcpci2_write_ctrl() 334 outb(idx, adapter->io + AVM_INDEX); fcpnp_read_hdlc_status() 335 val = inb(adapter->io + AVM_DATA + HDLC_STATUS); fcpnp_read_hdlc_status() 337 val |= inb(adapter->io + AVM_DATA + HDLC_STATUS + 1) << 8; fcpnp_read_hdlc_status() 350 outb(idx, adapter->io + AVM_INDEX); __fcpnp_write_ctrl() 353 adapter->io + AVM_DATA + HDLC_STATUS + 2); __fcpnp_write_ctrl() 356 adapter->io + AVM_DATA + HDLC_STATUS + 1); __fcpnp_write_ctrl() 359 adapter->io + AVM_DATA + HDLC_STATUS + 0); __fcpnp_write_ctrl() 413 outsl(adapter->io + AVM_DATA + HDLC_FIFO, hdlc_fill_fifo() 419 outsl(adapter->io + hdlc_fill_fifo() 427 outsb(adapter->io + AVM_DATA, p, count); hdlc_fill_fifo() 449 outl(idx, adapter->io + AVM_INDEX); hdlc_empty_fifo() 450 insl(adapter->io + AVM_DATA + HDLC_FIFO, hdlc_empty_fifo() 455 insl(adapter->io + hdlc_empty_fifo() 461 outb(idx, adapter->io + AVM_INDEX); hdlc_empty_fifo() 462 insb(adapter->io + AVM_DATA, p, count); hdlc_empty_fifo() 661 val = inb(adapter->io + AVM_STATUS0); fcpci2_irq() 681 sval = inb(adapter->io + 2); fcpci_irq() 698 outb(AVM_STATUS0_RES_TIMER, adapter->io + AVM_STATUS0); fcpci2_init() 699 outb(AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0); fcpci2_init() 706 AVM_STATUS0_ENA_IRQ, adapter->io + AVM_STATUS0); fcpci_init() 709 adapter->io + AVM_STATUS1); fcpci_init() 725 if (!request_region(adapter->io, 32, "fcpcipnp")) fcpcipnp_setup() 731 val = inl(adapter->io); fcpcipnp_setup() 734 val = inb(adapter->io); fcpcipnp_setup() 735 val |= inb(adapter->io + 1) << 8; fcpcipnp_setup() 775 outb(0, adapter->io + AVM_STATUS0); fcpcipnp_setup() 777 outb(AVM_STATUS0_RESET, adapter->io + AVM_STATUS0); fcpcipnp_setup() 779 outb(0, adapter->io + AVM_STATUS0); fcpcipnp_setup() 823 release_region(adapter->io, 32); fcpcipnp_setup() 832 outb(0, adapter->io + AVM_STATUS0); fcpcipnp_release() 834 release_region(adapter->io, 32); fcpcipnp_release() 899 adapter->io = pci_resource_start(pdev, 1); fcpci_probe() 942 adapter->io = 
pnp_port_start(pdev, 0); fcpnp_probe() 946 (char *) dev_id->driver_data, adapter->io, adapter->irq); fcpnp_probe()
|
H A D | hisax_fcpcipnp.h | 49 unsigned int io; member in struct:fritz_adapter
|
/linux-4.1.27/drivers/rtc/ |
H A D | rtc-stmp3xxx.c | 22 #include <linux/io.h> 71 void __iomem *io; member in struct:stmp3xxx_rtc_data 95 writel(timeout, rtc_data->io + STMP3XXX_RTC_WATCHDOG); stmp3xxx_wdt_set_timeout() 97 rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_SET); stmp3xxx_wdt_set_timeout() 99 rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_SET); stmp3xxx_wdt_set_timeout() 102 rtc_data->io + STMP3XXX_RTC_CTRL + STMP_OFFSET_REG_CLR); stmp3xxx_wdt_set_timeout() 104 rtc_data->io + STMP3XXX_RTC_PERSISTENT1 + STMP_OFFSET_REG_CLR); stmp3xxx_wdt_set_timeout() 143 if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & stmp3xxx_wait_time() 148 return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & stmp3xxx_wait_time() 162 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); stmp3xxx_rtc_gettime() 170 writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); stmp3xxx_rtc_set_mmss() 178 u32 status = readl(rtc_data->io + STMP3XXX_RTC_CTRL); stmp3xxx_rtc_interrupt() 182 rtc_data->io + STMP3XXX_RTC_CTRL_CLR); stmp3xxx_rtc_interrupt() 197 rtc_data->io + STMP3XXX_RTC_PERSISTENT0_SET); stmp3xxx_alarm_irq_enable() 199 rtc_data->io + STMP3XXX_RTC_CTRL_SET); stmp3xxx_alarm_irq_enable() 203 rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR); stmp3xxx_alarm_irq_enable() 205 rtc_data->io + STMP3XXX_RTC_CTRL_CLR); stmp3xxx_alarm_irq_enable() 214 rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_ALARM), &alm->time); stmp3xxx_rtc_read_alarm() 224 writel(t, rtc_data->io + STMP3XXX_RTC_ALARM); stmp3xxx_rtc_set_alarm() 248 rtc_data->io + STMP3XXX_RTC_CTRL_CLR); stmp3xxx_rtc_remove() 272 rtc_data->io = devm_ioremap(&pdev->dev, r->start, resource_size(r)); stmp3xxx_rtc_probe() 273 if (!rtc_data->io) { stmp3xxx_rtc_probe() 280 rtc_stat = readl(rtc_data->io + STMP3XXX_RTC_STAT); stmp3xxx_rtc_probe() 288 err = stmp_reset_block(rtc_data->io); stmp3xxx_rtc_probe() 337 writel(pers0_set, rtc_data->io + STMP3XXX_RTC_PERSISTENT0_SET); stmp3xxx_rtc_probe() 342 rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR); stmp3xxx_rtc_probe() 346 rtc_data->io + STMP3XXX_RTC_CTRL_CLR); stmp3xxx_rtc_probe() 375 stmp_reset_block(rtc_data->io); stmp3xxx_rtc_resume() 379 rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR); stmp3xxx_rtc_resume()
|
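rtc-stmp3xxx.c above leans on the STMP convention of per-register SET and CLR aliases: writing a mask to the register's SET offset turns those bits on and writing it to the CLR offset turns them off, with no read-modify-write needed. The sketch below models that semantics on a simulated register; the 0x4/0x8 alias offsets follow the usual STMP layout but are treated as an assumption here, since the snippet only uses the symbolic STMP_OFFSET_REG_SET/CLR names.

/* Sketch of the STMP-style SET/CLR register aliases used throughout
 * rtc-stmp3xxx.c: writes to reg+SET or reg+CLR only set or clear the
 * written bits.  A tiny simulated register models this behaviour. */
#include <stdint.h>
#include <stdio.h>

#define OFFSET_SET 0x4          /* assumed STMP SET alias offset */
#define OFFSET_CLR 0x8          /* assumed STMP CLR alias offset */

static uint32_t ctrl_reg;       /* simulated STMP3XXX_RTC_CTRL */

/* writel() stand-in that understands the SET/CLR aliases of one register */
static void fake_writel(uint32_t val, unsigned int offset)
{
	switch (offset) {
	case 0:          ctrl_reg = val;   break;   /* plain write      */
	case OFFSET_SET: ctrl_reg |= val;  break;   /* set these bits   */
	case OFFSET_CLR: ctrl_reg &= ~val; break;   /* clear these bits */
	}
}

int main(void)
{
	fake_writel(0x0, 0);                 /* reset               */
	fake_writel(0x00000001, OFFSET_SET); /* enable an interrupt */
	fake_writel(0x00000004, OFFSET_SET); /* enable another bit  */
	printf("ctrl after SETs: 0x%08X\n", ctrl_reg);
	fake_writel(0x00000001, OFFSET_CLR); /* disable the first   */
	printf("ctrl after CLR:  0x%08X\n", ctrl_reg);
	return 0;
}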
/linux-4.1.27/drivers/staging/lustre/lustre/lov/ |
H A D | lov_io.c | 83 static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio, lov_io_sub_inherit() argument 89 switch (io->ci_type) { lov_io_sub_inherit() 91 io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr; lov_io_sub_inherit() 92 io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid; lov_io_sub_inherit() 93 io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa; lov_io_sub_inherit() 94 if (cl_io_is_trunc(io)) { lov_io_sub_inherit() 98 io->u.ci_setattr.sa_attr.lvb_size = new_size; lov_io_sub_inherit() 106 io->u.ci_fault = parent->u.ci_fault; lov_io_sub_inherit() 108 io->u.ci_fault.ft_index = cl_index(obj, off); lov_io_sub_inherit() 112 io->u.ci_fsync.fi_start = start; lov_io_sub_inherit() 113 io->u.ci_fsync.fi_end = end; lov_io_sub_inherit() 114 io->u.ci_fsync.fi_capa = parent->u.ci_fsync.fi_capa; lov_io_sub_inherit() 115 io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid; lov_io_sub_inherit() 116 io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode; lov_io_sub_inherit() 121 io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent); lov_io_sub_inherit() 123 io->u.ci_wr.wr_append = 1; lov_io_sub_inherit() 125 io->u.ci_rw.crw_pos = start; lov_io_sub_inherit() 126 io->u.ci_rw.crw_count = end - start; lov_io_sub_inherit() 142 struct cl_io *io = lio->lis_cl.cis_io; lov_io_sub_init() local 175 * First sub-io. Use ->lis_single_subio to lov_io_sub_init() 196 sub_io->ci_parent = io; lov_io_sub_init() 197 sub_io->ci_lockreq = io->ci_lockreq; lov_io_sub_init() 198 sub_io->ci_type = io->ci_type; lov_io_sub_init() 199 sub_io->ci_no_srvlock = io->ci_no_srvlock; lov_io_sub_init() 200 sub_io->ci_noatime = io->ci_noatime; lov_io_sub_init() 204 io->ci_type, sub_obj); lov_io_sub_init() 244 * Lov io operations. 277 struct cl_io *io) lov_io_subio_init() 301 struct lov_object *obj, struct cl_io *io) lov_io_slice_init() 303 io->ci_result = 0; lov_io_slice_init() 309 switch (io->ci_type) { lov_io_slice_init() 312 lio->lis_pos = io->u.ci_rw.crw_pos; lov_io_slice_init() 313 lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count; lov_io_slice_init() 315 if (cl_io_is_append(io)) { lov_io_slice_init() 316 LASSERT(io->ci_type == CIT_WRITE); lov_io_slice_init() 323 if (cl_io_is_trunc(io)) lov_io_slice_init() 324 lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size; lov_io_slice_init() 331 pgoff_t index = io->u.ci_fault.ft_index; lov_io_slice_init() 332 lio->lis_pos = cl_offset(io->ci_obj, index); lov_io_slice_init() 333 lio->lis_endpos = cl_offset(io->ci_obj, index + 1); lov_io_slice_init() 338 lio->lis_pos = io->u.ci_fsync.fi_start; lov_io_slice_init() 339 lio->lis_endpos = io->u.ci_fsync.fi_end; lov_io_slice_init() 430 struct cl_io *io = ios->cis_io; lov_io_rw_iter_init() local 432 __u64 start = io->u.ci_rw.crw_pos; lov_io_rw_iter_init() 436 LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE); lov_io_rw_iter_init() 439 if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) { lov_io_rw_iter_init() 446 io->ci_continue = next < lio->lis_io_endpos; lov_io_rw_iter_init() 447 io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos, lov_io_rw_iter_init() 448 next) - io->u.ci_rw.crw_pos; lov_io_rw_iter_init() 449 lio->lis_pos = io->u.ci_rw.crw_pos; lov_io_rw_iter_init() 450 lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count; lov_io_rw_iter_init() 492 static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io) lov_io_end_wrapper() argument 496 * sub-io, either because previous sub-io failed, or upper layer lov_io_end_wrapper() 499 if (io->ci_state == CIS_IO_GOING) 
lov_io_end_wrapper() 500 cl_io_end(env, io); lov_io_end_wrapper() 502 io->ci_state = CIS_IO_FINISHED; lov_io_end_wrapper() 506 static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io) lov_io_iter_fini_wrapper() argument 508 cl_io_iter_fini(env, io); lov_io_iter_fini_wrapper() 512 static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io) lov_io_unlock_wrapper() argument 514 cl_io_unlock(env, io); lov_io_unlock_wrapper() 819 * Empty lov io operations. 841 * An io operation vector for files without stripes. 894 struct cl_io *io) lov_io_init_raid0() 900 lov_io_slice_init(lio, lov, io); lov_io_init_raid0() 901 if (io->ci_result == 0) { lov_io_init_raid0() 902 io->ci_result = lov_io_subio_init(env, lio, io); lov_io_init_raid0() 903 if (io->ci_result == 0) { lov_io_init_raid0() 904 cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops); lov_io_init_raid0() 908 return io->ci_result; lov_io_init_raid0() 912 struct cl_io *io) lov_io_init_empty() 919 switch (io->ci_type) { lov_io_init_empty() 940 cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops); lov_io_init_empty() 944 io->ci_result = result < 0 ? result : 0; lov_io_init_empty() 949 struct cl_io *io) lov_io_init_released() 958 switch (io->ci_type) { lov_io_init_released() 960 LASSERTF(0, "invalid type %d\n", io->ci_type); lov_io_init_released() 971 if (cl_io_is_trunc(io)) lov_io_init_released() 972 io->ci_restore_needed = 1; lov_io_init_released() 978 io->ci_restore_needed = 1; lov_io_init_released() 983 cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops); lov_io_init_released() 987 io->ci_result = result < 0 ? result : 0; lov_io_init_released() 276 lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, struct cl_io *io) lov_io_subio_init() argument 300 lov_io_slice_init(struct lov_io *lio, struct lov_object *obj, struct cl_io *io) lov_io_slice_init() argument 893 lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) lov_io_init_raid0() argument 911 lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) lov_io_init_empty() argument 948 lov_io_init_released(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) lov_io_init_released() argument
|
/linux-4.1.27/drivers/char/ipmi/ |
H A D | ipmi_si_intf.c | 62 #include <asm/io.h> 178 struct si_sm_io io; member in struct:smi_info 593 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); check_bt_irq() 601 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, check_bt_irq() 604 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); check_bt_irq() 1207 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, si_bt_irq_handler() 1424 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); std_irq_cleanup() 1443 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, std_irq_setup() 1464 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) port_inb() argument 1466 unsigned int addr = io->addr_data; port_inb() 1468 return inb(addr + (offset * io->regspacing)); port_inb() 1471 static void port_outb(struct si_sm_io *io, unsigned int offset, port_outb() argument 1474 unsigned int addr = io->addr_data; port_outb() 1476 outb(b, addr + (offset * io->regspacing)); port_outb() 1479 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) port_inw() argument 1481 unsigned int addr = io->addr_data; port_inw() 1483 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; port_inw() 1486 static void port_outw(struct si_sm_io *io, unsigned int offset, port_outw() argument 1489 unsigned int addr = io->addr_data; port_outw() 1491 outw(b << io->regshift, addr + (offset * io->regspacing)); port_outw() 1494 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) port_inl() argument 1496 unsigned int addr = io->addr_data; port_inl() 1498 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; port_inl() 1501 static void port_outl(struct si_sm_io *io, unsigned int offset, port_outl() argument 1504 unsigned int addr = io->addr_data; port_outl() 1506 outl(b << io->regshift, addr+(offset * io->regspacing)); port_outl() 1511 unsigned int addr = info->io.addr_data; port_cleanup() 1516 release_region(addr + idx * info->io.regspacing, port_cleanup() 1517 info->io.regsize); port_cleanup() 1523 unsigned int addr = info->io.addr_data; port_setup() 1535 switch (info->io.regsize) { port_setup() 1537 info->io.inputb = port_inb; port_setup() 1538 info->io.outputb = port_outb; port_setup() 1541 info->io.inputb = port_inw; port_setup() 1542 info->io.outputb = port_outw; port_setup() 1545 info->io.inputb = port_inl; port_setup() 1546 info->io.outputb = port_outl; port_setup() 1550 info->io.regsize); port_setup() 1561 if (request_region(addr + idx * info->io.regspacing, port_setup() 1562 info->io.regsize, DEVICE_NAME) == NULL) { port_setup() 1565 release_region(addr + idx * info->io.regspacing, port_setup() 1566 info->io.regsize); port_setup() 1574 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) intf_mem_inb() argument 1576 return readb((io->addr)+(offset * io->regspacing)); intf_mem_inb() 1579 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, intf_mem_outb() argument 1582 writeb(b, (io->addr)+(offset * io->regspacing)); intf_mem_outb() 1585 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) intf_mem_inw() argument 1587 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) intf_mem_inw() 1591 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, intf_mem_outw() argument 1594 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); intf_mem_outw() 1597 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) intf_mem_inl() argument 1599 return 
(readl((io->addr)+(offset * io->regspacing)) >> io->regshift) intf_mem_inl() 1603 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, intf_mem_outl() argument 1606 writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); intf_mem_outl() 1610 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) mem_inq() argument 1612 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) mem_inq() 1616 static void mem_outq(struct si_sm_io *io, unsigned int offset, mem_outq() argument 1619 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); mem_outq() 1625 unsigned long addr = info->io.addr_data; mem_cleanup() 1628 if (info->io.addr) { mem_cleanup() 1629 iounmap(info->io.addr); mem_cleanup() 1631 mapsize = ((info->io_size * info->io.regspacing) mem_cleanup() 1632 - (info->io.regspacing - info->io.regsize)); mem_cleanup() 1640 unsigned long addr = info->io.addr_data; mem_setup() 1652 switch (info->io.regsize) { mem_setup() 1654 info->io.inputb = intf_mem_inb; mem_setup() 1655 info->io.outputb = intf_mem_outb; mem_setup() 1658 info->io.inputb = intf_mem_inw; mem_setup() 1659 info->io.outputb = intf_mem_outw; mem_setup() 1662 info->io.inputb = intf_mem_inl; mem_setup() 1663 info->io.outputb = intf_mem_outl; mem_setup() 1667 info->io.inputb = mem_inq; mem_setup() 1668 info->io.outputb = mem_outq; mem_setup() 1673 info->io.regsize); mem_setup() 1684 mapsize = ((info->io_size * info->io.regspacing) mem_setup() 1685 - (info->io.regspacing - info->io.regsize)); mem_setup() 1690 info->io.addr = ioremap(addr, mapsize); mem_setup() 1691 if (info->io.addr == NULL) { mem_setup() 1909 info->io.addr_data = addr; hotmod_handler() 1910 info->io.addr_type = addr_space; hotmod_handler() 1916 info->io.addr = NULL; hotmod_handler() 1917 info->io.regspacing = regspacing; hotmod_handler() 1918 if (!info->io.regspacing) hotmod_handler() 1919 info->io.regspacing = DEFAULT_REGSPACING; hotmod_handler() 1920 info->io.regsize = regsize; hotmod_handler() 1921 if (!info->io.regsize) hotmod_handler() 1922 info->io.regsize = DEFAULT_REGSPACING; hotmod_handler() 1923 info->io.regshift = regshift; hotmod_handler() 1945 if (e->io.addr_type != addr_space) hotmod_handler() 1949 if (e->io.addr_data == addr) hotmod_handler() 1995 info->io.addr_data = ports[i]; hardcode_find_bmc() 1996 info->io.addr_type = IPMI_IO_ADDR_SPACE; hardcode_find_bmc() 2000 info->io.addr_data = addrs[i]; hardcode_find_bmc() 2001 info->io.addr_type = IPMI_MEM_ADDR_SPACE; hardcode_find_bmc() 2010 info->io.addr = NULL; hardcode_find_bmc() 2011 info->io.regspacing = regspacings[i]; hardcode_find_bmc() 2012 if (!info->io.regspacing) hardcode_find_bmc() 2013 info->io.regspacing = DEFAULT_REGSPACING; hardcode_find_bmc() 2014 info->io.regsize = regsizes[i]; hardcode_find_bmc() 2015 if (!info->io.regsize) hardcode_find_bmc() 2016 info->io.regsize = DEFAULT_REGSPACING; hardcode_find_bmc() 2017 info->io.regshift = regshifts[i]; hardcode_find_bmc() 2198 info->io.regspacing = spmi->addr.bit_width / 8; try_init_spmi() 2200 info->io.regspacing = DEFAULT_REGSPACING; try_init_spmi() 2202 info->io.regsize = info->io.regspacing; try_init_spmi() 2203 info->io.regshift = spmi->addr.bit_offset; try_init_spmi() 2207 info->io.addr_type = IPMI_MEM_ADDR_SPACE; try_init_spmi() 2210 info->io.addr_type = IPMI_IO_ADDR_SPACE; try_init_spmi() 2216 info->io.addr_data = spmi->addr.address; try_init_spmi() 2219 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
"io" : "mem", try_init_spmi() 2220 info->io.addr_data, info->io.regsize, info->io.regspacing, try_init_spmi() 2305 info->io.addr_type = IPMI_IO_ADDR_SPACE; ipmi_pnp_probe() 2310 info->io.addr_type = IPMI_MEM_ADDR_SPACE; ipmi_pnp_probe() 2317 info->io.addr_data = res->start; ipmi_pnp_probe() 2319 info->io.regspacing = DEFAULT_REGSPACING; ipmi_pnp_probe() 2321 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? ipmi_pnp_probe() 2325 if (res_second->start > info->io.addr_data) ipmi_pnp_probe() 2326 info->io.regspacing = res_second->start - info->io.addr_data; ipmi_pnp_probe() 2328 info->io.regsize = DEFAULT_REGSPACING; ipmi_pnp_probe() 2329 info->io.regshift = 0; ipmi_pnp_probe() 2345 res, info->io.regsize, info->io.regspacing, ipmi_pnp_probe() 2484 info->io.addr_type = IPMI_MEM_ADDR_SPACE; try_init_dmi() 2489 info->io.addr_type = IPMI_IO_ADDR_SPACE; try_init_dmi() 2498 info->io.addr_data = ipmi_data->base_addr; try_init_dmi() 2500 info->io.regspacing = ipmi_data->offset; try_init_dmi() 2501 if (!info->io.regspacing) try_init_dmi() 2502 info->io.regspacing = DEFAULT_REGSPACING; try_init_dmi() 2503 info->io.regsize = DEFAULT_REGSPACING; try_init_dmi() 2504 info->io.regshift = 0; try_init_dmi() 2513 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", try_init_dmi() 2514 info->io.addr_data, info->io.regsize, info->io.regspacing, try_init_dmi() 2563 info->io.regsize = DEFAULT_REGSIZE; ipmi_pci_probe_regspacing() 2564 info->io.regshift = 0; ipmi_pci_probe_regspacing() 2570 info->io.regspacing = regspacing; ipmi_pci_probe_regspacing() 2577 info->io.outputb(&info->io, 1, 0x10); ipmi_pci_probe_regspacing() 2579 status = info->io.inputb(&info->io, 1); ipmi_pci_probe_regspacing() 2634 info->io.addr_type = IPMI_IO_ADDR_SPACE; ipmi_pci_probe() 2637 info->io.addr_type = IPMI_MEM_ADDR_SPACE; ipmi_pci_probe() 2639 info->io.addr_data = pci_resource_start(pdev, 0); ipmi_pci_probe() 2641 info->io.regspacing = ipmi_pci_probe_regspacing(info); ipmi_pci_probe() 2642 info->io.regsize = DEFAULT_REGSIZE; ipmi_pci_probe() 2643 info->io.regshift = 0; ipmi_pci_probe() 2653 &pdev->resource[0], info->io.regsize, info->io.regspacing, ipmi_pci_probe() 2746 info->io.addr_type = IPMI_IO_ADDR_SPACE; ipmi_probe() 2749 info->io.addr_type = IPMI_MEM_ADDR_SPACE; ipmi_probe() 2752 info->io.addr_data = resource.start; ipmi_probe() 2754 info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; ipmi_probe() 2755 info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; ipmi_probe() 2756 info->io.regshift = regshift ? 
be32_to_cpup(regshift) : 0; ipmi_probe() 2762 info->io.addr_data, info->io.regsize, info->io.regspacing, ipmi_probe() 2821 info->io.addr_type = IPMI_MEM_ADDR_SPACE; ipmi_parisc_probe() 2822 info->io.addr_data = dev->hpa.start; ipmi_parisc_probe() 2823 info->io.regsize = 1; ipmi_parisc_probe() 2824 info->io.regspacing = 1; ipmi_parisc_probe() 2825 info->io.regshift = 0; ipmi_parisc_probe() 2830 dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data); ipmi_parisc_probe() 3165 addr_space_to_str[smi->io.addr_type], smi_params_proc_show() 3166 smi->io.addr_data, smi_params_proc_show() 3167 smi->io.regspacing, smi_params_proc_show() 3168 smi->io.regsize, smi_params_proc_show() 3169 smi->io.regshift, smi_params_proc_show() 3371 info->io.addr_data = ipmi_defaults[i].port; default_find_bmc() 3372 info->io.addr_type = IPMI_IO_ADDR_SPACE; default_find_bmc() 3374 info->io.addr = NULL; default_find_bmc() 3375 info->io.regspacing = DEFAULT_REGSPACING; default_find_bmc() 3376 info->io.regsize = DEFAULT_REGSPACING; default_find_bmc() 3377 info->io.regshift = 0; default_find_bmc() 3385 addr_space_to_str[info->io.addr_type], default_find_bmc() 3386 info->io.addr_data); default_find_bmc() 3400 if (e->io.addr_type != info->io.addr_type) is_new_interface() 3402 if (e->io.addr_data == info->io.addr_data) is_new_interface() 3447 addr_space_to_str[new_smi->io.addr_type], try_smi_init() 3448 new_smi->io.addr_data, try_smi_init() 3479 &new_smi->io); try_smi_init()
|
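The ipmi_si_intf.c hits show how the driver hides register-layout quirks behind regspacing, regsize and regshift: each virtual register offset maps to addr + offset * regspacing, and when the BMC registers sit inside wider words the interesting byte is isolated with a shift and a 0xff mask, exactly as port_inw()/port_outw() do in the snippet. The sketch below reproduces only that addressing and extraction arithmetic; a small array of 16-bit words stands in for the device, and fake_inw/fake_outw are hypothetical replacements for the real port accessors.

/* Sketch of the regspacing/regshift arithmetic used by port_inw()/
 * port_outw() and the intf_mem_* helpers in ipmi_si_intf.c. */
#include <stdint.h>
#include <stdio.h>

#define REGSPACING 4    /* bytes between successive BMC registers        */
#define REGSHIFT   8    /* BMC byte lives in the high half of each word  */

static uint16_t fake_device[16];        /* simulated register space */

/* inw()/outw() stand-ins: index the word array by byte address */
static uint16_t fake_inw(unsigned int addr)
{
	return fake_device[addr / 2];
}

static void fake_outw(uint16_t v, unsigned int addr)
{
	fake_device[addr / 2] = v;
}

static uint8_t reg_read(unsigned int base, unsigned int offset)
{
	unsigned int addr = base + offset * REGSPACING;

	return (fake_inw(addr) >> REGSHIFT) & 0xff;     /* like port_inw()  */
}

static void reg_write(unsigned int base, unsigned int offset, uint8_t b)
{
	unsigned int addr = base + offset * REGSPACING;

	fake_outw((uint16_t)b << REGSHIFT, addr);       /* like port_outw() */
}

int main(void)
{
	unsigned int base = 0;

	reg_write(base, 1, 0x5A);       /* register 1 lands 4 bytes in */
	printf("register 1 reads back 0x%02X\n", reg_read(base, 1));
	return 0;
}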
H A D | ipmi_smic_sm.c | 110 struct si_sm_io *io; member in struct:si_sm_data 123 struct si_sm_io *io) init_smic_data() 126 smic->io = io; init_smic_data() 199 return smic->io->inputb(smic->io, 2); read_smic_flags() 204 return smic->io->inputb(smic->io, 1); read_smic_status() 209 return smic->io->inputb(smic->io, 0); read_smic_data() 215 smic->io->outputb(smic->io, 2, flags); write_smic_flags() 221 smic->io->outputb(smic->io, 1, control); write_smic_control() 227 smic->io->outputb(smic->io, 0, data); write_si_sm_data() 343 init_smic_data(smic, smic->io); smic_event() 555 init_smic_data(smic, smic->io); smic_event() 122 init_smic_data(struct si_sm_data *smic, struct si_sm_io *io) init_smic_data() argument
|
/linux-4.1.27/arch/cris/include/arch-v32/arch/hwregs/ |
H A D | Makefile | 47 REGDESC += $(BASEDIR)/io/ata/rtl/ata_regs.r 48 REGDESC += $(BASEDIR)/io/bif/rtl/bif_core_regs.r 49 REGDESC += $(BASEDIR)/io/bif/rtl/bif_slave_regs.r 50 #REGDESC += $(BASEDIR)/io/bif/sw/bif_slave_ext_regs.r 53 REGDESC += $(BASEDIR)/io/eth/rtl/eth_regs.r 54 REGDESC += $(BASEDIR)/io/bif/mod/extmem/extmem_regs.r 59 #REGDESC += $(BASEDIR)/io/par_port/rtl/par_regs.r 60 REGDESC += $(BASEDIR)/io/pinmux/rtl/guinness/pinmux_regs.r 61 REGDESC += $(BASEDIR)/io/ser/rtl/ser_regs.r 63 REGDESC += $(BASEDIR)/io/strmux/rtl/guinness/strmux_regs.r 64 REGDESC += $(BASEDIR)/io/timer/rtl/timer_regs.r 65 #REGDESC += $(BASEDIR)/io/usb/usb1_1/rtl/usb_regs.r 90 ata_defs.h: $(BASEDIR)/io/ata/rtl/ata_regs.r 106 eth_defs.h: $(BASEDIR)/io/eth/rtl/eth_regs.r 108 extmem_defs.h: $(BASEDIR)/io/bif/mod/extmem/extmem_regs.r 123 par_defs.h: $(BASEDIR)/io/par_port/rtl/par_regs.r 135 ser_defs.h: $(BASEDIR)/io/ser/rtl/ser_regs.r 141 strmux_defs.h: $(BASEDIR)/io/strmux/rtl/guinness/strmux_regs.r 143 timer_defs.h: $(BASEDIR)/io/timer/rtl/timer_regs.r 145 usb_defs.h: $(BASEDIR)/io/usb/usb1_1/rtl/usb_regs.r
|
/linux-4.1.27/arch/mips/include/asm/mach-au1x00/ |
H A D | au1000_dma.h | 33 #include <linux/io.h> /* need byte IO */ 109 void __iomem *io; member in struct:dma_chan 160 __raw_writel(DMA_BE0, chan->io + DMA_MODE_SET); enable_dma_buffer0() 169 __raw_writel(DMA_BE1, chan->io + DMA_MODE_SET); enable_dma_buffer1() 177 __raw_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET); enable_dma_buffers() 186 __raw_writel(DMA_GO, chan->io + DMA_MODE_SET); start_dma() 198 __raw_writel(DMA_GO, chan->io + DMA_MODE_CLEAR); halt_dma() 202 if (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) halt_dma() 218 __raw_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR); disable_dma() 227 return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0; dma_halted() 242 __raw_writel(CPHYSADDR(chan->fifo_addr), chan->io + DMA_PERIPHERAL_ADDR); init_dma() 248 __raw_writel(~mode, chan->io + DMA_MODE_CLEAR); init_dma() 249 __raw_writel(mode, chan->io + DMA_MODE_SET); init_dma() 286 return (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0; get_dma_active_buffer() 307 __raw_writel(CPHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR); set_dma_fifo_addr() 319 __raw_writel(DMA_D0, chan->io + DMA_MODE_CLEAR); clear_dma_done0() 328 __raw_writel(DMA_D1, chan->io + DMA_MODE_CLEAR); clear_dma_done1() 347 __raw_writel(a, chan->io + DMA_BUFFER0_START); set_dma_addr0() 359 __raw_writel(a, chan->io + DMA_BUFFER1_START); set_dma_addr1() 373 __raw_writel(count, chan->io + DMA_BUFFER0_COUNT); set_dma_count0() 386 __raw_writel(count, chan->io + DMA_BUFFER1_COUNT); set_dma_count1() 399 __raw_writel(count, chan->io + DMA_BUFFER0_COUNT); set_dma_count() 400 __raw_writel(count, chan->io + DMA_BUFFER1_COUNT); set_dma_count() 413 return __raw_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1); get_dma_buffer_done() 440 curBufCntReg = (__raw_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? get_dma_residue() 443 count = __raw_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK; get_dma_residue()
|
/linux-4.1.27/drivers/isdn/hardware/eicon/ |
H A D | io.c | 35 #include "io.h" 590 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_in() 592 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_in() 598 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_inw() 600 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_inw() 605 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_in_dw() 610 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_in_dw() 614 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_in_buffer() 616 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_in_buffer() 620 PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io; mem_look_ahead() 628 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_out() 630 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_out() 634 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_outw() 636 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_outw() 640 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_out_dw() 646 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_out_dw() 650 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_out_buffer() 652 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_out_buffer() 656 volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io); mem_inc() 659 DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base); mem_inc() 662 /* ram access functions for io-mapped cards */ 667 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_in() 670 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_in() 676 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_inw() 679 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_inw() 684 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_in_buffer() 693 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_in_buffer() 699 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_in_buffer() 703 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_look_ahead() 705 ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port); io_look_ahead() 706 inppw_buffer(Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1); io_look_ahead() 707 e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer); io_look_ahead() 708 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_look_ahead() 712 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_out() 715 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_out() 719 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_outw() 722 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_outw() 726 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_out_buffer() 735 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_out_buffer() 741 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_out_buffer() 746 byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io); io_inc() 751 DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port); io_inc() 760 IoAdapter = (PISDN_ADAPTER) a->io; free_entity() 770 IoAdapter = (PISDN_ADAPTER) a->io; assign_queue() 782 IoAdapter = (PISDN_ADAPTER) a->io; get_assign() 798 IoAdapter = (PISDN_ADAPTER) a->io; req_queue() 814 IoAdapter = (PISDN_ADAPTER) a->io; look_req() 821 IoAdapter = (PISDN_ADAPTER) a->io; next_req() 833 IoAdapter = 
(PISDN_ADAPTER)a->io; entity_ptr()
|
H A D | Makefile | 12 divas-y := divasmain.o divasfunc.o di.o io.o istream.o \
|
/linux-4.1.27/drivers/isdn/sc/ |
H A D | init.c | 30 static unsigned int io[] = {0, 0, 0, 0}; variable 35 module_param_array(io, int, NULL, 0); 78 pr_debug("I/O Base for board %d is 0x%x, %s probe\n", b, io[b], sc_init() 79 io[b] == 0 ? "will" : "won't"); sc_init() 80 if (io[b]) { sc_init() 85 if (!request_region(io[b] + i * 0x400, 1, "sc test")) { sc_init() 86 pr_debug("request_region for 0x%x failed\n", io[b] + i * 0x400); sc_init() 87 io[b] = 0; sc_init() 90 release_region(io[b] + i * 0x400, 1); sc_init() 96 if (io[b] == 0) { sc_init() 101 outb(0x18, io[b] + 0x400 * EXP_PAGE0); sc_init() 102 if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { sc_init() 104 io[b] + 0x400 * EXP_PAGE0); sc_init() 134 io[b] = i; sc_init() 135 outb(0x18, io[b] + 0x400 * EXP_PAGE0); sc_init() 136 if (inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) { sc_init() 154 outb(0xFF, io[b] + RESET_OFFSET); sc_init() 168 model = identify_board(ram[b], io[b]); sc_init() 180 model = identify_board(i, io[b]); sc_init() 346 sc_adapter[cinst]->iobase = io[b]; sc_init() 348 sc_adapter[cinst]->ioport[i] = io[b] + i * 0x400; sc_init() 354 sc_adapter[cinst]->ioport[IRQ_SELECT] = io[b] + 0x2; sc_init() 366 boardname[model], channels, irq[b], io[b], ram[b]); sc_init() 446 pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n", identify_board()
|
/linux-4.1.27/drivers/scsi/ |
H A D | dmx3191d.c | 29 #include <asm/io.h> 76 unsigned long io; dmx3191d_probe_one() local 82 io = pci_resource_start(pdev, 0); dmx3191d_probe_one() 83 if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) { dmx3191d_probe_one() 85 io, io + DMX3191D_REGION_LEN); dmx3191d_probe_one() 93 shost->io_port = io; dmx3191d_probe_one() 112 release_region(io, DMX3191D_REGION_LEN); dmx3191d_probe_one()
|
/linux-4.1.27/crypto/ |
H A D | camellia_generic.c | 864 static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max) camellia_do_encrypt() argument 869 io[0] ^= SUBKEY_L(0); camellia_do_encrypt() 870 io[1] ^= SUBKEY_R(0); camellia_do_encrypt() 874 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_encrypt() 876 io[2], io[3], il, ir); \ camellia_do_encrypt() 877 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_encrypt() 879 io[0], io[1], il, ir); \ camellia_do_encrypt() 880 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_encrypt() 882 io[2], io[3], il, ir); \ camellia_do_encrypt() 883 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_encrypt() 885 io[0], io[1], il, ir); \ camellia_do_encrypt() 886 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_encrypt() 888 io[2], io[3], il, ir); \ camellia_do_encrypt() 889 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_encrypt() 891 io[0], io[1], il, ir); \ camellia_do_encrypt() 894 CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ camellia_do_encrypt() 914 io[2] ^= SUBKEY_L(max); camellia_do_encrypt() 915 io[3] ^= SUBKEY_R(max); camellia_do_encrypt() 916 /* NB: io[0],[1] should be swapped with [2],[3] by caller! */ camellia_do_encrypt() 919 static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i) camellia_do_decrypt() argument 924 io[0] ^= SUBKEY_L(i); camellia_do_decrypt() 925 io[1] ^= SUBKEY_R(i); camellia_do_decrypt() 929 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_decrypt() 931 io[2], io[3], il, ir); \ camellia_do_decrypt() 932 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_decrypt() 934 io[0], io[1], il, ir); \ camellia_do_decrypt() 935 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_decrypt() 937 io[2], io[3], il, ir); \ camellia_do_decrypt() 938 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_decrypt() 940 io[0], io[1], il, ir); \ camellia_do_decrypt() 941 CAMELLIA_ROUNDSM(io[0], io[1], \ camellia_do_decrypt() 943 io[2], io[3], il, ir); \ camellia_do_decrypt() 944 CAMELLIA_ROUNDSM(io[2], io[3], \ camellia_do_decrypt() 946 io[0], io[1], il, ir); \ camellia_do_decrypt() 949 CAMELLIA_FLS(io[0], io[1], io[2], io[3], \ camellia_do_decrypt() 969 io[2] ^= SUBKEY_L(0); camellia_do_decrypt() 970 io[3] ^= SUBKEY_R(0); camellia_do_decrypt()
|
/linux-4.1.27/drivers/scsi/arm/ |
H A D | Makefile | 5 acornscsi_mod-objs := acornscsi.o acornscsi-io.o
|
/linux-4.1.27/drivers/staging/sm750fb/ |
H A D | ddk750_help.c | 9 /* after driver mapped io registers, use this function first */ ddk750_set_mmio()
|
H A D | ddk750_help.h | 7 #include <asm/io.h>
|
/linux-4.1.27/arch/sparc/lib/ |
H A D | iomap.c | 6 #include <asm/io.h>
|
/linux-4.1.27/arch/sparc/mm/ |
H A D | Makefile | 10 obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | dmi.h | 7 #include <asm/io.h>
|
/linux-4.1.27/arch/metag/kernel/ |
H A D | da.c | 9 #include <linux/io.h>
|
/linux-4.1.27/arch/microblaze/pci/ |
H A D | iomap.c | 10 #include <linux/io.h>
|
/linux-4.1.27/include/acpi/ |
H A D | acpi_io.h | 4 #include <linux/io.h>
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | dma.h | 4 #include <asm/io.h>
|
/linux-4.1.27/arch/sh/boards/mach-se/ |
H A D | board-se7619.c | 11 #include <asm/io.h>
|
/linux-4.1.27/arch/sh/include/mach-sh03/mach/ |
H A D | io.h | 2 * include/asm-sh/sh03/io.h
|
/linux-4.1.27/arch/sh/kernel/cpu/sh3/ |
H A D | serial-sh7710.c | 3 #include <linux/io.h>
|
/linux-4.1.27/arch/sh/kernel/cpu/sh4a/ |
H A D | serial-sh7722.c | 3 #include <linux/io.h>
|
/linux-4.1.27/arch/mips/include/asm/sn/ |
H A D | hub.h | 7 #include <asm/sn/io.h>
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | agp.h | 5 #include <asm/io.h>
|
H A D | ide.h | 10 #include <asm/io.h>
|
H A D | floppy.h | 34 #define fd_dma_setup(addr,size,mode,io) fd_ops->_dma_setup(addr,size,mode,io) 42 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); 117 static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) vdma_dma_setup() argument 120 virtual_dma_port = io; vdma_dma_setup() 128 static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) hard_dma_setup() argument 159 virtual_dma_port = io; hard_dma_setup()
|
/linux-4.1.27/arch/cris/arch-v32/kernel/ |
H A D | crisksyms.c | 6 #include <arch/io.h>
|
/linux-4.1.27/arch/cris/arch-v32/mm/ |
H A D | l2cache.c | 8 #include <asm/io.h>
|
/linux-4.1.27/arch/ia64/sn/kernel/sn2/ |
H A D | Makefile | 14 obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
|
/linux-4.1.27/arch/ia64/sn/pci/pcibr/ |
H A D | Makefile | 8 # Makefile for the sn2 io routines.
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | v7m.c | 8 #include <linux/io.h>
|
/linux-4.1.27/arch/alpha/include/asm/ |
H A D | agp.h | 4 #include <asm/io.h>
|
H A D | mc146818rtc.h | 7 #include <asm/io.h>
|
/linux-4.1.27/sound/pci/emu10k1/ |
H A D | Makefile | 7 irq.o memory.o voice.o emumpu401.o emupcm.o io.o \
|
/linux-4.1.27/include/linux/ |
H A D | clksrc-dbx500-prcmu.h | 12 #include <linux/io.h>
|
H A D | dm-io.h | 61 struct dm_io_memory mem; /* Memory to use for io */ 67 * For async io calls, users can alternatively use the dm_io() function below 78 * error occurred doing io to the corresponding region.
|
H A D | ioprio.h | 21 * These are the io priority groups as implemented by CFQ. RT is the realtime 50 * if process has set io priority explicitly, use that. if not, convert 51 * the cpu scheduler nice value to an io priority
|
/linux-4.1.27/drivers/xen/ |
H A D | biomerge.c | 2 #include <linux/io.h>
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
H A D | 8255.c | 58 int (*io)(struct comedi_device *, int, int, int, unsigned long); member in struct:subdev_8255_private 94 spriv->io(dev, 1, I8255_DATA_A_REG, subdev_8255_insn() 97 spriv->io(dev, 1, I8255_DATA_B_REG, subdev_8255_insn() 100 spriv->io(dev, 1, I8255_DATA_C_REG, subdev_8255_insn() 104 v = spriv->io(dev, 0, I8255_DATA_A_REG, 0, regbase); subdev_8255_insn() 105 v |= (spriv->io(dev, 0, I8255_DATA_B_REG, 0, regbase) << 8); subdev_8255_insn() 106 v |= (spriv->io(dev, 0, I8255_DATA_C_REG, 0, regbase) << 16); subdev_8255_insn() 131 spriv->io(dev, 1, I8255_CTRL_REG, config, regbase); subdev_8255_do_config() 163 int (*io)(struct comedi_device *, __subdev_8255_init() 174 if (io) __subdev_8255_init() 175 spriv->io = io; __subdev_8255_init() 177 spriv->io = subdev_8255_mmio; __subdev_8255_init() 179 spriv->io = subdev_8255_io; __subdev_8255_init() 199 * @io: (optional) register I/O call-back function 223 int (*io)(struct comedi_device *, subdev_8255_init() 227 return __subdev_8255_init(dev, s, io, regbase, false); subdev_8255_init() 235 * @io: (optional) register I/O call-back function 259 int (*io)(struct comedi_device *, subdev_8255_mm_init() 263 return __subdev_8255_init(dev, s, io, regbase, true); subdev_8255_mm_init()
|
/linux-4.1.27/drivers/input/touchscreen/ |
H A D | s3c2410_ts.c | 35 #include <linux/io.h> 67 * @io: Pointer to the IO base. 80 void __iomem *io; member in struct:s3c2410ts 111 data0 = readl(ts.io + S3C2410_ADCDAT0); touch_timer_fire() 112 data1 = readl(ts.io + S3C2410_ADCDAT1); touch_timer_fire() 144 writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC); touch_timer_fire() 163 data0 = readl(ts.io + S3C2410_ADCDAT0); stylus_irq() 164 data1 = readl(ts.io + S3C2410_ADCDAT1); stylus_irq() 179 writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP); stylus_irq() 226 ts.io + S3C2410_ADCTSC); s3c24xx_ts_select() 229 writel(WAIT4INT | INT_UP, ts.io + S3C2410_ADCTSC); s3c24xx_ts_select() 283 ts.io = ioremap(res->start, resource_size(res)); s3c2410ts_probe() 284 if (ts.io == NULL) { s3c2410ts_probe() 304 writel(info->delay & 0xffff, ts.io + S3C2410_ADCDLY); s3c2410ts_probe() 306 writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC); s3c2410ts_probe() 354 iounmap(ts.io); s3c2410ts_probe() 376 iounmap(ts.io); s3c2410ts_remove() 384 writel(TSC_SLEEP, ts.io + S3C2410_ADCTSC); s3c2410ts_suspend() 401 writel(info->delay & 0xffff, ts.io + S3C2410_ADCDLY); s3c2410ts_resume() 403 writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC); s3c2410ts_resume()
|
/linux-4.1.27/sound/isa/msnd/ |
H A D | msnd_pinnacle.c | 59 #include <linux/io.h> 175 /* inb(chip->io + HP_RXL); */ snd_msnd_interrupt() 191 inb(chip->io + HP_RXL); snd_msnd_interrupt() 196 static int snd_msnd_reset_dsp(long io, unsigned char *info) snd_msnd_reset_dsp() argument 200 outb(HPDSPRESET_ON, io + HP_DSPR); snd_msnd_reset_dsp() 204 *info = inb(io + HP_INFO); snd_msnd_reset_dsp() 206 outb(HPDSPRESET_OFF, io + HP_DSPR); snd_msnd_reset_dsp() 209 if (inb(io + HP_CVR) == HP_CVR_DEF) snd_msnd_reset_dsp() 228 if (!request_region(chip->io, DSP_NUMIO, "probing")) { snd_msnd_probe() 233 if (snd_msnd_reset_dsp(chip->io, &info) < 0) { snd_msnd_probe() 234 release_region(chip->io, DSP_NUMIO); snd_msnd_probe() 244 chip->io, chip->io + DSP_NUMIO - 1, snd_msnd_probe() 305 chip->io, chip->io + DSP_NUMIO - 1, snd_msnd_probe() 310 release_region(chip->io, DSP_NUMIO); snd_msnd_probe() 321 outb(chip->memid, chip->io + HP_MEMM); snd_msnd_init_sma() 323 outb(HPBLKSEL_0, chip->io + HP_BLKS); snd_msnd_init_sma() 336 outb(HPBLKSEL_1, chip->io + HP_BLKS); snd_msnd_init_sma() 338 outb(HPBLKSEL_0, chip->io + HP_BLKS); snd_msnd_init_sma() 389 outb(HPBLKSEL_0, chip->io + HP_BLKS); upload_dsp_code() 421 outb(HPPRORESET_ON, chip->io + HP_PROR); reset_proteus() 423 outb(HPPRORESET_OFF, chip->io + HP_PROR); reset_proteus() 434 outb(HPWAITSTATE_0, chip->io + HP_WAIT); snd_msnd_initialize() 435 outb(HPBITMODE_16, chip->io + HP_BITM); snd_msnd_initialize() 445 err = snd_msnd_reset_dsp(chip->io, NULL); snd_msnd_initialize() 554 if (request_region(chip->io, DSP_NUMIO, card->shortname) == NULL) { snd_msnd_attach() 563 release_region(chip->io, DSP_NUMIO); snd_msnd_attach() 632 release_region(chip->io, DSP_NUMIO); snd_msnd_attach() 644 release_region(chip->io, DSP_NUMIO); snd_msnd_unload() 664 static int snd_msnd_write_cfg_io0(int cfg, int num, u16 io) snd_msnd_write_cfg_io0() argument 668 if (snd_msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io))) snd_msnd_write_cfg_io0() 670 if (snd_msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io))) snd_msnd_write_cfg_io0() 675 static int snd_msnd_write_cfg_io1(int cfg, int num, u16 io) snd_msnd_write_cfg_io1() argument 679 if (snd_msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io))) snd_msnd_write_cfg_io1() 681 if (snd_msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io))) snd_msnd_write_cfg_io1() 764 static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; variable 803 module_param_array(io, long, NULL, S_IRUGO); 804 MODULE_PARM_DESC(io, "IO port #"); 824 if (io[i] == SNDRV_AUTO_PORT) snd_msnd_isa_match() 828 printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n"); snd_msnd_isa_match() 833 if (!(io[i] == 0x290 || snd_msnd_isa_match() 834 io[i] == 0x260 || snd_msnd_isa_match() 835 io[i] == 0x250 || snd_msnd_isa_match() 836 io[i] == 0x240 || snd_msnd_isa_match() 837 io[i] == 0x230 || snd_msnd_isa_match() 838 io[i] == 0x220 || snd_msnd_isa_match() 839 io[i] == 0x210 || snd_msnd_isa_match() 840 io[i] == 0x3e0)) { snd_msnd_isa_match() 841 printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set " snd_msnd_isa_match() 847 if (io[i] < 0x100 || io[i] > 0x3e0 || (io[i] % 0x10) != 0) { snd_msnd_isa_match() 849 ": \"io\" - DSP I/O base must within the range 0x100 " snd_msnd_isa_match() 963 io[idx], 0, snd_msnd_isa_probe() 1022 chip->io = io[idx]; snd_msnd_isa_probe() 1135 io[idx] = pnp_port_start(pnp_dev, 0); snd_msnd_pnp_detect() 1147 chip->io = io[idx]; snd_msnd_pnp_detect()
|
H A D | msnd.c | 41 #include <linux/io.h> 67 unsigned int io = dev->io; snd_msnd_wait_TXDE() local 71 if (inb(io + HP_ISR) & HPISR_TXDE) snd_msnd_wait_TXDE() 79 unsigned int io = dev->io; snd_msnd_wait_HC0() local 83 if (!(inb(io + HP_CVR) & HPCVR_HC)) snd_msnd_wait_HC0() 95 outb(cmd, dev->io + HP_CVR); snd_msnd_send_dsp_cmd() 110 unsigned int io = dev->io; snd_msnd_send_word() local 113 outb(high, io + HP_TXH); snd_msnd_send_word() 114 outb(mid, io + HP_TXM); snd_msnd_send_word() 115 outb(low, io + HP_TXL); snd_msnd_send_word() 139 inb(dev->io + HP_RXL); snd_msnd_upload_host() 140 inb(dev->io + HP_CVR); snd_msnd_upload_host() 157 outb(inb(dev->io + HP_ICR) | HPICR_TREQ, dev->io + HP_ICR); snd_msnd_enable_irq() 159 outb(dev->irqid, dev->io + HP_IRQM); snd_msnd_enable_irq() 161 outb(inb(dev->io + HP_ICR) & ~HPICR_TREQ, dev->io + HP_ICR); snd_msnd_enable_irq() 162 outb(inb(dev->io + HP_ICR) | HPICR_RREQ, dev->io + HP_ICR); snd_msnd_enable_irq() 192 outb(inb(dev->io + HP_ICR) & ~HPICR_RREQ, dev->io + HP_ICR); snd_msnd_disable_irq() 194 outb(HPIRQ_NONE, dev->io + HP_IRQM); snd_msnd_disable_irq() 292 outb(HPBLKSEL_1, chip->io + HP_BLKS); snd_msnd_DARQ() 297 outb(HPBLKSEL_0, chip->io + HP_BLKS); snd_msnd_DARQ() 300 outb(HPBLKSEL_0, chip->io + HP_BLKS); snd_msnd_DARQ() 414 outb(HPBLKSEL_1, chip->io + HP_BLKS); snd_msnd_capture_reset_queue() 416 outb(HPBLKSEL_0, chip->io + HP_BLKS); snd_msnd_capture_reset_queue()
|
/linux-4.1.27/drivers/isdn/hardware/mISDN/ |
H A D | mISDNinfineon.c | 96 struct _ioport io; member in union:_ioaddr::__anon5269 272 IOFUNC_IO(ISAC, inf_hw, isac.a.io) 273 IOFUNC_IO(IPAC, inf_hw, hscx.a.io) 274 IOFUNC_IND(ISAC, inf_hw, isac.a.io) 275 IOFUNC_IND(IPAC, inf_hw, hscx.a.io) 730 hw->isac.a.io.ale = (u32)hw->cfg.start + DIVA_ISAC_ALE; setup_io() 731 hw->isac.a.io.port = (u32)hw->cfg.start + DIVA_ISAC_PORT; setup_io() 733 hw->hscx.a.io.ale = (u32)hw->cfg.start + DIVA_HSCX_ALE; setup_io() 734 hw->hscx.a.io.port = (u32)hw->cfg.start + DIVA_HSCX_PORT; setup_io() 756 hw->isac.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; setup_io() 757 hw->isac.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; setup_io() 759 hw->hscx.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; setup_io() 760 hw->hscx.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; setup_io() 771 hw->isac.a.io.ale = (u32)hw->addr.start; setup_io() 772 hw->isac.a.io.port = (u32)hw->addr.start + 1; setup_io() 774 hw->hscx.a.io.ale = (u32)hw->addr.start; setup_io() 775 hw->hscx.a.io.port = (u32)hw->addr.start + 1; setup_io() 781 hw->isac.a.io.ale = (u32)hw->addr.start + NICCY_ISAC_ALE; setup_io() 782 hw->isac.a.io.port = (u32)hw->addr.start + NICCY_ISAC_PORT; setup_io() 784 hw->hscx.a.io.ale = (u32)hw->addr.start + NICCY_HSCX_ALE; setup_io() 785 hw->hscx.a.io.port = (u32)hw->addr.start + NICCY_HSCX_PORT; setup_io() 790 hw->isac.a.io.ale = (u32)hw->addr.start; setup_io() 791 hw->isac.a.io.port = hw->isac.a.io.ale + 4; setup_io() 793 hw->hscx.a.io.ale = hw->isac.a.io.ale; setup_io() 794 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io() 800 hw->isac.a.io.ale = (u32)hw->addr.start + 0x08; setup_io() 801 hw->isac.a.io.port = hw->isac.a.io.ale + 4; setup_io() 803 hw->hscx.a.io.ale = hw->isac.a.io.ale; setup_io() 804 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io() 810 hw->isac.a.io.ale = (u32)hw->addr.start + 0x10; setup_io() 811 hw->isac.a.io.port = hw->isac.a.io.ale + 4; setup_io() 813 hw->hscx.a.io.ale = hw->isac.a.io.ale; setup_io() 814 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io() 820 hw->isac.a.io.ale = (u32)hw->addr.start + 0x20; setup_io() 821 hw->isac.a.io.port = hw->isac.a.io.ale + 4; setup_io() 823 hw->hscx.a.io.ale = hw->isac.a.io.ale; setup_io() 824 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io() 831 hw->isac.a.io.port = (u32)hw->addr.start; setup_io() 833 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io() 839 hw->isac.a.io.ale = (u32)hw->addr.start; setup_io() 840 hw->isac.a.io.port = (u32)hw->addr.start + GAZEL_IPAC_DATA_PORT; setup_io() 842 hw->hscx.a.io.ale = hw->isac.a.io.ale; setup_io() 843 hw->hscx.a.io.port = hw->isac.a.io.port; setup_io()
|
/linux-4.1.27/drivers/net/irda/ |
H A D | w83977af_ir.c | 55 #include <asm/io.h> 73 static unsigned int io[] = { 0x180, ~0, ~0, ~0 }; variable 113 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) { w83977af_init() 114 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) w83977af_init() 183 self->io.fir_base = iobase; w83977af_open() 184 self->io.irq = irq; w83977af_open() 185 self->io.fir_ext = CHIP_IO_EXTENT; w83977af_open() 186 self->io.dma = dma; w83977af_open() 187 self->io.fifo_size = 32; w83977af_open() 266 iobase = self->io.fir_base; w83977af_close() 285 __func__ , self->io.fir_base); w83977af_close() 286 release_region(self->io.fir_base, self->io.fir_ext); w83977af_close() 405 iobase = self->io.fir_base; w83977af_change_speed() 408 self->io.speed = speed; w83977af_change_speed() 490 iobase = self->io.fir_base; w83977af_hard_xmit() 500 if ((speed != self->io.speed) && (speed != -1)) { w83977af_hard_xmit() 514 if (self->io.speed > PIO_MAX_SPEED) { w83977af_hard_xmit() 566 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, w83977af_dma_write() 568 self->io.direction = IO_XMIT; w83977af_dma_write() 632 iobase = self->io.fir_base; w83977af_dma_xmit_complete() 686 iobase= self->io.fir_base; w83977af_dma_receive() 700 self->io.direction = IO_RECV; w83977af_dma_receive() 706 disable_dma(self->io.dma); w83977af_dma_receive() 707 clear_dma_ff(self->io.dma); w83977af_dma_receive() 708 set_dma_mode(self->io.dma, DMA_MODE_READ); w83977af_dma_receive() 709 set_dma_addr(self->io.dma, self->rx_buff_dma); w83977af_dma_receive() 710 set_dma_count(self->io.dma, self->rx_buff.truesize); w83977af_dma_receive() 712 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, w83977af_dma_receive() 729 enable_dma(self->io.dma); w83977af_dma_receive() 759 iobase = self->io.fir_base; w83977af_dma_receive_complete() 764 iobase = self->io.fir_base; w83977af_dma_receive_complete() 833 if (self->io.speed < 4000000) { w83977af_dma_receive_complete() 874 iobase = self->io.fir_base; w83977af_pio_receive() 899 iobase = self->io.fir_base; w83977af_sir_interrupt() 903 actual = w83977af_pio_write(self->io.fir_base, w83977af_sir_interrupt() 906 self->io.fifo_size); w83977af_sir_interrupt() 911 self->io.direction = IO_XMIT; w83977af_sir_interrupt() 939 self->io.direction = IO_RECV; w83977af_sir_interrupt() 965 iobase = self->io.fir_base; w83977af_fir_interrupt() 999 if (self->io.direction == IO_XMIT) { w83977af_fir_interrupt() 1047 iobase = self->io.fir_base; w83977af_interrupt() 1060 if (self->io.speed > PIO_MAX_SPEED ) w83977af_interrupt() 1085 if (self->io.speed > 115200) { w83977af_is_receiving() 1086 iobase = self->io.fir_base; w83977af_is_receiving() 1121 iobase = self->io.fir_base; w83977af_net_open() 1123 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name, w83977af_net_open() 1131 if (request_dma(self->io.dma, dev->name)) { w83977af_net_open() 1132 free_irq(self->io.irq, dev); w83977af_net_open() 1141 if (self->io.speed > 115200) { w83977af_net_open() 1154 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base); w83977af_net_open() 1183 iobase = self->io.fir_base; w83977af_net_close() 1193 disable_dma(self->io.dma); w83977af_net_close() 1202 free_irq(self->io.irq, dev); w83977af_net_close() 1203 free_dma(self->io.dma); w83977af_net_close() 1267 module_param_array(io, int, NULL, 0); 1268 MODULE_PARM_DESC(io, "Base I/O addresses");
|
H A D | via-ircc.c | 54 #include <asm/io.h> 300 self->io.cfg_base = info->cfg_base; via_ircc_open() 301 self->io.fir_base = info->fir_base; via_ircc_open() 302 self->io.irq = info->irq; via_ircc_open() 303 self->io.fir_ext = CHIP_IO_EXTENT; via_ircc_open() 304 self->io.dma = info->dma; via_ircc_open() 305 self->io.dma2 = info->dma2; via_ircc_open() 306 self->io.fifo_size = 32; via_ircc_open() 312 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { via_ircc_open() 314 __func__, self->io.fir_base); via_ircc_open() 324 dongle_id = via_ircc_read_dongle_id(self->io.fir_base); via_ircc_open() 325 self->io.dongle_id = dongle_id; via_ircc_open() 329 switch( self->io.dongle_id ){ via_ircc_open() 393 self->io.speed = 9600; via_ircc_open() 403 release_region(self->io.fir_base, self->io.fir_ext); via_ircc_open() 420 iobase = self->io.fir_base; via_remove_one() 428 __func__, self->io.fir_base); via_remove_one() 429 release_region(self->io.fir_base, self->io.fir_ext); via_remove_one() 451 int iobase = self->io.fir_base; via_hw_init() 489 self->io.speed = 9600; via_hw_init() 492 via_ircc_change_dongle_speed(iobase, self->io.speed, via_hw_init() 493 self->io.dongle_id); via_hw_init() 665 iobase = self->io.fir_base; via_ircc_change_speed() 667 self->io.speed = speed; via_ircc_change_speed() 720 via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); via_ircc_change_speed() 768 iobase = self->io.fir_base; via_ircc_hard_xmit_sir() 773 if ((speed != self->io.speed) && (speed != -1)) { via_ircc_hard_xmit_sir() 799 SetBaudRate(iobase, self->io.speed); via_ircc_hard_xmit_sir() 817 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, via_ircc_hard_xmit_sir() 839 iobase = self->io.fir_base; via_ircc_hard_xmit_fir() 849 if ((speed != self->io.speed) && (speed != -1)) { via_ircc_hard_xmit_fir() 882 self->io.direction = IO_XMIT; via_ircc_dma_xmit() 894 irda_setup_dma(self->io.dma, via_ircc_dma_xmit() 922 iobase = self->io.fir_base; via_ircc_dma_xmit_complete() 924 // DisableDmaChannel(self->io.dma); via_ircc_dma_xmit_complete() 986 iobase = self->io.fir_base; via_ircc_dma_receive() 991 self->io.direction = IO_RECV; via_ircc_dma_receive() 1009 irda_setup_dma(self->io.dma2, self->rx_buff_dma, via_ircc_dma_receive() 1032 iobase = self->io.fir_base; via_ircc_dma_receive_complete() 1035 if (self->io.speed < 4000000) { //Speed below FIR via_ircc_dma_receive_complete() 1288 iobase = self->io.fir_base; via_ircc_interrupt() 1302 if (self->io.direction == IO_XMIT) { via_ircc_interrupt() 1305 if (self->io.direction == IO_RECV) { via_ircc_interrupt() 1388 iobase = self->io.fir_base; hwreset() 1411 via_ircc_change_speed(self, self->io.speed); hwreset() 1429 iobase = self->io.fir_base; via_ircc_is_receiving() 1455 iobase = self->io.fir_base; via_ircc_net_open() 1456 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) { via_ircc_net_open() 1458 driver_name, self->io.irq); via_ircc_net_open() 1465 if (request_dma(self->io.dma, dev->name)) { via_ircc_net_open() 1467 driver_name, self->io.dma); via_ircc_net_open() 1468 free_irq(self->io.irq, dev); via_ircc_net_open() 1471 if (self->io.dma2 != self->io.dma) { via_ircc_net_open() 1472 if (request_dma(self->io.dma2, dev->name)) { via_ircc_net_open() 1474 driver_name, self->io.dma2); via_ircc_net_open() 1475 free_irq(self->io.irq, dev); via_ircc_net_open() 1476 free_dma(self->io.dma); via_ircc_net_open() 1526 iobase = self->io.fir_base; via_ircc_net_close() 1529 DisableDmaChannel(self->io.dma); via_ircc_net_close() 1533 
free_irq(self->io.irq, dev); via_ircc_net_close() 1534 free_dma(self->io.dma); via_ircc_net_close() 1535 if (self->io.dma2 != self->io.dma) via_ircc_net_close() 1536 free_dma(self->io.dma2); via_ircc_net_close()
|
H A D | ali-ircc.c | 40 #include <asm/io.h> 71 static unsigned int io[] = { ~0, ~0, ~0, ~0 }; variable 179 info.fir_base = io[i]; ali_ircc_init() 211 if (io[i] < 2000) ali_ircc_init() 312 self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */ ali_ircc_open() 313 self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */ ali_ircc_open() 314 self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */ ali_ircc_open() 315 self->io.irq = info->irq; ali_ircc_open() 316 self->io.fir_ext = CHIP_IO_EXTENT; ali_ircc_open() 317 self->io.dma = info->dma; ali_ircc_open() 318 self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */ ali_ircc_open() 321 if (!request_region(self->io.fir_base, self->io.fir_ext, ali_ircc_open() 324 __func__, self->io.fir_base); ali_ircc_open() 387 self->io.dongle_id = dongle_id; ali_ircc_open() 399 release_region(self->io.fir_base, self->io.fir_ext); ali_ircc_open() 419 iobase = self->io.fir_base; ali_ircc_close() 425 pr_debug("%s(), Releasing Region %03x\n", __func__, self->io.fir_base); ali_ircc_close() 426 release_region(self->io.fir_base, self->io.fir_ext); ali_ircc_close() 667 if (self->io.speed > 115200) ali_ircc_interrupt() 688 iobase = self->io.fir_base; ali_ircc_fir_interrupt() 711 if (self->io.direction == IO_XMIT) /* TX */ ali_ircc_fir_interrupt() 775 if (self->io.direction == IO_XMIT) ali_ircc_fir_interrupt() 814 iobase = self->io.sir_base; ali_ircc_sir_interrupt() 866 iobase = self->io.sir_base; ali_ircc_sir_receive() 900 iobase = self->io.sir_base; ali_ircc_sir_write_wakeup() 906 actual = ali_ircc_sir_write(iobase, self->io.fifo_size, ali_ircc_sir_write_wakeup() 925 if (self->io.speed > 115200) ali_ircc_sir_write_wakeup() 959 iobase = self->io.fir_base; ali_ircc_change_speed() 1006 iobase = self->io.fir_base; ali_ircc_fir_change_speed() 1008 pr_debug("%s(), self->io.speed = %d, change to speed = %d\n", ali_ircc_fir_change_speed() 1009 __func__, self->io.speed, baud); ali_ircc_fir_change_speed() 1012 if(self->io.speed <=115200) ali_ircc_fir_change_speed() 1018 self->io.speed = baud; ali_ircc_fir_change_speed() 1045 iobase = self->io.sir_base; ali_ircc_sir_change_speed() 1048 if(self->io.speed >115200) ali_ircc_sir_change_speed() 1062 self->io.speed = speed; ali_ircc_sir_change_speed() 1075 if (self->io.speed < 38400) ali_ircc_sir_change_speed() 1105 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */ ali_ircc_change_dongle_speed() 1106 dongle_id = self->io.dongle_id; ali_ircc_change_dongle_speed() 1324 iobase = self->io.fir_base; ali_ircc_net_open() 1327 if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev)) ali_ircc_net_open() 1330 ALI_IRCC_DRIVER_NAME, self->io.irq); ali_ircc_net_open() 1338 if (request_dma(self->io.dma, dev->name)) { ali_ircc_net_open() 1340 ALI_IRCC_DRIVER_NAME, self->io.dma); ali_ircc_net_open() 1341 free_irq(self->io.irq, dev); ali_ircc_net_open() 1352 sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base); ali_ircc_net_open() 1390 disable_dma(self->io.dma); ali_ircc_net_close() 1395 free_irq(self->io.irq, dev); ali_ircc_net_close() 1396 free_dma(self->io.dma); ali_ircc_net_close() 1419 iobase = self->io.fir_base; ali_ircc_fir_hard_xmit() 1432 if ((speed != self->io.speed) && (speed != -1)) { ali_ircc_fir_hard_xmit() 1511 self->io.direction = IO_XMIT; ali_ircc_fir_hard_xmit() 1557 iobase = self->io.fir_base; ali_ircc_dma_xmit() 1570 self->io.direction = IO_XMIT; ali_ircc_dma_xmit() 1572 irda_setup_dma(self->io.dma, ali_ircc_dma_xmit() 1625 iobase = 
self->io.fir_base; ali_ircc_dma_xmit_complete() 1695 iobase = self->io.fir_base; ali_ircc_dma_receive() 1714 self->io.direction = IO_RECV; ali_ircc_dma_receive() 1724 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, ali_ircc_dma_receive() 1755 iobase = self->io.fir_base; ali_ircc_dma_receive_complete() 1938 iobase = self->io.sir_base; ali_ircc_sir_hard_xmit() 1951 if ((speed != self->io.speed) && (speed != -1)) { ali_ircc_sir_hard_xmit() 2058 if (self->io.speed > 115200) ali_ircc_is_receiving() 2060 iobase = self->io.fir_base; ali_ircc_is_receiving() 2089 if (self->io.suspended) ali_ircc_suspend() 2094 self->io.suspended = 1; ali_ircc_suspend() 2103 if (!self->io.suspended) ali_ircc_resume() 2110 self->io.suspended = 0; ali_ircc_resume() 2122 int iobase = self->io.fir_base; /* or sir_base */ SetCOMInterrupts() 2129 if (self->io.direction == IO_XMIT) SetCOMInterrupts() 2131 if (self->io.speed > 115200) /* FIR, MIR */ SetCOMInterrupts() 2141 if (self->io.speed > 115200) /* FIR, MIR */ SetCOMInterrupts() 2158 if (self->io.speed > 115200) SetCOMInterrupts() 2216 module_param_array(io, int, NULL, 0); 2217 MODULE_PARM_DESC(io, "Base I/O addresses");
|
H A D | nsc-ircc.c | 61 #include <asm/io.h> 94 static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 }; variable 286 info.fir_base = io[i]; nsc_ircc_init() 295 if (io[i] < 0x2000) { nsc_ircc_init() 405 self->io.cfg_base = info->cfg_base; nsc_ircc_open() 406 self->io.fir_base = info->fir_base; nsc_ircc_open() 407 self->io.irq = info->irq; nsc_ircc_open() 408 self->io.fir_ext = CHIP_IO_EXTENT; nsc_ircc_open() 409 self->io.dma = info->dma; nsc_ircc_open() 410 self->io.fifo_size = 32; nsc_ircc_open() 413 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); nsc_ircc_open() 416 __func__, self->io.fir_base); nsc_ircc_open() 476 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); nsc_ircc_open() 485 self->io.dongle_id = dongle_id; nsc_ircc_open() 486 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); nsc_ircc_open() 507 release_region(self->io.fir_base, self->io.fir_ext); nsc_ircc_open() 526 iobase = self->io.fir_base; nsc_ircc_close() 535 __func__, self->io.fir_base); nsc_ircc_close() 536 release_region(self->io.fir_base, self->io.fir_ext); nsc_ircc_close() 837 pr_debug("%s(): nsc_ircc_init_39x (user settings): io=0x%04x, irq=%d, dma=%d\n", nsc_ircc_init_39x() 913 pr_debug("%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", nsc_ircc_probe_39x() 1266 iobase = self->io.fir_base; nsc_ircc_change_speed() 1269 self->io.speed = speed; nsc_ircc_change_speed() 1317 nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); nsc_ircc_change_speed() 1375 iobase = self->io.fir_base; nsc_ircc_hard_xmit_sir() 1384 if ((speed != self->io.speed) && (speed != -1)) { nsc_ircc_hard_xmit_sir() 1391 if (self->io.direction == IO_RECV) { nsc_ircc_hard_xmit_sir() 1446 iobase = self->io.fir_base; nsc_ircc_hard_xmit_fir() 1455 if ((speed != self->io.speed) && (speed != -1)) { nsc_ircc_hard_xmit_fir() 1528 self->io.direction = IO_XMIT; nsc_ircc_hard_xmit_fir() 1580 self->io.direction = IO_XMIT; nsc_ircc_dma_xmit() 1586 irda_setup_dma(self->io.dma, nsc_ircc_dma_xmit() 1652 iobase = self->io.fir_base; nsc_ircc_dma_xmit_complete() 1714 iobase = self->io.fir_base; nsc_ircc_dma_receive() 1731 self->io.direction = IO_RECV; nsc_ircc_dma_receive() 1741 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, nsc_ircc_dma_receive() 1834 if (st_fifo->pending_bytes < self->io.fifo_size) { nsc_ircc_dma_receive_complete() 1883 if (self->io.speed < 4000000) { nsc_ircc_dma_receive_complete() 1923 iobase = self->io.fir_base; nsc_ircc_pio_receive() 1946 actual = nsc_ircc_pio_write(self->io.fir_base, nsc_ircc_sir_interrupt() 1949 self->io.fifo_size); nsc_ircc_sir_interrupt() 1953 self->io.direction = IO_XMIT; nsc_ircc_sir_interrupt() 1969 self->io.direction = IO_RECV; nsc_ircc_sir_interrupt() 1972 * Need to be after self->io.direction to avoid race with nsc_ircc_sir_interrupt() 1982 if (self->io.speed > 115200) { nsc_ircc_sir_interrupt() 2030 if (self->io.direction == IO_XMIT) { nsc_ircc_fir_interrupt() 2095 iobase = self->io.fir_base; nsc_ircc_interrupt() 2107 if (self->io.speed > 115200) nsc_ircc_interrupt() 2137 if (self->io.speed > 115200) { nsc_ircc_is_receiving() 2138 iobase = self->io.fir_base; nsc_ircc_is_receiving() 2175 iobase = self->io.fir_base; nsc_ircc_net_open() 2177 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { nsc_ircc_net_open() 2179 driver_name, self->io.irq); nsc_ircc_net_open() 2186 if (request_dma(self->io.dma, dev->name)) { nsc_ircc_net_open() 2188 driver_name, self->io.dma); nsc_ircc_net_open() 2189 
free_irq(self->io.irq, dev); nsc_ircc_net_open() 2207 sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base); nsc_ircc_net_open() 2244 iobase = self->io.fir_base; nsc_ircc_net_close() 2246 disable_dma(self->io.dma); nsc_ircc_net_close() 2255 free_irq(self->io.irq, dev); nsc_ircc_net_close() 2256 free_dma(self->io.dma); nsc_ircc_net_close() 2317 int iobase = self->io.fir_base; nsc_ircc_suspend() 2319 if (self->io.suspended) nsc_ircc_suspend() 2339 free_irq(self->io.irq, self->netdev); nsc_ircc_suspend() 2340 disable_dma(self->io.dma); nsc_ircc_suspend() 2342 self->io.suspended = 1; nsc_ircc_suspend() 2353 if (!self->io.suspended) nsc_ircc_resume() 2359 nsc_ircc_setup(&self->io); nsc_ircc_resume() 2360 nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); nsc_ircc_resume() 2363 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, nsc_ircc_resume() 2366 driver_name, self->io.irq); nsc_ircc_resume() 2375 nsc_ircc_change_speed(self, self->io.speed); nsc_ircc_resume() 2385 self->io.suspended = 0; nsc_ircc_resume() 2398 module_param_array(io, int, NULL, 0); 2399 MODULE_PARM_DESC(io, "Base I/O addresses");
|
H A D | smsc-ircc2.c | 57 #include <asm/io.h> 153 chipio_t io; /* IrDA controller information */ member in struct:smsc_ircc_cb 498 if (self->io.speed > 115200) smsc_ircc_net_xmit() 557 dev->base_addr = self->io.fir_base = fir_base; smsc_ircc_open() 558 dev->irq = self->io.irq = irq; smsc_ircc_open() 703 self->io.fir_base = fir_base; smsc_ircc_setup_io() 704 self->io.sir_base = sir_base; smsc_ircc_setup_io() 705 self->io.fir_ext = SMSC_IRCC2_FIR_CHIP_IO_EXTENT; smsc_ircc_setup_io() 706 self->io.sir_ext = SMSC_IRCC2_SIR_CHIP_IO_EXTENT; smsc_ircc_setup_io() 707 self->io.fifo_size = SMSC_IRCC2_FIFO_SIZE; smsc_ircc_setup_io() 708 self->io.speed = SMSC_IRCC2_C_IRDA_FALLBACK_SPEED; smsc_ircc_setup_io() 714 self->io.irq = irq; smsc_ircc_setup_io() 716 self->io.irq = chip_irq; smsc_ircc_setup_io() 722 self->io.dma = dma; smsc_ircc_setup_io() 724 self->io.dma = chip_dma; smsc_ircc_setup_io() 755 int iobase = self->io.fir_base; smsc_ircc_init_chip() 814 * self->io.speed and the hardware - Jean II */ smsc_ircc_net_ioctl() 861 dev->name, self->io.speed); smsc_ircc_timeout() 864 smsc_ircc_change_speed(self, self->io.speed); smsc_ircc_timeout() 894 /* Make sure test of self->io.speed & speed change are atomic */ smsc_ircc_hard_xmit_sir() 899 if (speed != self->io.speed && speed != -1) { smsc_ircc_hard_xmit_sir() 930 outb(UART_IER_THRI, self->io.sir_base + UART_IER); smsc_ircc_hard_xmit_sir() 950 fir_base = self->io.fir_base; smsc_ircc_set_fir_speed() 952 self->io.speed = speed; smsc_ircc_set_fir_speed() 1008 fir_base = self->io.fir_base; smsc_ircc_fir_start() 1051 fir_base = self->io.fir_base; smsc_ircc_fir_stop() 1076 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED; smsc_ircc_change_speed() 1081 self->io.speed = speed; smsc_ircc_change_speed() 1086 if (self->io.speed == 0) smsc_ircc_change_speed() 1090 if (!last_speed_was_sir) speed = self->io.speed; smsc_ircc_change_speed() 1093 if (self->io.speed != speed) smsc_ircc_change_speed() 1096 self->io.speed = speed; smsc_ircc_change_speed() 1142 iobase = self->io.sir_base; smsc_ircc_set_sir_speed() 1145 self->io.speed = speed; smsc_ircc_set_sir_speed() 1159 fcr |= self->io.speed < 38400 ? smsc_ircc_set_sir_speed() 1198 /* Make sure test of self->io.speed & speed change are atomic */ smsc_ircc_hard_xmit_fir() 1203 if (speed != self->io.speed && speed != -1) { smsc_ircc_hard_xmit_fir() 1231 bofs = mtt * (self->io.speed / 1000) / 8000; smsc_ircc_hard_xmit_fir() 1255 int iobase = self->io.fir_base; smsc_ircc_dma_xmit() 1268 self->io.direction = IO_XMIT; smsc_ircc_dma_xmit() 1280 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/ smsc_ircc_dma_xmit() 1288 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, smsc_ircc_dma_xmit() 1310 int iobase = self->io.fir_base; smsc_ircc_dma_xmit_complete() 1355 int iobase = self->io.fir_base; smsc_ircc_dma_receive() 1372 self->io.direction = IO_RECV; smsc_ircc_dma_receive() 1381 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, smsc_ircc_dma_receive() 1412 int iobase = self->io.fir_base; smsc_ircc_dma_receive_complete() 1428 get_dma_residue(self->io.dma)); smsc_ircc_dma_receive_complete() 1430 len = self->rx_buff.truesize - get_dma_residue(self->io.dma); smsc_ircc_dma_receive_complete() 1447 len -= self->io.speed < 4000000 ? 
2 : 4; smsc_ircc_dma_receive_complete() 1485 iobase = self->io.sir_base; smsc_ircc_sir_receive() 1521 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) { smsc_ircc_interrupt() 1526 iobase = self->io.fir_base; smsc_ircc_interrupt() 1542 if (self->io.direction == IO_RECV) smsc_ircc_interrupt() 1580 iobase = self->io.sir_base; smsc_ircc_interrupt_sir() 1639 get_dma_residue(self->io.dma)); 1651 error = request_irq(self->io.irq, smsc_ircc_interrupt, 0, smsc_ircc_request_irq() 1655 __func__, self->io.irq, error); smsc_ircc_request_irq() 1666 self->io.speed = 0; smsc_ircc_start_interrupts() 1674 int iobase = self->io.fir_base; smsc_ircc_stop_interrupts() 1705 if (self->io.suspended) { smsc_ircc_net_open() 1710 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, smsc_ircc_net_open() 1713 __func__, self->io.irq); smsc_ircc_net_open() 1721 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base); smsc_ircc_net_open() 1733 if (request_dma(self->io.dma, dev->name)) { smsc_ircc_net_open() 1737 __func__, self->io.dma); smsc_ircc_net_open() 1773 if (!self->io.suspended) smsc_ircc_net_close() 1774 free_irq(self->io.irq, dev); smsc_ircc_net_close() 1776 disable_dma(self->io.dma); smsc_ircc_net_close() 1777 free_dma(self->io.dma); smsc_ircc_net_close() 1786 if (!self->io.suspended) { smsc_ircc_suspend() 1793 free_irq(self->io.irq, self->netdev); smsc_ircc_suspend() 1794 disable_dma(self->io.dma); smsc_ircc_suspend() 1796 self->io.suspended = 1; smsc_ircc_suspend() 1807 if (self->io.suspended) { smsc_ircc_resume() 1820 enable_dma(self->io.dma); smsc_ircc_resume() 1825 self->io.suspended = 0; smsc_ircc_resume() 1852 self->io.fir_base); smsc_ircc_close() 1854 release_region(self->io.fir_base, self->io.fir_ext); smsc_ircc_close() 1857 self->io.sir_base); smsc_ircc_close() 1859 release_region(self->io.sir_base, self->io.sir_ext); smsc_ircc_close() 1908 fir_base = self->io.fir_base; smsc_ircc_sir_start() 1909 sir_base = self->io.sir_base; smsc_ircc_sir_start() 1939 iobase = self->io.sir_base; smsc_ircc_sir_stop() 1966 iobase = self->io.sir_base; smsc_ircc_sir_write_wakeup() 1971 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size, smsc_ircc_sir_write_wakeup() 1996 if (self->io.speed <= 115200) { smsc_ircc_sir_write_wakeup() 2002 fcr |= self->io.speed < 38400 ? smsc_ircc_sir_write_wakeup() 2064 if (smsc_transceivers[i].probe(self->io.fir_base)) { smsc_ircc_probe_transceiver() 2090 smsc_transceivers[trx - 1].set_for_speed(self->io.fir_base, speed); smsc_ircc_set_transceiver_for_speed() 2118 int iobase = self->io.sir_base; smsc_ircc_sir_wait_hw_transmitter_finish()
|
H A D | irtty-sir.h | 31 chipio_t io; /* IrDA controller information */ member in struct:sirtty_cb
|
/linux-4.1.27/drivers/parport/ |
H A D | parport_pc.c | 59 #include <linux/io.h> 98 int io; member in struct:superio_struct 949 if (superios[i].io == 0) find_free_superio() 956 static void show_parconfig_smsc37c669(int io, int key) show_parconfig_smsc37c669() argument 967 outb(key, io); show_parconfig_smsc37c669() 968 outb(key, io); show_parconfig_smsc37c669() 969 outb(1, io); show_parconfig_smsc37c669() 970 cr1 = inb(io + 1); show_parconfig_smsc37c669() 971 outb(4, io); show_parconfig_smsc37c669() 972 cr4 = inb(io + 1); show_parconfig_smsc37c669() 973 outb(0x0a, io); show_parconfig_smsc37c669() 974 cra = inb(io + 1); show_parconfig_smsc37c669() 975 outb(0x23, io); show_parconfig_smsc37c669() 976 cr23 = inb(io + 1); show_parconfig_smsc37c669() 977 outb(0x26, io); show_parconfig_smsc37c669() 978 cr26 = inb(io + 1); show_parconfig_smsc37c669() 979 outb(0x27, io); show_parconfig_smsc37c669() 980 cr27 = inb(io + 1); show_parconfig_smsc37c669() 981 outb(0xaa, io); show_parconfig_smsc37c669() 993 "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n", show_parconfig_smsc37c669() 1009 the choices to standard settings, i.e. io-address and IRQ show_parconfig_smsc37c669() 1020 s->io = 0x3bc; show_parconfig_smsc37c669() 1024 s->io = 0x378; show_parconfig_smsc37c669() 1028 s->io = 0x278; show_parconfig_smsc37c669() 1041 static void show_parconfig_winbond(int io, int key) show_parconfig_winbond() argument 1061 outb(key, io); show_parconfig_winbond() 1062 outb(key, io); show_parconfig_winbond() 1063 outb(0x07, io); /* Register 7: Select Logical Device */ show_parconfig_winbond() 1064 outb(0x01, io + 1); /* LD1 is Parallel Port */ show_parconfig_winbond() 1065 outb(0x30, io); show_parconfig_winbond() 1066 cr30 = inb(io + 1); show_parconfig_winbond() 1067 outb(0x60, io); show_parconfig_winbond() 1068 cr60 = inb(io + 1); show_parconfig_winbond() 1069 outb(0x61, io); show_parconfig_winbond() 1070 cr61 = inb(io + 1); show_parconfig_winbond() 1071 outb(0x70, io); show_parconfig_winbond() 1072 cr70 = inb(io + 1); show_parconfig_winbond() 1073 outb(0x74, io); show_parconfig_winbond() 1074 cr74 = inb(io + 1); show_parconfig_winbond() 1075 outb(0xf0, io); show_parconfig_winbond() 1076 crf0 = inb(io + 1); show_parconfig_winbond() 1077 outb(0xaa, io); show_parconfig_winbond() 1083 printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ", show_parconfig_winbond() 1101 s->io = (cr60 << 8) | cr61; show_parconfig_winbond() 1164 void (*func)(int io, int key); decode_smsc() 1195 static void winbond_check(int io, int key) winbond_check() argument 1199 if (!request_region(io, 3, __func__)) winbond_check() 1202 origval = inb(io); /* Save original value */ winbond_check() 1205 outb(0x20, io); winbond_check() 1206 x_devid = inb(io + 1); winbond_check() 1207 outb(0x21, io); winbond_check() 1208 x_devrev = inb(io + 1); winbond_check() 1209 outb(0x09, io); winbond_check() 1210 x_oldid = inb(io + 1); winbond_check() 1212 outb(key, io); winbond_check() 1213 outb(key, io); /* Write Magic Sequence to EFER, extended winbond_check() 1215 outb(0x20, io); /* Write EFIR, extended function index register */ winbond_check() 1216 devid = inb(io + 1); /* Read EFDR, extended function data register */ winbond_check() 1217 outb(0x21, io); winbond_check() 1218 devrev = inb(io + 1); winbond_check() 1219 outb(0x09, io); winbond_check() 1220 oldid = inb(io + 1); winbond_check() 1221 outb(0xaa, io); /* Magic Seal */ winbond_check() 1223 outb(origval, io); /* in case we poked some entirely different hardware */ winbond_check() 1228 decode_winbond(io, key, 
devid, devrev, oldid); winbond_check() 1230 release_region(io, 3); winbond_check() 1233 static void winbond_check2(int io, int key) winbond_check2() argument 1237 if (!request_region(io, 3, __func__)) winbond_check2() 1240 origval[0] = inb(io); /* Save original values */ winbond_check2() 1241 origval[1] = inb(io + 1); winbond_check2() 1242 origval[2] = inb(io + 2); winbond_check2() 1245 outb(0x20, io + 2); winbond_check2() 1246 x_devid = inb(io + 2); winbond_check2() 1247 outb(0x21, io + 1); winbond_check2() 1248 x_devrev = inb(io + 2); winbond_check2() 1249 outb(0x09, io + 1); winbond_check2() 1250 x_oldid = inb(io + 2); winbond_check2() 1252 outb(key, io); /* Write Magic Byte to EFER, extended winbond_check2() 1254 outb(0x20, io + 2); /* Write EFIR, extended function index register */ winbond_check2() 1255 devid = inb(io + 2); /* Read EFDR, extended function data register */ winbond_check2() 1256 outb(0x21, io + 1); winbond_check2() 1257 devrev = inb(io + 2); winbond_check2() 1258 outb(0x09, io + 1); winbond_check2() 1259 oldid = inb(io + 2); winbond_check2() 1260 outb(0xaa, io); /* Magic Seal */ winbond_check2() 1262 outb(origval[0], io); /* in case we poked some entirely different hardware */ winbond_check2() 1263 outb(origval[1], io + 1); winbond_check2() 1264 outb(origval[2], io + 2); winbond_check2() 1269 decode_winbond(io, key, devid, devrev, oldid); winbond_check2() 1271 release_region(io, 3); winbond_check2() 1274 static void smsc_check(int io, int key) smsc_check() argument 1278 if (!request_region(io, 3, __func__)) smsc_check() 1281 origval = inb(io); /* Save original value */ smsc_check() 1284 outb(0x0d, io); smsc_check() 1285 x_oldid = inb(io + 1); smsc_check() 1286 outb(0x0e, io); smsc_check() 1287 x_oldrev = inb(io + 1); smsc_check() 1288 outb(0x20, io); smsc_check() 1289 x_id = inb(io + 1); smsc_check() 1290 outb(0x21, io); smsc_check() 1291 x_rev = inb(io + 1); smsc_check() 1293 outb(key, io); smsc_check() 1294 outb(key, io); /* Write Magic Sequence to EFER, extended smsc_check() 1296 outb(0x0d, io); /* Write EFIR, extended function index register */ smsc_check() 1297 oldid = inb(io + 1); /* Read EFDR, extended function data register */ smsc_check() 1298 outb(0x0e, io); smsc_check() 1299 oldrev = inb(io + 1); smsc_check() 1300 outb(0x20, io); smsc_check() 1301 id = inb(io + 1); smsc_check() 1302 outb(0x21, io); smsc_check() 1303 rev = inb(io + 1); smsc_check() 1304 outb(0xaa, io); /* Magic Seal */ smsc_check() 1306 outb(origval, io); /* in case we poked some entirely different hardware */ smsc_check() 1312 decode_smsc(io, key, oldid, oldrev); smsc_check() 1314 release_region(io, 3); smsc_check() 1380 if (superios[i].io != p->base) find_superio() 1483 * Old style XT ports alias io ports every 0x400, hence accessing ECR 2399 "parport_pc: ITE 8872 parallel port: io=0x%X", sio_ite_8872_probe() 2582 "parport_pc: VIA parallel port: io=0x%X", port1); sio_via_probe() 2591 printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n", sio_via_probe() 2687 a 1K io window */ 3079 static int __initdata io[PARPORT_PC_MAX_PORTS+1] = { variable 3152 MODULE_PARM_DESC(io, "Base I/O address (SPP regs)"); 3153 module_param_array(io, int, NULL, 0); 3182 for (i = 0; i < PARPORT_PC_MAX_PORTS && io[i]; i++) { parse_parport_params() 3190 if (!io[0]) { parse_parport_params() 3201 "without base address. Use 'io=' " parse_parport_params() 3214 "without base address. 
Use 'io=' " parse_parport_params() 3242 io[0] = PARPORT_DISABLE; parport_setup() 3263 io[parport_setup_ptr] = val; parport_setup() 3285 return io[0] == PARPORT_DISABLE; parse_parport_params() 3313 if (io[0]) { parport_pc_init() 3318 if (!io[i]) parport_pc_init() 3321 io_hi[i] = 0x400 + io[i]; parport_pc_init() 3322 parport_pc_probe_port(io[i], io_hi[i], parport_pc_init()
|
/linux-4.1.27/drivers/usb/core/ |
H A D | message.c | 253 static void sg_clean(struct usb_sg_request *io) sg_clean() argument 255 if (io->urbs) { sg_clean() 256 while (io->entries--) sg_clean() 257 usb_free_urb(io->urbs[io->entries]); sg_clean() 258 kfree(io->urbs); sg_clean() 259 io->urbs = NULL; sg_clean() 261 io->dev = NULL; sg_clean() 266 struct usb_sg_request *io = urb->context; sg_complete() local 269 spin_lock(&io->lock); sg_complete() 281 if (io->status sg_complete() 282 && (io->status != -ECONNRESET sg_complete() 285 dev_err(io->dev->bus->controller, sg_complete() 287 io->dev->devpath, sg_complete() 290 status, io->status); sg_complete() 294 if (io->status == 0 && status && status != -ECONNRESET) { sg_complete() 297 io->status = status; sg_complete() 303 spin_unlock(&io->lock); sg_complete() 304 for (i = 0, found = 0; i < io->entries; i++) { sg_complete() 305 if (!io->urbs[i] || !io->urbs[i]->dev) sg_complete() 308 retval = usb_unlink_urb(io->urbs[i]); sg_complete() 313 dev_err(&io->dev->dev, sg_complete() 316 } else if (urb == io->urbs[i]) sg_complete() 319 spin_lock(&io->lock); sg_complete() 323 io->bytes += urb->actual_length; sg_complete() 324 io->count--; sg_complete() 325 if (!io->count) sg_complete() 326 complete(&io->complete); sg_complete() 328 spin_unlock(&io->lock); sg_complete() 334 * @io: request block being initialized. until usb_sg_wait() returns, 359 int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, usb_sg_init() argument 367 if (!io || !dev || !sg usb_sg_init() 373 spin_lock_init(&io->lock); usb_sg_init() 374 io->dev = dev; usb_sg_init() 375 io->pipe = pipe; usb_sg_init() 379 io->entries = 1; usb_sg_init() 382 io->entries = nents; usb_sg_init() 386 io->urbs = kmalloc(io->entries * sizeof(*io->urbs), mem_flags); usb_sg_init() 387 if (!io->urbs) usb_sg_init() 394 for_each_sg(sg, sg, io->entries, i) { usb_sg_init() 400 io->entries = i; usb_sg_init() 403 io->urbs[i] = urb; usb_sg_init() 410 urb->context = io; usb_sg_init() 443 io->entries = i + 1; usb_sg_init() 448 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; usb_sg_init() 451 io->count = io->entries; usb_sg_init() 452 io->status = 0; usb_sg_init() 453 io->bytes = 0; usb_sg_init() 454 init_completion(&io->complete); usb_sg_init() 458 sg_clean(io); usb_sg_init() 465 * @io: request block handle, as initialized with usb_sg_init(). 475 * (1) success, where io->status is zero. The number of io->bytes 477 * (2) error, where io->status is a negative errno value. The number 478 * of io->bytes transferred before the error is usually less 506 void usb_sg_wait(struct usb_sg_request *io) usb_sg_wait() argument 509 int entries = io->entries; usb_sg_wait() 512 spin_lock_irq(&io->lock); usb_sg_wait() 514 while (i < entries && !io->status) { usb_sg_wait() 517 io->urbs[i]->dev = io->dev; usb_sg_wait() 518 retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC); usb_sg_wait() 521 * we handshake using io->status. 
usb_sg_wait() 523 spin_unlock_irq(&io->lock); usb_sg_wait() 546 io->urbs[i]->status = retval; usb_sg_wait() 547 dev_dbg(&io->dev->dev, "%s, submit --> %d\n", usb_sg_wait() 549 usb_sg_cancel(io); usb_sg_wait() 551 spin_lock_irq(&io->lock); usb_sg_wait() 552 if (retval && (io->status == 0 || io->status == -ECONNRESET)) usb_sg_wait() 553 io->status = retval; usb_sg_wait() 555 io->count -= entries - i; usb_sg_wait() 556 if (io->count == 0) usb_sg_wait() 557 complete(&io->complete); usb_sg_wait() 558 spin_unlock_irq(&io->lock); usb_sg_wait() 564 wait_for_completion(&io->complete); usb_sg_wait() 566 sg_clean(io); usb_sg_wait() 572 * @io: request block, initialized with usb_sg_init() 578 void usb_sg_cancel(struct usb_sg_request *io) usb_sg_cancel() argument 582 spin_lock_irqsave(&io->lock, flags); usb_sg_cancel() 585 if (!io->status) { usb_sg_cancel() 588 io->status = -ECONNRESET; usb_sg_cancel() 589 spin_unlock(&io->lock); usb_sg_cancel() 590 for (i = 0; i < io->entries; i++) { usb_sg_cancel() 593 if (!io->urbs[i]->dev) usb_sg_cancel() 595 retval = usb_unlink_urb(io->urbs[i]); usb_sg_cancel() 600 dev_warn(&io->dev->dev, "%s, unlink --> %d\n", usb_sg_cancel() 603 spin_lock(&io->lock); usb_sg_cancel() 605 spin_unlock_irqrestore(&io->lock, flags); usb_sg_cancel()
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lclient.h | 49 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, 97 * True iff io is processing glimpse right now. 112 * True, if \a io is a normal io, False for splice_{read,write}. 115 int cl_is_normalio(const struct lu_env *env, const struct cl_io *io); 145 struct cl_io *io = &ccc_env_info(env)->cti_io; ccc_env_thread_io() local 147 memset(io, 0, sizeof(*io)); ccc_env_thread_io() 148 return io; ccc_env_thread_io() 290 struct cl_lock *lock, const struct cl_io *io, 301 const struct cl_page_slice *slice, struct cl_io *io); 306 struct cl_io *io, int nonblock); 309 struct cl_io *io); 312 struct cl_io *io); 315 struct cl_io *io); 318 struct cl_io *io); 321 struct cl_io *io); 327 struct cl_io *io, __u32 enqflags); 334 const struct cl_io *io); 340 int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, 343 int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, 350 struct cl_io *io); 352 struct cl_io *io, loff_t start, size_t count, int *exceed);
|
H A D | cl_object.h | 41 * Client objects implement io operations and cache pages. 73 * call is referred to as "an io", whereas low-level I/O operation, like 333 const struct cl_io *io); 335 * Initialize io state for a given layer. 337 * called top-to-bottom once per io existence to initialize io 338 * state. If layer wants to keep some state for this type of io, it 341 * participating in this io share the same session. 344 struct cl_object *obj, struct cl_io *io); 496 * this io an exclusive access to this page w.r.t. other io attempts and 504 * While lustre client maintains the notion of an page ownership by io, 593 * - [cl_page_state::CPS_OWNED] io comes across the page and 610 * - io creating new page and immediately owning it; 612 * - [cl_page_state::CPS_CACHED] io finding existing cached page 615 * - [cl_page_state::CPS_OWNED] io finding existing owned page 620 * - [cl_page_state::CPS_CACHED] io decides to leave the page in 623 * - [cl_page_state::CPS_PAGEIN] io starts read transfer for 626 * - [cl_page_state::CPS_PAGEOUT] io starts immediate write 629 * - [cl_page_state::CPS_FREEING] io decides to destroy this 648 * - [cl_page_state::CPS_OWNED] an io requesting an immediate 755 * by sub-io. Protected by a VM lock. 828 * Methods taking an \a io argument are for the activity happening in the 829 * context of given \a io. Page is assumed to be owned by that io, except for 847 * Called when \a io acquires this page into the exclusive 849 * not owned by other io, and no transfer is going on against 857 struct cl_io *io, int nonblock); 864 const struct cl_page_slice *slice, struct cl_io *io); 866 * Called for a page that is already "owned" by \a io from VM point of 873 const struct cl_page_slice *slice, struct cl_io *io); 883 struct cl_io *io); 899 const struct cl_page_slice *slice, struct cl_io *io); 922 struct cl_io *io); 953 struct cl_io *io); 980 * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...); 995 struct cl_io *io); 1042 struct cl_io *io); 1043 } io[CRT_NR]; member in struct:cl_page_operations 1085 struct cl_io *io); 1708 struct cl_io *io, __u32 enqflags); 1749 * Returns true, iff given lock is suitable for the given io, idea 1756 * io. Probably lock description or something similar. 1763 const struct cl_io *io); 1859 * - own pages on behalf of certain io (waiting for each page in turn), 1900 * per-layer io state is stored in the session, associated with the io, see 1904 * There is a small predefined number of possible io types, enumerated in enum 1909 * specifically, to detect when io is done, and its state can be safely 1912 * For read/write io overall execution plan is as following: 1914 * (0) initialize io state through all layers; 1931 * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to 1933 * special error condition from per-page method when current sub-io has to 1934 * block. This causes io loop to be repeated, and lov switches to the next 1935 * sub-io in its cl_io_operations::cio_iter_init() implementation. 1956 * Miscellaneous io. This is used for occasional io activity that 1959 * - cancellation of an extent lock. This io exists as a context 1963 * - VM induced page write-out. An io context for writing page out 1966 * - glimpse. An io context to acquire glimpse lock. 1968 * - grouplock. An io context to acquire group lock. 1970 * CIT_MISC io is used simply as a context in which locks and pages 1971 * are manipulated. Such io has no internal "process", that is, 2014 /** io operations. Immutable after creation. 
*/ 2025 * Per-layer io operations. 2030 * Vector of io state transition methods for every io type. 2032 * \see cl_page_operations::io 2036 * Prepare io iteration at a given layer. 2039 * "io loop" (if it makes sense for this type of io). Here 2047 * Finalize io iteration. 2049 * Called bottom-to-top at the end of each iteration of "io 2058 * Collect locks for the current iteration of io. 2078 * Start io iteration. 2089 * Called top-to-bottom at the end of io loop. Here layer 2090 * might wait for an unfinished asynchronous io. 2102 * Called once per io, bottom-to-top to release io resources. 2127 * \pre io->ci_type == CIT_READ 2137 * \pre io->ci_type == CIT_WRITE 2148 * \pre io->ci_type == CIT_WRITE 2158 * Optional debugging helper. Print given io slice. 2215 * Link between lock and io. Intermediate structure is needed, because the 2216 * same lock can be part of multiple io's simultaneously. 2229 * Lock-set represents a collection of locks, that io needs at a 2299 * State for io. 2312 /** main object this io is against. Immutable after creation. */ 2315 * Upper layer io, of which this io is a part of. Immutable after 2321 /** list of locks (to be) acquired by this io. */ 2370 * This io has held grouplock, to inform sublayers that 2415 * - immediate transfer: this is started when a high level io wants a page 2423 * when io wants to transfer a page to the server some time later, when 2437 * For the immediate transfer io submits a cl_page_list, that req-formation 2725 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, 2793 struct cl_io *io, 2826 * Functions dealing with the ownership of page by io. 2831 struct cl_io *io, struct cl_page *page); 2833 struct cl_io *io, struct cl_page *page); 2835 struct cl_io *io, struct cl_page *page); 2837 struct cl_io *io, struct cl_page *pg); 2839 struct cl_io *io, struct cl_page *page); 2840 int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); 2851 int cl_page_prep (const struct lu_env *env, struct cl_io *io, 2857 int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, 2862 int cl_page_flush (const struct lu_env *env, struct cl_io *io, 2873 void cl_page_discard (const struct lu_env *env, struct cl_io *io, 2876 int cl_page_unmap (const struct lu_env *env, struct cl_io *io, 2882 int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, 2901 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, 2904 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, 2907 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, 2984 struct cl_io *io, __u32 flags); 2988 struct cl_io *io, __u32 flags); 3039 int cl_io_init (const struct lu_env *env, struct cl_io *io, 3041 int cl_io_sub_init (const struct lu_env *env, struct cl_io *io, 3043 int cl_io_rw_init (const struct lu_env *env, struct cl_io *io, 3045 int cl_io_loop (const struct lu_env *env, struct cl_io *io); 3047 void cl_io_fini (const struct lu_env *env, struct cl_io *io); 3048 int cl_io_iter_init (const struct lu_env *env, struct cl_io *io); 3049 void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io); 3050 int cl_io_lock (const struct lu_env *env, struct cl_io *io); 3051 void cl_io_unlock (const struct lu_env *env, struct cl_io *io); 3052 int cl_io_start (const struct lu_env *env, struct cl_io *io); 3053 void cl_io_end (const struct lu_env *env, struct cl_io *io); 3054 int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, 3056 int cl_io_lock_alloc_add(const struct lu_env 
*env, struct cl_io *io, 3058 int cl_io_read_page (const struct lu_env *env, struct cl_io *io, 3060 int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, 3062 int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, 3064 int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, 3066 int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, 3069 void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io, 3071 int cl_io_cancel (const struct lu_env *env, struct cl_io *io, 3076 * True, iff \a io is an O_APPEND write(2). 3078 static inline int cl_io_is_append(const struct cl_io *io) cl_io_is_append() argument 3080 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append; cl_io_is_append() 3083 static inline int cl_io_is_sync_write(const struct cl_io *io) cl_io_is_sync_write() argument 3085 return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync; cl_io_is_sync_write() 3088 static inline int cl_io_is_mkwrite(const struct cl_io *io) cl_io_is_mkwrite() argument 3090 return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite; cl_io_is_mkwrite() 3094 * True, iff \a io is a truncate(2). 3096 static inline int cl_io_is_trunc(const struct cl_io *io) cl_io_is_trunc() argument 3098 return io->ci_type == CIT_SETATTR && cl_io_is_trunc() 3099 (io->u.ci_setattr.sa_valid & ATTR_SIZE); cl_io_is_trunc() 3102 struct cl_io *cl_io_top(struct cl_io *io); 3105 lu_printer_t printer, const struct cl_io *io); 3151 struct cl_io *io, struct cl_page_list *plist); 3153 struct cl_io *io, struct cl_page_list *plist); 3155 struct cl_io *io, struct cl_page_list *plist); 3157 struct cl_io *io, struct cl_page_list *plist); 3159 struct cl_io *io, struct cl_page_list *plist); 3165 struct cl_io *io, struct cl_2queue *queue); 3167 struct cl_io *io, struct cl_2queue *queue); 3169 struct cl_io *io, struct cl_2queue *queue); 3209 int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
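The cl_object.h comments above describe the per-layer io state machine and the "initialize, loop over iter_init/lock/start/end/unlock/iter_fini, finalize" plan driven by cl_io_loop(). A hedged sketch of that life cycle for a read, using only entry points visible in the listing; setting ci_obj directly and the include path are assumptions:

#include "cl_object.h"	/* assumed include for the cl_io API quoted above */

static int example_cl_read(const struct lu_env *env, struct cl_object *obj,
			   loff_t pos, size_t count)
{
	struct cl_io *io = ccc_env_thread_io(env);	/* per-env io slot, zeroed */
	int rc;

	io->ci_obj = obj;	/* object this io runs against (assumed field use) */

	rc = cl_io_rw_init(env, io, CIT_READ, pos, count);
	if (rc == 0)
		rc = cl_io_loop(env, io);	/* repeats the iteration state machine */

	cl_io_fini(env, io);			/* release per-layer io resources */
	return rc;
}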
|
/linux-4.1.27/arch/arm/mach-w90x900/ |
H A D | nuc910.c | 33 /* define specific CPU platform io map */ 43 /*Init NUC910 evb io*/
|
H A D | nuc950.c | 32 /* define specific CPU platform io map */ 37 /*Init NUC950 evb io*/
|
H A D | nuc960.c | 30 /* define specific CPU platform io map */ 35 /*Init NUC960 evb io*/
|
/linux-4.1.27/drivers/media/pci/cx18/ |
H A D | Makefile | 5 cx18-dvb.o cx18-io.o
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/ |
H A D | Makefile | 3 iwlwifi-objs += iwl-io.o
|
H A D | iwl-devtrace-io.h | 48 TP_printk("[%s] read io[%#x] = %#x", 65 TP_printk("[%s] write io[%#x] = %#x)", 82 TP_printk("[%s] write io[%#x] = %#x)", 154 #define TRACE_INCLUDE_FILE iwl-devtrace-io
|
/linux-4.1.27/arch/xtensa/platforms/xt2000/include/platform/ |
H A D | serial.h | 15 #include <asm/io.h>
|
/linux-4.1.27/arch/sh/lib/ |
H A D | Makefile | 21 obj-y += io.o
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | io-unit.h | 0 /* io-unit.h: Definitions for the sun4d IO-UNIT. 12 /* The io-unit handles all virtual to physical address translations 15 * translated by the on chip SRMMU. The io-unit and the srmmu do 18 * Basically the io-unit handles all dvma sbus activity.
|
H A D | mc146818rtc_32.h | 7 #include <asm/io.h>
|
H A D | mc146818rtc_64.h | 7 #include <asm/io.h>
|
/linux-4.1.27/include/uapi/linux/ |
H A D | wil6210_uapi.h | 36 * struct wil_memio io; 38 * .ifr_data = &io, 49 * struct wil_memio_block io = { 53 * .ifr_data = &io,
|
/linux-4.1.27/arch/sh/boards/ |
H A D | board-shmin.c | 13 #include <asm/io.h>
|
H A D | board-titan.c | 13 #include <asm/io.h>
|
/linux-4.1.27/arch/sh/boards/mach-lboxre2/ |
H A D | irq.c | 17 #include <asm/io.h>
|
/linux-4.1.27/arch/sh/drivers/pci/ |
H A D | fixups-r7780rp.c | 14 #include <linux/io.h>
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | io_generic.h | 4 * alpha. Must be included _before_ io.h to avoid preprocessor-induced
|
/linux-4.1.27/arch/sh/kernel/cpu/ |
H A D | adc.c | 9 #include <asm/io.h>
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | mvme5100.c | 15 #include "io.h"
|
H A D | cuboot-pq2.c | 17 #include "io.h" 126 *io = NULL, *mem_base = NULL; fixup_pci() local 169 io = &pci_ranges_buf[i]; fixup_pci() 172 if (!mem || !mmio || !io) fixup_pci() 178 if (io->size[1] & (io->size[1] - 1)) fixup_pci() 191 out_be32(&pci_regs[1][1], io->phys_addr | 1); fixup_pci() 192 out_be32(&pci_regs[2][1], ~(io->size[1] - 1)); fixup_pci() 202 out_le32(&pci_regs[0][12], io->pci_addr[1] >> 12); fixup_pci() 203 out_le32(&pci_regs[0][14], io->phys_addr >> 12); fixup_pci() 204 out_le32(&pci_regs[0][16], (~(io->size[1] - 1) >> 12) | 0xc0000000); fixup_pci()
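The fixup_pci() fragment above programs CPU-to-PCI outbound windows and rejects an I/O range whose size is not a power of two ("io->size[1] & (io->size[1] - 1)"), because the window mask is then formed as "~(size - 1)". A small, self-contained restatement of that check; names and values are illustrative:

#include <stdbool.h>
#include <stdint.h>

static bool window_size_ok(uint32_t size)
{
	/* non-zero and a power of two: exactly one bit set */
	return size != 0 && (size & (size - 1)) == 0;
}

static uint32_t window_mask(uint32_t size)
{
	/* e.g. size 0x00100000 -> mask 0xfff00000 */
	return ~(size - 1);
}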
|
/linux-4.1.27/arch/powerpc/perf/ |
H A D | hv-common.c | 1 #include <asm/io.h>
|
/linux-4.1.27/arch/frv/kernel/ |
H A D | Makefile | 15 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | blinken.h | 16 #include <asm/io.h>
|
/linux-4.1.27/arch/arm/mach-footbridge/include/mach/ |
H A D | io.h | 2 * arch/arm/mach-footbridge/include/mach/io.h
|
/linux-4.1.27/arch/arm/mach-gemini/ |
H A D | reset.c | 13 #include <linux/io.h>
|
/linux-4.1.27/arch/arm/mach-omap1/ |
H A D | flash.c | 9 #include <linux/io.h>
|
/linux-4.1.27/lib/ |
H A D | check_signature.c | 1 #include <linux/io.h>
|
/linux-4.1.27/include/asm-generic/ |
H A D | io-64-nonatomic-hi-lo.h | 4 #include <linux/io.h>
|
H A D | io-64-nonatomic-lo-hi.h | 4 #include <linux/io.h>
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
H A D | osc_io.c | 84 * io operations. 88 static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io) osc_io_fini() argument 94 * layer. Iterates over pages in the in-queue, prepares each for io by calling 108 struct cl_io *io; osc_io_submit() local 138 io = page->cp_owner; cl_page_list_for_each_safe() 139 LASSERT(io != NULL); cl_page_list_for_each_safe() 153 result = cl_page_prep(env, io, page, crt); cl_page_list_for_each_safe() 305 /* see osc_io_prepare_write() for lockless io handling. */ osc_io_commit_write() 314 struct cl_io *io; osc_io_fault_start() local 317 io = ios->cis_io; osc_io_fault_start() 318 fio = &io->u.ci_fault; osc_io_fault_start() 344 static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, trunc_check_cb() argument 374 static void osc_trunc_check(const struct lu_env *env, struct cl_io *io, osc_trunc_check() argument 388 cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF, osc_trunc_check() 395 struct cl_io *io = slice->cis_io; osc_io_setattr_start() local 402 __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_io_setattr_start() 403 unsigned int ia_valid = io->u.ci_setattr.sa_valid; osc_io_setattr_start() 408 if (cl_io_is_trunc(io)) osc_io_setattr_start() 415 struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr; osc_io_setattr_start() 460 oinfo.oi_capa = io->u.ci_setattr.sa_capa; osc_io_setattr_start() 480 struct cl_io *io = slice->cis_io; osc_io_setattr_end() local 488 result = io->ci_result = cbargs->opc_rc; osc_io_setattr_end() 495 LASSERT(cl_io_is_trunc(io)); osc_io_setattr_end() 501 if (cl_io_is_trunc(io)) { osc_io_setattr_end() 502 __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_io_setattr_end() 504 osc_trunc_check(env, io, oio, size); osc_io_setattr_end() 578 struct cl_io *io = slice->cis_io; osc_io_fsync_start() local 579 struct cl_fsync_io *fio = &io->u.ci_fsync; osc_io_fsync_start() 778 /* check for lockless io. */ osc_req_attr_set() 795 struct cl_object *obj, struct cl_io *io) osc_io_init() 800 cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops); osc_io_init() 794 osc_io_init(const struct lu_env *env, struct cl_object *obj, struct cl_io *io) osc_io_init() argument
|
/linux-4.1.27/drivers/staging/media/lirc/ |
H A D | lirc_sir.c | 54 #include <linux/io.h> 122 static int io = LIRC_PORT; variable 168 return inb(io + offset); sinp() 173 outb(value, io + offset); soutp() 433 outb(UART_FCR_CLEAR_RCVR, io + UART_FCR); sir_timeout() 454 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { sir_interrupt() 457 (void) inb(io + UART_MSR); sir_interrupt() 460 (void) inb(io + UART_LSR); sir_interrupt() 465 outb(data, io + UART_TX) sir_interrupt() 473 data = inb(io + UART_RX); sir_interrupt() 527 lsr = inb(io + UART_LSR); sir_interrupt() 551 outb(PULSE, io + UART_TX); send_pulse() 553 while (!(inb(io + UART_LSR) & UART_LSR_THRE)) send_pulse() 629 outb(0, io + UART_MCR); init_hardware() 630 outb(0, io + UART_IER); init_hardware() 633 outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR); init_hardware() 634 outb(1, io + UART_DLL); outb(0, io + UART_DLM); init_hardware() 636 outb(UART_LCR_WLEN7, io + UART_LCR); init_hardware() 638 outb(UART_FCR_ENABLE_FIFO, io + UART_FCR); init_hardware() 640 /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */ init_hardware() 641 outb(UART_IER_RDI, io + UART_IER); init_hardware() 643 outb(UART_MCR_DTR|UART_MCR_RTS|UART_MCR_OUT2, io + UART_MCR); init_hardware() 661 outb(0, io + UART_IER); drop_hardware() 673 if (request_region(io, 8, LIRC_DRIVER_NAME) == NULL) { init_port() 674 pr_err("i/o port 0x%.4x already in use.\n", io); init_port() 680 release_region(io, 8); init_port() 684 pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq); init_port() 695 release_region(io, 8); drop_port() 1004 module_param(io, int, S_IRUGO); 1005 MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
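The lirc_sir.c fragment interleaves the UART programming with unrelated matches, so here is a hedged reconstruction of the init_hardware() register sequence it shows, using the standard <linux/serial_reg.h> offsets: interrupts masked, divisor latch set to 1 with the SIR line settings, FIFO enabled, and only the receive-data interrupt unmasked. Treat it as a reading aid, not a drop-in replacement:

#include <linux/io.h>
#include <linux/serial_reg.h>

static void example_sir_uart_setup(unsigned int io)
{
	outb(0, io + UART_MCR);				/* modem control lines off */
	outb(0, io + UART_IER);				/* mask all UART interrupts */
	outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
	outb(1, io + UART_DLL);				/* divisor latch, low byte */
	outb(0, io + UART_DLM);				/* divisor latch, high byte */
	outb(UART_LCR_WLEN7, io + UART_LCR);		/* 7 data bits, DLAB cleared */
	outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);	/* enable the FIFOs */
	outb(UART_IER_RDI, io + UART_IER);		/* receive-data interrupt only */
	outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
}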
|
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | io.h | 2 * linux/arch/unicore32/include/asm/io.h 21 #include <asm-generic/io.h> 34 * Documentation/io-mapping.txt.
|
/linux-4.1.27/drivers/pnp/pnpbios/ |
H A D | rsparser.c | 84 int io, size, mask, i, flags; pnpbios_parse_allocated_resource_data() local 109 io = *(short *)&p[4]; pnpbios_parse_allocated_resource_data() 111 pnpbios_parse_allocated_memresource(dev, io, size); pnpbios_parse_allocated_resource_data() 125 io = *(int *)&p[4]; pnpbios_parse_allocated_resource_data() 127 pnpbios_parse_allocated_memresource(dev, io, size); pnpbios_parse_allocated_resource_data() 133 io = *(int *)&p[4]; pnpbios_parse_allocated_resource_data() 135 pnpbios_parse_allocated_memresource(dev, io, size); pnpbios_parse_allocated_resource_data() 142 io = -1; pnpbios_parse_allocated_resource_data() 146 io = i; pnpbios_parse_allocated_resource_data() 147 if (io != -1) pnpbios_parse_allocated_resource_data() 148 pcibios_penalize_isa_irq(io, 1); pnpbios_parse_allocated_resource_data() 151 pnp_add_irq_resource(dev, io, flags); pnpbios_parse_allocated_resource_data() 158 io = -1; pnpbios_parse_allocated_resource_data() 162 io = i; pnpbios_parse_allocated_resource_data() 163 if (io == -1) pnpbios_parse_allocated_resource_data() 165 pnp_add_dma_resource(dev, io, flags); pnpbios_parse_allocated_resource_data() 171 io = p[2] + p[3] * 256; pnpbios_parse_allocated_resource_data() 173 pnpbios_parse_allocated_ioresource(dev, io, size); pnpbios_parse_allocated_resource_data() 183 io = p[1] + p[2] * 256; pnpbios_parse_allocated_resource_data() 185 pnpbios_parse_allocated_ioresource(dev, io, size); pnpbios_parse_allocated_resource_data() 632 pnp_dbg(&dev->dev, " encode io %#lx-%#lx\n", base, base + len - 1); pnpbios_encode_port()
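The rsparser.c fragment rebuilds port bases and sizes from raw PnP BIOS descriptor bytes (for example "io = p[2] + p[3] * 256"). That arithmetic is simply a little-endian 16-bit load; a tiny helper, purely illustrative:

#include <stdint.h>

static uint16_t pnp_le16(const uint8_t *p, unsigned int off)
{
	/* equivalent to p[off] + p[off + 1] * 256 in the fragment above */
	return (uint16_t)(p[off] | (p[off + 1] << 8));
}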
|
/linux-4.1.27/drivers/pci/hotplug/ |
H A D | ibmphp_pci.c | 329 if (cur_func->io[i]) { ibmphp_configure_card() 330 ibmphp_remove_resource (cur_func->io[i]); ibmphp_configure_card() 331 cur_func->io[i] = NULL; ibmphp_configure_card() 363 struct resource_node *io[6]; configure_device() local 406 io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL); configure_device() 408 if (!io[count]) { configure_device() 412 io[count]->type = IO; configure_device() 413 io[count]->busno = func->busno; configure_device() 414 io[count]->devfunc = PCI_DEVFN(func->device, func->function); configure_device() 415 io[count]->len = len[count]; configure_device() 416 if (ibmphp_check_resource(io[count], 0) == 0) { configure_device() 417 ibmphp_add_resource (io[count]); configure_device() 418 func->io[count] = io[count]; configure_device() 420 err ("cannot allocate requested io for bus %x device %x function %x len %x\n", configure_device() 422 kfree (io[count]); configure_device() 425 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start); configure_device() 428 debug ("b4 writing, the IO address is %x\n", func->io[count]->start); configure_device() 582 struct resource_node *io = NULL; configure_bridge() local 653 /* First we need to allocate mem/io for the bridge itself in case it needs it */ configure_bridge() 688 func->io[count] = bus_io[count]; configure_bridge() 690 err ("cannot allocate requested io for bus %x, device %x, len %x\n", configure_bridge() 696 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start); configure_bridge() 806 debug ("amount_needed->io = %x\n", amount_needed->io); configure_bridge() 816 func->io[count] = NULL; configure_bridge() 829 if (!amount_needed->io) { configure_bridge() 833 debug ("it wants %x IO behind the bridge\n", amount_needed->io); configure_bridge() 834 io = kzalloc(sizeof(*io), GFP_KERNEL); configure_bridge() 836 if (!io) { configure_bridge() 841 io->type = IO; configure_bridge() 842 io->busno = func->busno; configure_bridge() 843 io->devfunc = PCI_DEVFN(func->device, func->function); configure_bridge() 844 io->len = amount_needed->io; configure_bridge() 845 if (ibmphp_check_resource (io, 1) == 0) { configure_bridge() 846 debug ("were we able to add io\n"); configure_bridge() 847 ibmphp_add_resource (io); configure_bridge() 933 rc = add_new_bus (bus, io, mem, pfmem, func->busno); configure_bridge() 935 rc = add_new_bus (bus, io, mem, pfmem, 0xFF); configure_bridge() 954 debug ("io 32\n"); configure_bridge() 1053 if (io) configure_bridge() 1054 ibmphp_remove_resource (io); configure_bridge() 1060 func->io[i] = NULL; configure_bridge() 1166 amount->io += len[count]; scan_behind_bridge() 1198 if ((amount->io) && (amount->io < IOBRIDGE)) scan_behind_bridge() 1199 amount->io = IOBRIDGE; scan_behind_bridge() 1227 struct resource_node *io; unconfigure_boot_device() local 1270 if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) { unconfigure_boot_device() 1274 debug ("io->start = %x\n", io->start); unconfigure_boot_device() 1275 temp_end = io->end; unconfigure_boot_device() 1276 start_address = io->end + 1; unconfigure_boot_device() 1277 ibmphp_remove_resource (io); unconfigure_boot_device() 1280 if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) { unconfigure_boot_device() 1284 debug ("io->start = %x\n", io->start); unconfigure_boot_device() 1285 temp_end = io->end; unconfigure_boot_device() 1286 start_address = io->end + 1; unconfigure_boot_device() 1287 ibmphp_remove_resource (io); unconfigure_boot_device() 1338 
struct resource_node *io = NULL; unconfigure_boot_bridge() local 1397 if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) { unconfigure_boot_bridge() 1401 if (io) unconfigure_boot_bridge() 1402 debug ("io->start = %x\n", io->start); unconfigure_boot_bridge() 1404 ibmphp_remove_resource (io); unconfigure_boot_bridge() 1593 if (cur_func->io[i]) { ibmphp_unconfigure_card() 1594 debug ("io[%d] exists\n", i); ibmphp_unconfigure_card() 1596 ibmphp_remove_resource (cur_func->io[i]); ibmphp_unconfigure_card() 1597 cur_func->io[i] = NULL; ibmphp_unconfigure_card() 1633 static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno) add_new_bus() argument 1650 if (io) { add_new_bus() 1656 io_range->start = io->start; add_new_bus() 1657 io_range->end = io->end; add_new_bus()
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | io_trapped.c | 2 * Trapped io support 6 * Intercept io operations by trapping. 20 #include <asm/io.h> 83 pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n", register_trapped_io() 85 res->flags & IORESOURCE_IO ? "io" : "mmio", register_trapped_io() 105 pr_warning("unable to install trapped io filter\n"); register_trapped_io() 227 pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); from_device() 240 pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); from_device() 250 pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt); to_device() 262 pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); to_device()
|
/linux-4.1.27/drivers/pnp/pnpacpi/ |
H A D | rsparser.c | 342 struct acpi_resource_io *io) pnpacpi_parse_port_option() 346 if (io->io_decode == ACPI_DECODE_16) pnpacpi_parse_port_option() 348 pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, pnpacpi_parse_port_option() 349 io->alignment, io->address_length, flags); pnpacpi_parse_port_option() 354 struct acpi_resource_fixed_io *io) pnpacpi_parse_fixed_port_option() 356 pnp_register_port_resource(dev, option_flags, io->address, io->address, pnpacpi_parse_fixed_port_option() 357 0, io->address_length, IORESOURCE_IO_FIXED); pnpacpi_parse_fixed_port_option() 489 pnpacpi_parse_port_option(dev, option_flags, &res->data.io); pnpacpi_option_resource() 761 struct acpi_resource_io *io = &resource->data.io; pnpacpi_encode_io() local 765 io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ? pnpacpi_encode_io() 767 io->minimum = p->start; pnpacpi_encode_io() 768 io->maximum = p->end; pnpacpi_encode_io() 769 io->alignment = 0; /* Correct? */ pnpacpi_encode_io() 770 io->address_length = resource_size(p); pnpacpi_encode_io() 772 io->minimum = 0; pnpacpi_encode_io() 773 io->address_length = 0; pnpacpi_encode_io() 776 pnp_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum, pnpacpi_encode_io() 777 io->minimum + io->address_length - 1, io->io_decode); pnpacpi_encode_io() 340 pnpacpi_parse_port_option(struct pnp_dev *dev, unsigned int option_flags, struct acpi_resource_io *io) pnpacpi_parse_port_option() argument 352 pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, unsigned int option_flags, struct acpi_resource_fixed_io *io) pnpacpi_parse_fixed_port_option() argument
|
/linux-4.1.27/drivers/net/appletalk/ |
H A D | ltpc.c | 205 static int io; variable 233 #include <asm/io.h> 982 inb_p(io+1); ltpc_probe_dma() 983 inb_p(io+0); ltpc_probe_dma() 986 if ( 0xfa == inb_p(io+6) ) break; ltpc_probe_dma() 989 inb_p(io+3); ltpc_probe_dma() 990 inb_p(io+2); ltpc_probe_dma() 992 if ( 0xfb == inb_p(io+6) ) break; ltpc_probe_dma() 1034 if (io != 0x240 && request_region(0x220,8,"ltpc")) { ltpc_probe() 1037 io = 0x220; ltpc_probe() 1042 if (io != 0x220 && request_region(0x240,8,"ltpc")) { ltpc_probe() 1045 io = 0x240; ltpc_probe() 1063 inb_p(io+7); ltpc_probe() 1064 inb_p(io+7); ltpc_probe() 1066 inb_p(io+6); ltpc_probe() 1071 printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io); ltpc_probe() 1093 inb_p(io+1); ltpc_probe() 1094 inb_p(io+3); ltpc_probe() 1098 inb_p(io+0); ltpc_probe() 1099 inb_p(io+2); ltpc_probe() 1100 inb_p(io+7); /* clear reset */ ltpc_probe() 1101 inb_p(io+4); ltpc_probe() 1102 inb_p(io+5); ltpc_probe() 1103 inb_p(io+5); /* enable dma */ ltpc_probe() 1104 inb_p(io+6); /* tri-state interrupt line */ ltpc_probe() 1112 dma = ltpc_probe_dma(io, dma); ltpc_probe() 1121 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma); ltpc_probe() 1123 printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma); ltpc_probe() 1126 dev->base_addr = io; ltpc_probe() 1143 (void) inb_p(io+3); ltpc_probe() 1144 (void) inb_p(io+2); ltpc_probe() 1148 if( 0xf9 == inb_p(io+6)) ltpc_probe() 1160 (void) inb_p(io+7); /* enable interrupts from board */ ltpc_probe() 1161 (void) inb_p(io+7); /* and reset irq line */ ltpc_probe() 1187 release_region(io, 8); ltpc_probe() 1195 /* handles "ltpc=io,irq,dma" kernel command lines */ ltpc_setup() 1213 io = ints[1]; ltpc_setup() 1234 module_param(io, int, 0); 1241 if(io == 0) ltpc_module_init()
|
/linux-4.1.27/drivers/isdn/hardware/avm/ |
H A D | b1isa.c | 22 #include <asm/io.h> 169 static int io[MAX_CARDS]; variable 172 module_param_array(io, int, NULL, 0); 174 MODULE_PARM_DESC(io, "I/O base address(es)"); 214 if (!io[i]) b1isa_init() 217 isa_dev[i].resource[0].start = io[i]; b1isa_init()
|
/linux-4.1.27/drivers/net/arcnet/ |
H A D | com20020-isa.c | 42 #include <asm/io.h> 121 static int io = 0x0; /* <--- EDIT THESE LINES FOR YOUR CONFIGURATION */ variable 122 static int irq = 0; /* or use the insmod io= irq= shmem= options */ 130 module_param(io, int, 0); 163 dev->base_addr = io; com20020_init() 209 io = ints[1]; com20020isa_setup()
|
/linux-4.1.27/arch/arm/mach-mvebu/ |
H A D | kirkwood.c | 88 void __iomem *io; kirkwood_dt_eth_fixup() local 103 io = of_iomap(pnp, 0); kirkwood_dt_eth_fixup() 104 if (!io) kirkwood_dt_eth_fixup() 127 reg = readl(io + MV643XX_ETH_MAC_ADDR_HIGH); kirkwood_dt_eth_fixup() 133 reg = readl(io + MV643XX_ETH_MAC_ADDR_LOW); kirkwood_dt_eth_fixup() 140 iounmap(io); kirkwood_dt_eth_fixup()
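kirkwood_dt_eth_fixup() above recovers the bootloader-programmed MAC address by mapping the controller and reading MV643XX_ETH_MAC_ADDR_HIGH/LOW. A hedged sketch of the unpacking step, assuming the usual mv643xx_eth layout (first four bytes in the HIGH register, most significant byte first, last two in LOW); the register offsets are the ones used by that driver family and should be treated as assumptions here:

#include <linux/io.h>

#define EXAMPLE_MAC_ADDR_LOW	0x0414	/* assumed mv643xx_eth port offsets */
#define EXAMPLE_MAC_ADDR_HIGH	0x0418

static void example_read_mv643xx_mac(void __iomem *io, u8 mac[6])
{
	u32 hi = readl(io + EXAMPLE_MAC_ADDR_HIGH);
	u32 lo = readl(io + EXAMPLE_MAC_ADDR_LOW);

	mac[0] = (hi >> 24) & 0xff;
	mac[1] = (hi >> 16) & 0xff;
	mac[2] = (hi >> 8) & 0xff;
	mac[3] = hi & 0xff;
	mac[4] = (lo >> 8) & 0xff;
	mac[5] = lo & 0xff;
}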
|
/linux-4.1.27/drivers/media/usb/pvrusb2/ |
H A D | Makefile | 11 pvrusb2-context.o pvrusb2-io.o pvrusb2-ioread.o \
|
/linux-4.1.27/drivers/memory/tegra/ |
H A D | mc.h | 12 #include <linux/io.h>
|