seg 30 arch/alpha/include/asm/processor.h unsigned long seg; seg 23 arch/alpha/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 38 arch/alpha/include/asm/uaccess.h (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; }) seg 119 arch/arm64/kernel/pci.c u16 seg = root->segment; seg 128 arch/arm64/kernel/pci.c dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); seg 142 arch/arm64/kernel/pci.c dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, seg 32 arch/c6x/include/asm/thread_info.h unsigned long seg; seg 8 arch/csky/include/asm/segment.h unsigned long seg; seg 16 arch/csky/include/asm/segment.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 21 arch/csky/include/asm/uaccess.h unsigned long limit = current_thread_info()->addr_limit.seg; seg 20 arch/h8300/include/asm/segment.h unsigned long seg; seg 36 arch/h8300/include/asm/segment.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 26 arch/hexagon/include/asm/thread_info.h unsigned long seg; seg 37 arch/hexagon/include/asm/uaccess.h ((get_fs().seg == KERNEL_DS.seg) || \ seg 38 arch/hexagon/include/asm/uaccess.h (((unsigned long)addr < get_fs().seg) && \ seg 39 arch/hexagon/include/asm/uaccess.h (unsigned long)size < (get_fs().seg - (unsigned long)addr))) seg 247 arch/ia64/include/asm/processor.h unsigned long seg; seg 54 arch/ia64/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 65 arch/ia64/include/asm/uaccess.h unsigned long seg = get_fs().seg; seg 66 arch/ia64/include/asm/uaccess.h return likely(addr <= seg) && seg 67 arch/ia64/include/asm/uaccess.h (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); seg 40 arch/ia64/pci/pci.c #define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \ seg 41 arch/ia64/pci/pci.c (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg)) seg 45 arch/ia64/pci/pci.c #define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \ seg 46 arch/ia64/pci/pci.c (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg)) seg 48 arch/ia64/pci/pci.c int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn, seg 54 arch/ia64/pci/pci.c if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095)) seg 57 arch/ia64/pci/pci.c if ((seg | reg) <= 255) { seg 58 arch/ia64/pci/pci.c addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); seg 61 arch/ia64/pci/pci.c addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); seg 75 arch/ia64/pci/pci.c int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn, seg 81 arch/ia64/pci/pci.c if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095)) seg 84 arch/ia64/pci/pci.c if ((seg | reg) <= 255) { seg 85 arch/ia64/pci/pci.c addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); seg 88 arch/ia64/pci/pci.c addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); seg 22 arch/m68k/include/asm/segment.h unsigned long seg; seg 37 arch/m68k/include/asm/segment.h __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); seg 45 arch/m68k/include/asm/segment.h : /* no outputs */ : "r" (val.seg) : "memory"); seg 55 arch/m68k/include/asm/segment.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 148 arch/m68k/include/asm/tlbflush.h unsigned char seg; seg 155 arch/m68k/include/asm/tlbflush.h seg = sun3_get_segmap(i); seg 156 arch/m68k/include/asm/tlbflush.h if(seg == SUN3_INVALID_PMEG) seg 160 arch/m68k/include/asm/tlbflush.h pmeg_alloc[seg] = 0; seg 161 arch/m68k/include/asm/tlbflush.h pmeg_ctx[seg] = 0; seg 162 arch/m68k/include/asm/tlbflush.h pmeg_vaddr[seg] = 0; seg 196 arch/m68k/include/asm/tlbflush.h unsigned char seg, 
oldctx; seg 205 arch/m68k/include/asm/tlbflush.h if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG) seg 207 arch/m68k/include/asm/tlbflush.h if(pmeg_ctx[seg] == mm->context) { seg 208 arch/m68k/include/asm/tlbflush.h pmeg_alloc[seg] = 0; seg 209 arch/m68k/include/asm/tlbflush.h pmeg_ctx[seg] = 0; seg 210 arch/m68k/include/asm/tlbflush.h pmeg_vaddr[seg] = 0; seg 139 arch/m68k/kernel/process.c p->thread.fs = get_fs().seg; seg 58 arch/m68k/mm/cache.c : "a" (vaddr), "d" (get_fs().seg)); seg 130 arch/m68k/sun3/mmu_emu.c unsigned long seg, num; seg 153 arch/m68k/sun3/mmu_emu.c for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) { seg 154 arch/m68k/sun3/mmu_emu.c i = sun3_get_segmap(seg); seg 159 arch/m68k/sun3/mmu_emu.c print_pte_vaddr (seg); seg 161 arch/m68k/sun3/mmu_emu.c sun3_put_segmap(seg, SUN3_INVALID_PMEG); seg 166 arch/m68k/sun3/mmu_emu.c for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) { seg 167 arch/m68k/sun3/mmu_emu.c if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) { seg 171 arch/m68k/sun3/mmu_emu.c print_pte_vaddr (seg + (i*PAGE_SIZE)); seg 178 arch/m68k/sun3/mmu_emu.c m68k_vmalloc_end = seg; seg 183 arch/m68k/sun3/mmu_emu.c pmeg_alloc[sun3_get_segmap(seg)] = 2; seg 192 arch/m68k/sun3/mmu_emu.c for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE) seg 193 arch/m68k/sun3/mmu_emu.c sun3_put_segmap(seg, SUN3_INVALID_PMEG); seg 196 arch/m68k/sun3/mmu_emu.c for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) { seg 197 arch/m68k/sun3/mmu_emu.c i = sun3_get_segmap(seg); seg 199 arch/m68k/sun3/mmu_emu.c (*(romvec->pv_setctxt))(j, (void *)seg, i); seg 423 arch/m68k/sun3/mmu_emu.c pr_info("seg:%ld crp:%p ->", get_fs().seg, crp); seg 63 arch/microblaze/include/asm/thread_info.h unsigned long seg; seg 48 arch/microblaze/include/asm/uaccess.h # define segment_eq(a, b) ((a).seg == (b).seg) seg 71 arch/microblaze/include/asm/uaccess.h if ((get_fs().seg < ((unsigned long)addr)) || seg 72 arch/microblaze/include/asm/uaccess.h (get_fs().seg < ((unsigned long)addr + size - 1))) { seg 75 arch/microblaze/include/asm/uaccess.h (u32)get_fs().seg); seg 81 arch/microblaze/include/asm/uaccess.h (u32)get_fs().seg); seg 228 arch/mips/include/asm/processor.h unsigned long seg; seg 75 arch/mips/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 130 arch/mips/include/asm/uaccess.h return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; seg 983 arch/mips/kernel/traps.c mm_segment_t seg; seg 985 arch/mips/kernel/traps.c seg = get_fs(); seg 1061 arch/mips/kernel/traps.c set_fs(seg); seg 1075 arch/mips/kernel/traps.c mm_segment_t seg; seg 1078 arch/mips/kernel/traps.c seg = get_fs(); seg 1103 arch/mips/kernel/traps.c set_fs(seg); seg 890 arch/mips/kernel/unaligned.c mm_segment_t seg; seg 968 arch/mips/kernel/unaligned.c seg = get_fs(); seg 973 arch/mips/kernel/unaligned.c set_fs(seg); seg 978 arch/mips/kernel/unaligned.c set_fs(seg); seg 986 arch/mips/kernel/unaligned.c set_fs(seg); seg 991 arch/mips/kernel/unaligned.c set_fs(seg); seg 999 arch/mips/kernel/unaligned.c set_fs(seg); seg 1004 arch/mips/kernel/unaligned.c set_fs(seg); seg 1012 arch/mips/kernel/unaligned.c set_fs(seg); seg 1019 arch/mips/kernel/unaligned.c set_fs(seg); seg 1025 arch/mips/kernel/unaligned.c set_fs(seg); seg 1032 arch/mips/kernel/unaligned.c set_fs(seg); seg 1037 arch/mips/kernel/unaligned.c set_fs(seg); seg 1040 arch/mips/kernel/unaligned.c set_fs(seg); seg 2294 arch/mips/kernel/unaligned.c mm_segment_t seg; seg 2329 arch/mips/kernel/unaligned.c seg = get_fs(); seg 2334 
arch/mips/kernel/unaligned.c set_fs(seg); seg 2340 arch/mips/kernel/unaligned.c seg = get_fs(); seg 2345 arch/mips/kernel/unaligned.c set_fs(seg); seg 2357 arch/mips/kernel/unaligned.c seg = get_fs(); seg 2361 arch/mips/kernel/unaligned.c set_fs(seg); seg 515 arch/nds32/mm/alignment.c mm_segment_t seg = get_fs(); seg 529 arch/nds32/mm/alignment.c set_fs(seg); seg 30 arch/nios2/include/asm/thread_info.h unsigned long seg; seg 31 arch/nios2/include/asm/uaccess.h #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) seg 33 arch/nios2/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 36 arch/nios2/include/asm/uaccess.h (((signed long)(((long)get_fs().seg) & \ seg 109 arch/parisc/include/asm/processor.h int seg; seg 17 arch/parisc/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 87 arch/powerpc/include/asm/processor.h unsigned long seg; seg 41 arch/powerpc/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 43 arch/powerpc/include/asm/uaccess.h #define user_addr_max() (get_fs().seg) seg 51 arch/powerpc/include/asm/uaccess.h (((addr) <= (segment).seg) && ((size) <= (segment).seg)) seg 56 arch/powerpc/include/asm/uaccess.h mm_segment_t seg) seg 58 arch/powerpc/include/asm/uaccess.h if (addr > seg.seg) seg 60 arch/powerpc/include/asm/uaccess.h return (size == 0 || size - 1 <= seg.seg - addr); seg 113 arch/powerpc/lib/sstep.c regs->dar = USER_DS.seg; seg 24 arch/riscv/include/asm/thread_info.h unsigned long seg; seg 46 arch/riscv/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 48 arch/riscv/include/asm/uaccess.h #define user_addr_max() (get_fs().seg) seg 80 arch/riscv/include/asm/uaccess.h return size <= fs.seg && addr <= fs.seg - size; seg 165 arch/s390/mm/extmem.c query_segment_type (struct dcss_segment *seg) seg 183 arch/s390/mm/extmem.c memcpy (qin->qname, seg->dcss_name, 8); seg 203 arch/s390/mm/extmem.c seg->vm_segtype = qout->range[0].start & 0xff; seg 222 arch/s390/mm/extmem.c seg->vm_segtype = SEG_TYPE_EWEN; seg 226 arch/s390/mm/extmem.c seg->start_addr = qout->segstart; seg 227 arch/s390/mm/extmem.c seg->end = qout->segend; seg 229 arch/s390/mm/extmem.c memcpy (seg->range, qout->range, 6*sizeof(struct qrange)); seg 230 arch/s390/mm/extmem.c seg->segcnt = qout->segcnt; seg 254 arch/s390/mm/extmem.c struct dcss_segment seg; seg 259 arch/s390/mm/extmem.c dcss_mkname(name, seg.dcss_name); seg 260 arch/s390/mm/extmem.c rc = query_segment_type (&seg); seg 263 arch/s390/mm/extmem.c return seg.vm_segtype; seg 271 arch/s390/mm/extmem.c segment_overlaps_others (struct dcss_segment *seg) seg 279 arch/s390/mm/extmem.c if ((tmp->start_addr >> 20) > (seg->end >> 20)) seg 281 arch/s390/mm/extmem.c if ((tmp->end >> 20) < (seg->start_addr >> 20)) seg 283 arch/s390/mm/extmem.c if (seg == tmp) seg 297 arch/s390/mm/extmem.c struct dcss_segment *seg; seg 301 arch/s390/mm/extmem.c seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); seg 302 arch/s390/mm/extmem.c if (seg == NULL) { seg 306 arch/s390/mm/extmem.c dcss_mkname (name, seg->dcss_name); seg 307 arch/s390/mm/extmem.c rc = query_segment_type (seg); seg 311 arch/s390/mm/extmem.c if (segment_overlaps_others(seg)) { seg 316 arch/s390/mm/extmem.c rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1); seg 321 arch/s390/mm/extmem.c seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); seg 322 arch/s390/mm/extmem.c if (seg->res == NULL) { seg 326 arch/s390/mm/extmem.c seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; seg 327 
arch/s390/mm/extmem.c seg->res->start = seg->start_addr; seg 328 arch/s390/mm/extmem.c seg->res->end = seg->end; seg 329 arch/s390/mm/extmem.c memcpy(&seg->res_name, seg->dcss_name, 8); seg 330 arch/s390/mm/extmem.c EBCASC(seg->res_name, 8); seg 331 arch/s390/mm/extmem.c seg->res_name[8] = '\0'; seg 332 arch/s390/mm/extmem.c strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); seg 333 arch/s390/mm/extmem.c seg->res->name = seg->res_name; seg 334 arch/s390/mm/extmem.c rc = seg->vm_segtype; seg 337 arch/s390/mm/extmem.c seg->res->flags |= IORESOURCE_READONLY; seg 338 arch/s390/mm/extmem.c if (request_resource(&iomem_resource, seg->res)) { seg 340 arch/s390/mm/extmem.c kfree(seg->res); seg 345 arch/s390/mm/extmem.c diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name, seg 348 arch/s390/mm/extmem.c diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name, seg 351 arch/s390/mm/extmem.c dcss_diag(&purgeseg_scode, seg->dcss_name, seg 359 arch/s390/mm/extmem.c dcss_diag(&purgeseg_scode, seg->dcss_name, seg 363 arch/s390/mm/extmem.c seg->start_addr = start_addr; seg 364 arch/s390/mm/extmem.c seg->end = end_addr; seg 365 arch/s390/mm/extmem.c seg->do_nonshared = do_nonshared; seg 366 arch/s390/mm/extmem.c refcount_set(&seg->ref_count, 1); seg 367 arch/s390/mm/extmem.c list_add(&seg->list, &dcss_list); seg 368 arch/s390/mm/extmem.c *addr = seg->start_addr; seg 369 arch/s390/mm/extmem.c *end = seg->end; seg 372 arch/s390/mm/extmem.c "exclusive-writable\n", name, (void*) seg->start_addr, seg 373 arch/s390/mm/extmem.c (void*) seg->end, segtype_string[seg->vm_segtype]); seg 376 arch/s390/mm/extmem.c "shared access mode\n", name, (void*) seg->start_addr, seg 377 arch/s390/mm/extmem.c (void*) seg->end, segtype_string[seg->vm_segtype]); seg 381 arch/s390/mm/extmem.c release_resource(seg->res); seg 382 arch/s390/mm/extmem.c kfree(seg->res); seg 384 arch/s390/mm/extmem.c vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); seg 386 arch/s390/mm/extmem.c kfree(seg); seg 414 arch/s390/mm/extmem.c struct dcss_segment *seg; seg 421 arch/s390/mm/extmem.c seg = segment_by_name (name); seg 422 arch/s390/mm/extmem.c if (seg == NULL) seg 425 arch/s390/mm/extmem.c if (do_nonshared == seg->do_nonshared) { seg 426 arch/s390/mm/extmem.c refcount_inc(&seg->ref_count); seg 427 arch/s390/mm/extmem.c *addr = seg->start_addr; seg 428 arch/s390/mm/extmem.c *end = seg->end; seg 429 arch/s390/mm/extmem.c rc = seg->vm_segtype; seg 455 arch/s390/mm/extmem.c struct dcss_segment *seg; seg 461 arch/s390/mm/extmem.c seg = segment_by_name (name); seg 462 arch/s390/mm/extmem.c if (seg == NULL) { seg 466 arch/s390/mm/extmem.c if (do_nonshared == seg->do_nonshared) { seg 472 arch/s390/mm/extmem.c if (refcount_read(&seg->ref_count) != 1) { seg 477 arch/s390/mm/extmem.c release_resource(seg->res); seg 479 arch/s390/mm/extmem.c seg->res->flags &= ~IORESOURCE_READONLY; seg 481 arch/s390/mm/extmem.c if (seg->vm_segtype == SEG_TYPE_SR || seg 482 arch/s390/mm/extmem.c seg->vm_segtype == SEG_TYPE_ER) seg 483 arch/s390/mm/extmem.c seg->res->flags |= IORESOURCE_READONLY; seg 485 arch/s390/mm/extmem.c if (request_resource(&iomem_resource, seg->res)) { seg 489 arch/s390/mm/extmem.c kfree(seg->res); seg 493 arch/s390/mm/extmem.c dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); seg 495 arch/s390/mm/extmem.c diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name, seg 498 arch/s390/mm/extmem.c diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name, seg 510 arch/s390/mm/extmem.c seg->start_addr = start_addr; seg 511 arch/s390/mm/extmem.c 
seg->end = end_addr; seg 512 arch/s390/mm/extmem.c seg->do_nonshared = do_nonshared; seg 516 arch/s390/mm/extmem.c release_resource(seg->res); seg 517 arch/s390/mm/extmem.c kfree(seg->res); seg 519 arch/s390/mm/extmem.c vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); seg 520 arch/s390/mm/extmem.c list_del(&seg->list); seg 521 arch/s390/mm/extmem.c dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); seg 522 arch/s390/mm/extmem.c kfree(seg); seg 537 arch/s390/mm/extmem.c struct dcss_segment *seg; seg 543 arch/s390/mm/extmem.c seg = segment_by_name (name); seg 544 arch/s390/mm/extmem.c if (seg == NULL) { seg 548 arch/s390/mm/extmem.c if (!refcount_dec_and_test(&seg->ref_count)) seg 550 arch/s390/mm/extmem.c release_resource(seg->res); seg 551 arch/s390/mm/extmem.c kfree(seg->res); seg 552 arch/s390/mm/extmem.c vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); seg 553 arch/s390/mm/extmem.c list_del(&seg->list); seg 554 arch/s390/mm/extmem.c dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); seg 555 arch/s390/mm/extmem.c kfree(seg); seg 566 arch/s390/mm/extmem.c struct dcss_segment *seg; seg 575 arch/s390/mm/extmem.c seg = segment_by_name (name); seg 577 arch/s390/mm/extmem.c if (seg == NULL) { seg 583 arch/s390/mm/extmem.c for (i=0; i<seg->segcnt; i++) { seg 585 arch/s390/mm/extmem.c seg->range[i].start >> PAGE_SHIFT, seg 586 arch/s390/mm/extmem.c seg->range[i].end >> PAGE_SHIFT, seg 587 arch/s390/mm/extmem.c segtype_string[seg->range[i].start & 0xff]); seg 308 arch/s390/mm/vmem.c static int insert_memory_segment(struct memory_segment *seg) seg 312 arch/s390/mm/vmem.c if (seg->start + seg->size > VMEM_MAX_PHYS || seg 313 arch/s390/mm/vmem.c seg->start + seg->size < seg->start) seg 317 arch/s390/mm/vmem.c if (seg->start >= tmp->start + tmp->size) seg 319 arch/s390/mm/vmem.c if (seg->start + seg->size <= tmp->start) seg 323 arch/s390/mm/vmem.c list_add(&seg->list, &mem_segs); seg 330 arch/s390/mm/vmem.c static void remove_memory_segment(struct memory_segment *seg) seg 332 arch/s390/mm/vmem.c list_del(&seg->list); seg 335 arch/s390/mm/vmem.c static void __remove_shared_memory(struct memory_segment *seg) seg 337 arch/s390/mm/vmem.c remove_memory_segment(seg); seg 338 arch/s390/mm/vmem.c vmem_remove_range(seg->start, seg->size); seg 343 arch/s390/mm/vmem.c struct memory_segment *seg; seg 349 arch/s390/mm/vmem.c list_for_each_entry(seg, &mem_segs, list) { seg 350 arch/s390/mm/vmem.c if (seg->start == start && seg->size == size) seg 354 arch/s390/mm/vmem.c if (seg->start != start || seg->size != size) seg 358 arch/s390/mm/vmem.c __remove_shared_memory(seg); seg 359 arch/s390/mm/vmem.c kfree(seg); seg 367 arch/s390/mm/vmem.c struct memory_segment *seg; seg 372 arch/s390/mm/vmem.c seg = kzalloc(sizeof(*seg), GFP_KERNEL); seg 373 arch/s390/mm/vmem.c if (!seg) seg 375 arch/s390/mm/vmem.c seg->start = start; seg 376 arch/s390/mm/vmem.c seg->size = size; seg 378 arch/s390/mm/vmem.c ret = insert_memory_segment(seg); seg 388 arch/s390/mm/vmem.c __remove_shared_memory(seg); seg 390 arch/s390/mm/vmem.c kfree(seg); seg 433 arch/s390/mm/vmem.c struct memory_segment *seg; seg 437 arch/s390/mm/vmem.c seg = kzalloc(sizeof(*seg), GFP_KERNEL); seg 438 arch/s390/mm/vmem.c if (!seg) seg 440 arch/s390/mm/vmem.c seg->start = reg->base; seg 441 arch/s390/mm/vmem.c seg->size = reg->size; seg 442 arch/s390/mm/vmem.c insert_memory_segment(seg); seg 8 arch/sh/include/asm/segment.h unsigned long seg; seg 27 arch/sh/include/asm/segment.h #define segment_eq(a, b) 
((a).seg == (b).seg) seg 9 arch/sh/include/asm/uaccess.h ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) seg 28 arch/sh/include/asm/uaccess.h #define user_addr_max() (current_thread_info()->addr_limit.seg) seg 36 arch/sparc/include/asm/processor_32.h int seg; seg 51 arch/sparc/include/asm/processor_64.h unsigned char seg; seg 31 arch/sparc/include/asm/uaccess_32.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 40 arch/sparc/include/asm/uaccess_32.h #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) seg 35 arch/sparc/include/asm/uaccess_64.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 39 arch/sparc/include/asm/uaccess_64.h current_thread_info()->current_ds = (val).seg; \ seg 40 arch/sparc/include/asm/uaccess_64.h __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ seg 28 arch/unicore32/include/asm/thread_info.h unsigned long seg; seg 82 arch/x86/boot/boot.h u16 seg; seg 83 arch/x86/boot/boot.h asm("movw %%ds,%0" : "=rm" (seg)); seg 84 arch/x86/boot/boot.h return seg; seg 87 arch/x86/boot/boot.h static inline void set_fs(u16 seg) seg 89 arch/x86/boot/boot.h asm volatile("movw %0,%%fs" : : "rm" (seg)); seg 93 arch/x86/boot/boot.h u16 seg; seg 94 arch/x86/boot/boot.h asm volatile("movw %%fs,%0" : "=rm" (seg)); seg 95 arch/x86/boot/boot.h return seg; seg 98 arch/x86/boot/boot.h static inline void set_gs(u16 seg) seg 100 arch/x86/boot/boot.h asm volatile("movw %0,%%gs" : : "rm" (seg)); seg 104 arch/x86/boot/boot.h u16 seg; seg 105 arch/x86/boot/boot.h asm volatile("movw %%gs,%0" : "=rm" (seg)); seg 106 arch/x86/boot/boot.h return seg; seg 5 arch/x86/boot/compressed/cmdline.c static inline void set_fs(unsigned long seg) seg 7 arch/x86/boot/compressed/cmdline.c fs = seg << 4; /* shift it back */ seg 12 arch/x86/boot/vesa.h u16 off, seg; seg 51 arch/x86/boot/video-vesa.c set_fs(vginfo.video_mode_ptr.seg); seg 47 arch/x86/ia32/ia32_signal.c #define get_user_seg(seg) ({ unsigned int v; savesegment(seg, v); v; }) seg 48 arch/x86/ia32/ia32_signal.c #define set_user_seg(seg, v) loadsegment_##seg(v) seg 54 arch/x86/ia32/ia32_signal.c #define GET_SEG(seg) ({ \ seg 56 arch/x86/ia32/ia32_signal.c get_user_ex(tmp, &sc->seg); \ seg 60 arch/x86/ia32/ia32_signal.c #define COPY_SEG_CPL3(seg) do { \ seg 61 arch/x86/ia32/ia32_signal.c regs->seg = GET_SEG(seg) | 3; \ seg 64 arch/x86/ia32/ia32_signal.c #define RELOAD_SEG(seg) { \ seg 65 arch/x86/ia32/ia32_signal.c unsigned int pre = (seg) | 3; \ seg 66 arch/x86/ia32/ia32_signal.c unsigned int cur = get_user_seg(seg); \ seg 68 arch/x86/ia32/ia32_signal.c set_user_seg(seg, pre); \ seg 85 arch/x86/include/asm/desc.h unsigned dpl, unsigned ist, unsigned seg) seg 99 arch/x86/include/asm/desc.h gate->segment = seg; seg 196 arch/x86/include/asm/kvm_emulate.h struct desc_struct *desc, u32 *base3, int seg); seg 198 arch/x86/include/asm/kvm_emulate.h struct desc_struct *desc, u32 base3, int seg); seg 200 arch/x86/include/asm/kvm_emulate.h int seg); seg 250 arch/x86/include/asm/kvm_emulate.h unsigned seg; seg 1037 arch/x86/include/asm/kvm_host.h u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); seg 1039 arch/x86/include/asm/kvm_host.h struct kvm_segment *var, int seg); seg 1042 arch/x86/include/asm/kvm_host.h struct kvm_segment *var, int seg); seg 1375 arch/x86/include/asm/kvm_host.h void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); seg 1376 arch/x86/include/asm/kvm_host.h int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg); seg 21 
arch/x86/include/asm/pci_64.h extern int (*pci_config_read)(int seg, int bus, int dev, int fn, seg 23 arch/x86/include/asm/pci_64.h extern int (*pci_config_write)(int seg, int bus, int dev, int fn, seg 157 arch/x86/include/asm/pci_x86.h extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, seg 159 arch/x86/include/asm/pci_x86.h extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end); seg 437 arch/x86/include/asm/processor.h unsigned long seg; seg 269 arch/x86/include/asm/segment.h [p] "=a" (p), [seg] "r" (__CPUNODE_SEG)); seg 315 arch/x86/include/asm/segment.h #define __loadsegment_simple(seg, value) \ seg 320 arch/x86/include/asm/segment.h "1: movl %k0,%%" #seg " \n" \ seg 362 arch/x86/include/asm/segment.h #define loadsegment(seg, value) __loadsegment_ ## seg (value) seg 367 arch/x86/include/asm/segment.h #define savesegment(seg, value) \ seg 368 arch/x86/include/asm/segment.h asm("mov %%" #seg ",%0":"=r" (value) : : "memory") seg 36 arch/x86/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 37 arch/x86/include/asm/uaccess.h #define user_addr_max() (current->thread.addr_limit.seg) seg 235 arch/x86/kernel/ptrace.c unsigned int seg; seg 241 arch/x86/kernel/ptrace.c asm("movl %%fs,%0" : "=r" (seg)); seg 242 arch/x86/kernel/ptrace.c return seg; seg 247 arch/x86/kernel/ptrace.c asm("movl %%gs,%0" : "=r" (seg)); seg 248 arch/x86/kernel/ptrace.c return seg; seg 253 arch/x86/kernel/ptrace.c asm("movl %%ds,%0" : "=r" (seg)); seg 254 arch/x86/kernel/ptrace.c return seg; seg 259 arch/x86/kernel/ptrace.c asm("movl %%es,%0" : "=r" (seg)); seg 260 arch/x86/kernel/ptrace.c return seg; seg 54 arch/x86/kernel/signal.c #define GET_SEG(seg) ({ \ seg 56 arch/x86/kernel/signal.c get_user_ex(tmp, &sc->seg); \ seg 60 arch/x86/kernel/signal.c #define COPY_SEG(seg) do { \ seg 61 arch/x86/kernel/signal.c regs->seg = GET_SEG(seg); \ seg 64 arch/x86/kernel/signal.c #define COPY_SEG_CPL3(seg) do { \ seg 65 arch/x86/kernel/signal.c regs->seg = GET_SEG(seg) | 3; \ seg 14 arch/x86/kernel/step.c unsigned long addr, seg; seg 17 arch/x86/kernel/step.c seg = regs->cs; seg 19 arch/x86/kernel/step.c addr = (addr & 0xffff) + (seg << 4); seg 30 arch/x86/kernel/step.c if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { seg 34 arch/x86/kernel/step.c seg >>= 3; seg 38 arch/x86/kernel/step.c seg >= child->mm->context.ldt->nr_entries)) seg 41 arch/x86/kernel/step.c desc = &child->mm->context.ldt->entries[seg]; seg 288 arch/x86/kernel/vm86_32.c unsigned short seg; seg 297 arch/x86/kernel/vm86_32.c get_user_ex(seg, &user_vm86->regs.cs); seg 298 arch/x86/kernel/vm86_32.c vm86regs.pt.cs = seg; seg 301 arch/x86/kernel/vm86_32.c get_user_ex(seg, &user_vm86->regs.ss); seg 302 arch/x86/kernel/vm86_32.c vm86regs.pt.ss = seg; seg 601 arch/x86/kvm/emulate.c static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) seg 603 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) seg 606 arch/x86/kvm/emulate.c return ctxt->ops->get_cached_segment_base(ctxt, seg); seg 654 arch/x86/kvm/emulate.c static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) seg 659 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); seg 664 arch/x86/kvm/emulate.c unsigned seg) seg 670 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); seg 671 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); seg 715 arch/x86/kvm/emulate.c la = seg_base(ctxt, addr.seg) + addr.ea; seg 731 
arch/x86/kvm/emulate.c addr.seg); seg 763 arch/x86/kvm/emulate.c if (addr.seg == VCPU_SREG_SS) seg 785 arch/x86/kvm/emulate.c struct segmented_address addr = { .seg = VCPU_SREG_CS, seg 883 arch/x86/kvm/emulate.c struct segmented_address addr = { .seg = VCPU_SREG_CS, seg 1638 arch/x86/kvm/emulate.c u16 selector, int seg, u8 cpl, seg 1657 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); seg 1660 arch/x86/kvm/emulate.c } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { seg 1674 arch/x86/kvm/emulate.c if (seg == VCPU_SREG_TR && (selector & (1 << 2))) seg 1679 arch/x86/kvm/emulate.c if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) seg 1682 arch/x86/kvm/emulate.c if (seg == VCPU_SREG_SS) { seg 1711 arch/x86/kvm/emulate.c if (seg <= VCPU_SREG_GS && !seg_desc.s) { seg 1718 arch/x86/kvm/emulate.c err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; seg 1724 arch/x86/kvm/emulate.c switch (seg) { seg 1803 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); seg 1812 arch/x86/kvm/emulate.c u16 selector, int seg) seg 1826 arch/x86/kvm/emulate.c if (seg == VCPU_SREG_SS && selector == 3 && seg 1830 arch/x86/kvm/emulate.c return __load_segment_descriptor(ctxt, selector, seg, cpl, seg 1885 arch/x86/kvm/emulate.c addr.seg = VCPU_SREG_SS; seg 1904 arch/x86/kvm/emulate.c addr.seg = VCPU_SREG_SS; seg 1999 arch/x86/kvm/emulate.c int seg = ctxt->src2.val; seg 2001 arch/x86/kvm/emulate.c ctxt->src.val = get_segment_selector(ctxt, seg); seg 2012 arch/x86/kvm/emulate.c int seg = ctxt->src2.val; seg 2025 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, (u16)selector, seg); seg 2363 arch/x86/kvm/emulate.c int seg = ctxt->src2.val; seg 2369 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, sel, seg); seg 5071 arch/x86/kvm/emulate.c op->addr.mem.seg = VCPU_SREG_ES; seg 5125 arch/x86/kvm/emulate.c op->addr.mem.seg = ctxt->seg_override; seg 5136 arch/x86/kvm/emulate.c op->addr.mem.seg = ctxt->seg_override; seg 5448 arch/x86/kvm/emulate.c ctxt->memop.addr.mem.seg = ctxt->seg_override; seg 181 arch/x86/kvm/mtrr.c static u64 fixed_mtrr_seg_unit_size(int seg) seg 183 arch/x86/kvm/mtrr.c return 8 << fixed_seg_table[seg].range_shift; seg 186 arch/x86/kvm/mtrr.c static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) seg 190 arch/x86/kvm/mtrr.c *seg = 0; seg 194 arch/x86/kvm/mtrr.c *seg = 1; seg 200 arch/x86/kvm/mtrr.c *seg = 2; seg 212 arch/x86/kvm/mtrr.c static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) seg 214 arch/x86/kvm/mtrr.c struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; seg 215 arch/x86/kvm/mtrr.c u64 unit_size = fixed_mtrr_seg_unit_size(seg); seg 222 arch/x86/kvm/mtrr.c static int fixed_mtrr_seg_unit_range_index(int seg, int unit) seg 224 arch/x86/kvm/mtrr.c struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; seg 226 arch/x86/kvm/mtrr.c WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg) seg 233 arch/x86/kvm/mtrr.c static int fixed_mtrr_seg_end_range_index(int seg) seg 235 arch/x86/kvm/mtrr.c struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; seg 244 arch/x86/kvm/mtrr.c int seg, unit; seg 246 arch/x86/kvm/mtrr.c if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) seg 249 arch/x86/kvm/mtrr.c fixed_mtrr_seg_unit_range(seg, unit, start, end); seg 255 arch/x86/kvm/mtrr.c int seg, unit; seg 257 arch/x86/kvm/mtrr.c if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) seg 260 arch/x86/kvm/mtrr.c return fixed_mtrr_seg_unit_range_index(seg, unit); seg 266 
arch/x86/kvm/mtrr.c int seg, seg_num = ARRAY_SIZE(fixed_seg_table); seg 268 arch/x86/kvm/mtrr.c for (seg = 0; seg < seg_num; seg++) { seg 269 arch/x86/kvm/mtrr.c mtrr_seg = &fixed_seg_table[seg]; seg 271 arch/x86/kvm/mtrr.c return seg; seg 277 arch/x86/kvm/mtrr.c static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg) seg 282 arch/x86/kvm/mtrr.c mtrr_seg = &fixed_seg_table[seg]; seg 288 arch/x86/kvm/mtrr.c static u64 fixed_mtrr_range_end_addr(int seg, int index) seg 290 arch/x86/kvm/mtrr.c struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; seg 462 arch/x86/kvm/mtrr.c int seg; seg 478 arch/x86/kvm/mtrr.c int seg, index; seg 483 arch/x86/kvm/mtrr.c seg = fixed_mtrr_addr_to_seg(iter->start); seg 484 arch/x86/kvm/mtrr.c if (seg < 0) seg 488 arch/x86/kvm/mtrr.c index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); seg 490 arch/x86/kvm/mtrr.c iter->seg = seg; seg 545 arch/x86/kvm/mtrr.c if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) { seg 558 arch/x86/kvm/mtrr.c if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg)) seg 559 arch/x86/kvm/mtrr.c iter->seg++; seg 1473 arch/x86/kvm/svm.c static void init_seg(struct vmcb_seg *seg) seg 1475 arch/x86/kvm/svm.c seg->selector = 0; seg 1476 arch/x86/kvm/svm.c seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | seg 1478 arch/x86/kvm/svm.c seg->limit = 0xffff; seg 1479 arch/x86/kvm/svm.c seg->base = 0; seg 1482 arch/x86/kvm/svm.c static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) seg 1484 arch/x86/kvm/svm.c seg->selector = 0; seg 1485 arch/x86/kvm/svm.c seg->attrib = SVM_SELECTOR_P_MASK | type; seg 1486 arch/x86/kvm/svm.c seg->limit = 0xffff; seg 1487 arch/x86/kvm/svm.c seg->base = 0; seg 2433 arch/x86/kvm/svm.c static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) seg 2437 arch/x86/kvm/svm.c switch (seg) { seg 2451 arch/x86/kvm/svm.c static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) seg 2453 arch/x86/kvm/svm.c struct vmcb_seg *s = svm_seg(vcpu, seg); seg 2459 arch/x86/kvm/svm.c struct kvm_segment *var, int seg) seg 2461 arch/x86/kvm/svm.c struct vmcb_seg *s = svm_seg(vcpu, seg); seg 2490 arch/x86/kvm/svm.c switch (seg) { seg 2654 arch/x86/kvm/svm.c struct kvm_segment *var, int seg) seg 2657 arch/x86/kvm/svm.c struct vmcb_seg *s = svm_seg(vcpu, seg); seg 2677 arch/x86/kvm/svm.c if (seg == VCPU_SREG_SS) seg 3799 arch/x86/kvm/vmx/nested.c struct kvm_segment seg; seg 3882 arch/x86/kvm/vmx/nested.c seg = (struct kvm_segment) { seg 3892 arch/x86/kvm/vmx/nested.c seg.l = 1; seg 3894 arch/x86/kvm/vmx/nested.c seg.db = 1; seg 3895 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); seg 3896 arch/x86/kvm/vmx/nested.c seg = (struct kvm_segment) { seg 3905 arch/x86/kvm/vmx/nested.c seg.selector = vmcs12->host_ds_selector; seg 3906 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); seg 3907 arch/x86/kvm/vmx/nested.c seg.selector = vmcs12->host_es_selector; seg 3908 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); seg 3909 arch/x86/kvm/vmx/nested.c seg.selector = vmcs12->host_ss_selector; seg 3910 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); seg 3911 arch/x86/kvm/vmx/nested.c seg.selector = vmcs12->host_fs_selector; seg 3912 arch/x86/kvm/vmx/nested.c seg.base = vmcs12->host_fs_base; seg 3913 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); seg 3914 arch/x86/kvm/vmx/nested.c seg.selector = vmcs12->host_gs_selector; seg 3915 arch/x86/kvm/vmx/nested.c seg.base = 
vmcs12->host_gs_base; seg 3916 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); seg 3917 arch/x86/kvm/vmx/nested.c seg = (struct kvm_segment) { seg 3924 arch/x86/kvm/vmx/nested.c vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); seg 414 arch/x86/kvm/vmx/vmx.c #define VMX_SEGMENT_FIELD(seg) \ seg 415 arch/x86/kvm/vmx/vmx.c [VCPU_SREG_##seg] = { \ seg 416 arch/x86/kvm/vmx/vmx.c .selector = GUEST_##seg##_SELECTOR, \ seg 417 arch/x86/kvm/vmx/vmx.c .base = GUEST_##seg##_BASE, \ seg 418 arch/x86/kvm/vmx/vmx.c .limit = GUEST_##seg##_LIMIT, \ seg 419 arch/x86/kvm/vmx/vmx.c .ar_bytes = GUEST_##seg##_AR_BYTES, \ seg 700 arch/x86/kvm/vmx/vmx.c static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, seg 704 arch/x86/kvm/vmx/vmx.c u32 mask = 1 << (seg * SEG_FIELD_NR + field); seg 715 arch/x86/kvm/vmx/vmx.c static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) seg 717 arch/x86/kvm/vmx/vmx.c u16 *p = &vmx->segment_cache.seg[seg].selector; seg 719 arch/x86/kvm/vmx/vmx.c if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) seg 720 arch/x86/kvm/vmx/vmx.c *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); seg 724 arch/x86/kvm/vmx/vmx.c static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) seg 726 arch/x86/kvm/vmx/vmx.c ulong *p = &vmx->segment_cache.seg[seg].base; seg 728 arch/x86/kvm/vmx/vmx.c if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) seg 729 arch/x86/kvm/vmx/vmx.c *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); seg 733 arch/x86/kvm/vmx/vmx.c static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) seg 735 arch/x86/kvm/vmx/vmx.c u32 *p = &vmx->segment_cache.seg[seg].limit; seg 737 arch/x86/kvm/vmx/vmx.c if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) seg 738 arch/x86/kvm/vmx/vmx.c *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); seg 742 arch/x86/kvm/vmx/vmx.c static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) seg 744 arch/x86/kvm/vmx/vmx.c u32 *p = &vmx->segment_cache.seg[seg].ar; seg 746 arch/x86/kvm/vmx/vmx.c if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) seg 747 arch/x86/kvm/vmx/vmx.c *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); seg 2624 arch/x86/kvm/vmx/vmx.c static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, seg 2635 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) seg 2640 arch/x86/kvm/vmx/vmx.c vmx_set_segment(vcpu, save, seg); seg 2683 arch/x86/kvm/vmx/vmx.c static void fix_rmode_seg(int seg, struct kvm_segment *save) seg 2685 arch/x86/kvm/vmx/vmx.c const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; seg 2689 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_CS) seg 2707 arch/x86/kvm/vmx/vmx.c "protected mode (seg=%d)", seg); seg 3079 arch/x86/kvm/vmx/vmx.c void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) seg 3084 arch/x86/kvm/vmx/vmx.c if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { seg 3085 arch/x86/kvm/vmx/vmx.c *var = vmx->rmode.segs[seg]; seg 3086 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_TR seg 3087 arch/x86/kvm/vmx/vmx.c || var->selector == vmx_read_guest_seg_selector(vmx, seg)) seg 3089 arch/x86/kvm/vmx/vmx.c var->base = vmx_read_guest_seg_base(vmx, seg); seg 3090 arch/x86/kvm/vmx/vmx.c var->selector = vmx_read_guest_seg_selector(vmx, seg); seg 3093 arch/x86/kvm/vmx/vmx.c var->base = vmx_read_guest_seg_base(vmx, seg); seg 3094 arch/x86/kvm/vmx/vmx.c var->limit = vmx_read_guest_seg_limit(vmx, seg); seg 3095 arch/x86/kvm/vmx/vmx.c var->selector = 
vmx_read_guest_seg_selector(vmx, seg); seg 3096 arch/x86/kvm/vmx/vmx.c ar = vmx_read_guest_seg_ar(vmx, seg); seg 3115 arch/x86/kvm/vmx/vmx.c static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) seg 3120 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &s, seg); seg 3123 arch/x86/kvm/vmx/vmx.c return vmx_read_guest_seg_base(to_vmx(vcpu), seg); seg 3158 arch/x86/kvm/vmx/vmx.c void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) seg 3161 arch/x86/kvm/vmx/vmx.c const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; seg 3165 arch/x86/kvm/vmx/vmx.c if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { seg 3166 arch/x86/kvm/vmx/vmx.c vmx->rmode.segs[seg] = *var; seg 3167 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_TR) seg 3170 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(seg, &vmx->rmode.segs[seg]); seg 3189 arch/x86/kvm/vmx/vmx.c if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) seg 3230 arch/x86/kvm/vmx/vmx.c static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) seg 3235 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &var, seg); seg 3237 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_CS) seg 3301 arch/x86/kvm/vmx/vmx.c static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) seg 3306 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &var, seg); seg 3502 arch/x86/kvm/vmx/vmx.c static void seg_setup(int seg) seg 3504 arch/x86/kvm/vmx/vmx.c const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; seg 3511 arch/x86/kvm/vmx/vmx.c if (seg == VCPU_SREG_CS) seg 245 arch/x86/kvm/vmx/vmx.h } seg[8]; seg 327 arch/x86/kvm/vmx/vmx.h void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); seg 328 arch/x86/kvm/vmx/vmx.h void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); seg 5331 arch/x86/kvm/x86.c struct kvm_segment *var, int seg) seg 5333 arch/x86/kvm/x86.c kvm_x86_ops->set_segment(vcpu, var, seg); seg 5337 arch/x86/kvm/x86.c struct kvm_segment *var, int seg) seg 5339 arch/x86/kvm/x86.c kvm_x86_ops->get_segment(vcpu, var, seg); seg 5970 arch/x86/kvm/x86.c static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) seg 5972 arch/x86/kvm/x86.c return kvm_x86_ops->get_segment_base(vcpu, seg); seg 6114 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt, int seg) seg 6116 arch/x86/kvm/x86.c return get_segment_base(emul_to_vcpu(ctxt), seg); seg 6121 arch/x86/kvm/x86.c int seg) seg 6125 arch/x86/kvm/x86.c kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); seg 6157 arch/x86/kvm/x86.c int seg) seg 6181 arch/x86/kvm/x86.c kvm_set_segment(vcpu, &var, seg); seg 7701 arch/x86/kvm/x86.c static u32 enter_smm_get_segment_flags(struct kvm_segment *seg) seg 7704 arch/x86/kvm/x86.c flags |= seg->g << 23; seg 7705 arch/x86/kvm/x86.c flags |= seg->db << 22; seg 7706 arch/x86/kvm/x86.c flags |= seg->l << 21; seg 7707 arch/x86/kvm/x86.c flags |= seg->avl << 20; seg 7708 arch/x86/kvm/x86.c flags |= seg->present << 15; seg 7709 arch/x86/kvm/x86.c flags |= seg->dpl << 13; seg 7710 arch/x86/kvm/x86.c flags |= seg->s << 12; seg 7711 arch/x86/kvm/x86.c flags |= seg->type << 8; seg 7717 arch/x86/kvm/x86.c struct kvm_segment seg; seg 7720 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, n); seg 7721 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector); seg 7728 arch/x86/kvm/x86.c put_smstate(u32, buf, offset + 8, seg.base); seg 7729 arch/x86/kvm/x86.c put_smstate(u32, buf, offset + 4, seg.limit); seg 7730 arch/x86/kvm/x86.c put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg)); seg 7736 
arch/x86/kvm/x86.c struct kvm_segment seg; seg 7740 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, n); seg 7743 arch/x86/kvm/x86.c flags = enter_smm_get_segment_flags(&seg) >> 8; seg 7744 arch/x86/kvm/x86.c put_smstate(u16, buf, offset, seg.selector); seg 7746 arch/x86/kvm/x86.c put_smstate(u32, buf, offset + 4, seg.limit); seg 7747 arch/x86/kvm/x86.c put_smstate(u64, buf, offset + 8, seg.base); seg 7754 arch/x86/kvm/x86.c struct kvm_segment seg; seg 7771 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); seg 7772 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7fc4, seg.selector); seg 7773 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f64, seg.base); seg 7774 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f60, seg.limit); seg 7775 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg)); seg 7777 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); seg 7778 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7fc0, seg.selector); seg 7779 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f80, seg.base); seg 7780 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f7c, seg.limit); seg 7781 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); seg 7805 arch/x86/kvm/x86.c struct kvm_segment seg; seg 7831 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); seg 7832 arch/x86/kvm/x86.c put_smstate(u16, buf, 0x7e90, seg.selector); seg 7833 arch/x86/kvm/x86.c put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8); seg 7834 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7e94, seg.limit); seg 7835 arch/x86/kvm/x86.c put_smstate(u64, buf, 0x7e98, seg.base); seg 7841 arch/x86/kvm/x86.c kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); seg 7842 arch/x86/kvm/x86.c put_smstate(u16, buf, 0x7e70, seg.selector); seg 7843 arch/x86/kvm/x86.c put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8); seg 7844 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7e74, seg.limit); seg 7845 arch/x86/kvm/x86.c put_smstate(u64, buf, 0x7e78, seg.base); seg 23 arch/x86/math-emu/fpu_system.h static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg) seg 29 arch/x86/math-emu/fpu_system.h seg >>= 3; seg 31 arch/x86/math-emu/fpu_system.h if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries) seg 32 arch/x86/math-emu/fpu_system.h ret = current->mm->context.ldt->entries[seg]; seg 166 arch/x86/pci/acpi.c static int check_segment(u16 seg, struct device *dev, char *estr) seg 168 arch/x86/pci/acpi.c if (seg) { seg 191 arch/x86/pci/acpi.c int result, seg; seg 200 arch/x86/pci/acpi.c seg = info->sd.domain; seg 207 arch/x86/pci/acpi.c return check_segment(seg, dev, "MMCONFIG is disabled,"); seg 209 arch/x86/pci/acpi.c result = pci_mmconfig_insert(dev, seg, info->start_bus, info->end_bus, seg 217 arch/x86/pci/acpi.c return check_segment(seg, dev, seg 263 arch/x86/pci/ce4100.c static int ce4100_conf_read(unsigned int seg, unsigned int bus, seg 266 arch/x86/pci/ce4100.c WARN_ON(seg); seg 275 arch/x86/pci/ce4100.c return pci_direct_conf1.read(seg, bus, devfn, reg, len, value); seg 297 arch/x86/pci/ce4100.c static int ce4100_conf_write(unsigned int seg, unsigned int bus, seg 300 arch/x86/pci/ce4100.c WARN_ON(seg); seg 310 arch/x86/pci/ce4100.c return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); seg 21 arch/x86/pci/direct.c static int pci_conf1_read(unsigned int seg, unsigned int bus, seg 26 arch/x86/pci/direct.c if (seg || (bus > 255) || (devfn > 255) || (reg > 4095)) { seg 52 arch/x86/pci/direct.c static int 
pci_conf1_write(unsigned int seg, unsigned int bus, seg 57 arch/x86/pci/direct.c if (seg || (bus > 255) || (devfn > 255) || (reg > 4095)) seg 95 arch/x86/pci/direct.c static int pci_conf2_read(unsigned int seg, unsigned int bus, seg 101 arch/x86/pci/direct.c WARN_ON(seg); seg 137 arch/x86/pci/direct.c static int pci_conf2_write(unsigned int seg, unsigned int bus, seg 143 arch/x86/pci/direct.c WARN_ON(seg); seg 721 arch/x86/pci/mmconfig-shared.c int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, seg 735 arch/x86/pci/mmconfig-shared.c cfg = pci_mmconfig_lookup(seg, start); seg 753 arch/x86/pci/mmconfig-shared.c cfg = pci_mmconfig_alloc(seg, start, end, addr); seg 795 arch/x86/pci/mmconfig-shared.c int pci_mmconfig_delete(u16 seg, u8 start, u8 end) seg 801 arch/x86/pci/mmconfig-shared.c if (cfg->segment == seg && cfg->start_bus == start && seg 27 arch/x86/pci/mmconfig_32.c static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) seg 29 arch/x86/pci/mmconfig_32.c struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); seg 51 arch/x86/pci/mmconfig_32.c static int pci_mmcfg_read(unsigned int seg, unsigned int bus, seg 63 arch/x86/pci/mmconfig_32.c base = get_base_addr(seg, bus, devfn); seg 90 arch/x86/pci/mmconfig_32.c static int pci_mmcfg_write(unsigned int seg, unsigned int bus, seg 100 arch/x86/pci/mmconfig_32.c base = get_base_addr(seg, bus, devfn); seg 19 arch/x86/pci/mmconfig_64.c static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) seg 21 arch/x86/pci/mmconfig_64.c struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); seg 28 arch/x86/pci/mmconfig_64.c static int pci_mmcfg_read(unsigned int seg, unsigned int bus, seg 40 arch/x86/pci/mmconfig_64.c addr = pci_dev_base(seg, bus, devfn); seg 62 arch/x86/pci/mmconfig_64.c static int pci_mmcfg_write(unsigned int seg, unsigned int bus, seg 72 arch/x86/pci/mmconfig_64.c addr = pci_dev_base(seg, bus, devfn); seg 21 arch/x86/pci/numachip.c static inline char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn) seg 23 arch/x86/pci/numachip.c struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus); seg 30 arch/x86/pci/numachip.c static int pci_mmcfg_read_numachip(unsigned int seg, unsigned int bus, seg 48 arch/x86/pci/numachip.c addr = pci_dev_base(seg, bus, devfn); seg 70 arch/x86/pci/numachip.c static int pci_mmcfg_write_numachip(unsigned int seg, unsigned int bus, seg 84 arch/x86/pci/numachip.c addr = pci_dev_base(seg, bus, devfn); seg 200 arch/x86/pci/olpc.c static int pci_olpc_read(unsigned int seg, unsigned int bus, seg 205 arch/x86/pci/olpc.c WARN_ON(seg); seg 209 arch/x86/pci/olpc.c return pci_direct_conf1.read(seg, bus, devfn, reg, len, value); seg 262 arch/x86/pci/olpc.c static int pci_olpc_write(unsigned int seg, unsigned int bus, seg 265 arch/x86/pci/olpc.c WARN_ON(seg); seg 269 arch/x86/pci/olpc.c return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); seg 183 arch/x86/pci/pcbios.c static int pci_bios_read(unsigned int seg, unsigned int bus, seg 191 arch/x86/pci/pcbios.c WARN_ON(seg); seg 233 arch/x86/pci/pcbios.c static int pci_bios_write(unsigned int seg, unsigned int bus, seg 241 arch/x86/pci/pcbios.c WARN_ON(seg); seg 360 arch/x86/pci/xen.c restore_ext.seg = pci_domain_nr(dev->bus); seg 73 arch/x86/realmode/init.c u16 *seg = (u16 *) (base + *rel++); seg 74 arch/x86/realmode/init.c *seg = real_mode_seg; seg 12 arch/x86/um/asm/segment.h unsigned long seg; seg 136 arch/xtensa/include/asm/processor.h unsigned long seg; 
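The segment_eq(), get_fs()/set_fs() and __access_ok() entries above all revolve around the same per-thread addr_limit scheme: a user pointer plus a size is acceptable only when the whole range lies below the current segment limit. A minimal sketch of that check, with illustrative names (range_ok and current_addr_limit are stand-ins, not kernel API); the subtraction form mirrors the riscv uaccess.h entry at line 80 and avoids overflow in addr + size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct { unsigned long seg; } mm_segment_t;

/* stand-in for the per-thread limit; KERNEL_DS would be ~0UL */
static mm_segment_t current_addr_limit = { 0x0000800000000000UL };

static bool range_ok(uintptr_t addr, size_t size)
{
        mm_segment_t fs = current_addr_limit;

        /* check size first, so fs.seg - size cannot wrap around */
        return size <= fs.seg && addr <= fs.seg - size;
}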
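Likewise, the pci_conf1_read()/pci_conf1_write() entries reject any nonzero seg because legacy configuration mechanism #1 reaches only PCI segment 0 through I/O ports 0xCF8/0xCFC; multi-segment systems go through MMCONFIG instead (the mmconfig_32/mmconfig_64 entries). A rough sketch of how the CONFIG_ADDRESS dword is formed under that mechanism, covering only the standard registers 0-255 (the extended-register encoding the kernel also accepts, up to reg 4095, is omitted here):

#include <stdint.h>

#define PCI_CONF1_ENABLE (1u << 31)

static uint32_t pci_conf1_address(unsigned int bus, unsigned int devfn,
                                  unsigned int reg)
{
        /* bus 0-255; devfn packs device (5 bits) and function (3 bits) */
        return PCI_CONF1_ENABLE | (bus << 16) | (devfn << 8) | (reg & 0xFC);
}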
seg 38 arch/xtensa/include/asm/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg) seg 44 drivers/acpi/pci_mcfg.c #define AL_ECAM(table_id, rev, seg, ops) \ seg 45 drivers/acpi/pci_mcfg.c { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } seg 56 drivers/acpi/pci_mcfg.c #define QCOM_ECAM32(seg) \ seg 57 drivers/acpi/pci_mcfg.c { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } seg 68 drivers/acpi/pci_mcfg.c #define HISI_QUAD_DOM(table_id, seg, ops) \ seg 69 drivers/acpi/pci_mcfg.c { "HISI ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \ seg 70 drivers/acpi/pci_mcfg.c { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \ seg 71 drivers/acpi/pci_mcfg.c { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \ seg 72 drivers/acpi/pci_mcfg.c { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops } seg 98 drivers/acpi/pci_mcfg.c #define THUNDER_ECAM_QUIRK(rev, seg) \ seg 99 drivers/acpi/pci_mcfg.c { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \ seg 119 drivers/acpi/pci_mcfg.c #define XGENE_V1_ECAM_MCFG(rev, seg) \ seg 120 drivers/acpi/pci_mcfg.c {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ seg 123 drivers/acpi/pci_mcfg.c #define XGENE_V2_ECAM_MCFG(rev, seg) \ seg 124 drivers/acpi/pci_mcfg.c {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ seg 198 drivers/acpi/pci_mcfg.c u16 seg = root->segment; seg 210 drivers/acpi/pci_mcfg.c if (e->segment == seg && e->bus_start <= bus_res->start && seg 957 drivers/block/xen-blkback/blkback.c struct seg_buf seg[], seg 996 drivers/block/xen-blkback/blkback.c seg[n].nsec = last_sect - first_sect + 1; seg 997 drivers/block/xen-blkback/blkback.c seg[n].offset = first_sect << 9; seg 998 drivers/block/xen-blkback/blkback.c preq->nr_sects += seg[n].nsec; seg 1229 drivers/block/xen-blkback/blkback.c struct seg_buf *seg = pending_req->seg; seg 1301 drivers/block/xen-blkback/blkback.c pages[i]->gref = req->u.rw.seg[i].gref; seg 1302 drivers/block/xen-blkback/blkback.c seg[i].nsec = req->u.rw.seg[i].last_sect - seg 1303 drivers/block/xen-blkback/blkback.c req->u.rw.seg[i].first_sect + 1; seg 1304 drivers/block/xen-blkback/blkback.c seg[i].offset = (req->u.rw.seg[i].first_sect << 9); seg 1305 drivers/block/xen-blkback/blkback.c if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || seg 1306 drivers/block/xen-blkback/blkback.c (req->u.rw.seg[i].last_sect < seg 1307 drivers/block/xen-blkback/blkback.c req->u.rw.seg[i].first_sect)) seg 1309 drivers/block/xen-blkback/blkback.c preq.nr_sects += seg[i].nsec; seg 1314 drivers/block/xen-blkback/blkback.c if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq)) seg 1332 drivers/block/xen-blkback/blkback.c if (((int)preq.sector_number|(int)seg[i].nsec) & seg 1366 drivers/block/xen-blkback/blkback.c seg[i].nsec << 9, seg 1367 drivers/block/xen-blkback/blkback.c seg[i].offset) == 0)) { seg 1382 drivers/block/xen-blkback/blkback.c preq.sector_number += seg[i].nsec; seg 86 drivers/block/xen-blkback/common.h struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; seg 139 drivers/block/xen-blkback/common.h struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; seg 353 drivers/block/xen-blkback/common.h struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; seg 413 drivers/block/xen-blkback/common.h dst->u.rw.seg[i] = src->u.rw.seg[i]; seg 461 drivers/block/xen-blkback/common.h dst->u.rw.seg[i] = src->u.rw.seg[i]; seg 657 drivers/block/xen-blkfront.c ring_req->u.rw.seg[grant_idx] = seg 96 drivers/char/agp/compat_ioctl.c int seg; seg 122 drivers/char/agp/compat_ioctl.c for (seg = 0; seg < ureserve.seg_count; 
seg++) { seg 123 drivers/char/agp/compat_ioctl.c ksegment[seg].pg_start = usegment[seg].pg_start; seg 124 drivers/char/agp/compat_ioctl.c ksegment[seg].pg_count = usegment[seg].pg_count; seg 125 drivers/char/agp/compat_ioctl.c ksegment[seg].prot = usegment[seg].prot; seg 104 drivers/char/agp/frontend.c struct agp_segment_priv *seg; seg 111 drivers/char/agp/frontend.c seg = *(client->segments); seg 115 drivers/char/agp/frontend.c if ((seg[i].pg_start == pg_start) && seg 116 drivers/char/agp/frontend.c (seg[i].pg_count == pg_count) && seg 117 drivers/char/agp/frontend.c (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) { seg 118 drivers/char/agp/frontend.c return seg + i; seg 141 drivers/char/agp/frontend.c struct agp_segment_priv ** seg, int num_segments) seg 150 drivers/char/agp/frontend.c DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client); seg 152 drivers/char/agp/frontend.c client->segments = seg; seg 166 drivers/char/agp/frontend.c struct agp_segment_priv *seg; seg 170 drivers/char/agp/frontend.c seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL); seg 171 drivers/char/agp/frontend.c if (seg == NULL) { seg 179 drivers/char/agp/frontend.c seg[i].pg_start = user_seg[i].pg_start; seg 180 drivers/char/agp/frontend.c seg[i].pg_count = user_seg[i].pg_count; seg 181 drivers/char/agp/frontend.c seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot); seg 188 drivers/char/agp/frontend.c kfree(seg); seg 191 drivers/char/agp/frontend.c *ret_seg = seg; seg 72 drivers/edac/i10nm_base.c d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1); seg 76 drivers/edac/i10nm_base.c d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1); seg 90 drivers/edac/i10nm_base.c mdev = pci_get_dev_wrapper(d->seg, d->bus[0], seg 354 drivers/edac/sb_edac.c int seg; seg 732 drivers/edac/sb_edac.c static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom, seg 751 drivers/edac/sb_edac.c if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) && seg 759 drivers/edac/sb_edac.c static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom, seg 776 drivers/edac/sb_edac.c sbridge_dev->seg = seg; seg 2338 drivers/edac/sb_edac.c int seg = 0; seg 2369 drivers/edac/sb_edac.c seg = pci_domain_nr(pdev->bus); seg 2373 drivers/edac/sb_edac.c sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom, seg 2387 drivers/edac/sb_edac.c sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table); seg 41 drivers/edac/skx_base.c if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number) seg 213 drivers/edac/skx_common.c d->seg = pci_domain_nr(pdev->bus); seg 217 drivers/edac/skx_common.c d->seg = GET_BITFIELD(reg, 16, 23); seg 53 drivers/edac/skx_common.h int seg; seg 48 drivers/firmware/google/memconsole-coreboot.c } seg[2] = { {0}, {0} }; seg 55 drivers/firmware/google/memconsole-coreboot.c seg[0] = (struct seg){.phys = cursor, .len = size - cursor}; seg 56 drivers/firmware/google/memconsole-coreboot.c seg[1] = (struct seg){.phys = 0, .len = cursor}; seg 58 drivers/firmware/google/memconsole-coreboot.c seg[0] = (struct seg){.phys = 0, .len = min(cursor, size)}; seg 61 drivers/firmware/google/memconsole-coreboot.c for (i = 0; i < ARRAY_SIZE(seg) && count > done; i++) { seg 63 drivers/firmware/google/memconsole-coreboot.c cbmem_console->body + seg[i].phys, seg[i].len); seg 64 drivers/firmware/google/memconsole-coreboot.c pos -= seg[i].len; seg 5755 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c uint32_t seg; seg 6127 
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c [gfx_ras_edc_regs[i].seg] + seg 224 drivers/gpu/drm/amd/amdgpu/nv.c (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) seg 412 drivers/gpu/drm/amd/amdgpu/soc15.c if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] seg 48 drivers/gpu/drm/amd/amdgpu/soc15.h uint32_t seg; seg 58 drivers/gpu/drm/amd/amdgpu/soc15.h uint32_t seg; seg 65 drivers/gpu/drm/amd/amdgpu/soc15.h #define SOC15_REG_ENTRY_OFFSET(entry) (adev->reg_offset[entry.hwip][entry.inst][entry.seg] + entry.reg_offset) seg 47 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg seg 49 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c #define BASE(seg) BASE_INNER(seg) seg 55 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c #define CLK_BASE_INNER(seg) \ seg 56 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c CLK_BASE__INST0_SEG ## seg seg 124 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define BASE_INNER(seg) \ seg 125 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c DCE_BASE__INST0_SEG ## seg seg 127 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define NBIO_BASE_INNER(seg) \ seg 128 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c NBIF_BASE__INST0_SEG ## seg seg 130 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define NBIO_BASE(seg) \ seg 131 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c NBIO_BASE_INNER(seg) seg 134 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define BASE(seg) \ seg 135 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c BASE_INNER(seg) seg 146 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define MMHUB_BASE_INNER(seg) \ seg 147 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c MMHUB_BASE__INST0_SEG ## seg seg 149 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c #define MMHUB_BASE(seg) \ seg 150 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c MMHUB_BASE_INNER(seg) seg 30 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h #define BASE_INNER(seg) \ seg 31 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h DCE_BASE__INST0_SEG ## seg seg 33 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h #define BASE(seg) \ seg 34 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h BASE_INNER(seg) seg 163 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define BASE_INNER(seg) \ seg 164 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c DCE_BASE__INST0_SEG ## seg seg 166 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define BASE(seg) \ seg 167 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c BASE_INNER(seg) seg 183 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define NBIO_BASE_INNER(seg) \ seg 184 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c NBIF_BASE__INST0_SEG ## seg seg 186 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define NBIO_BASE(seg) \ seg 187 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c NBIO_BASE_INNER(seg) seg 194 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define MMHUB_BASE_INNER(seg) \ seg 195 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c MMHUB_BASE__INST0_SEG ## seg seg 197 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c #define MMHUB_BASE(seg) \ seg 198 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c MMHUB_BASE_INNER(seg) seg 31 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h #define BASE_INNER(seg) \ seg 32 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h DCE_BASE__INST0_SEG ## seg 
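The BASE()/BASE_INNER() pairs above are the classic two-level token-pasting idiom: the extra level of indirection forces the macro argument to be expanded before ## glues it onto the register-base prefix, whereas a single macro would paste the unexpanded parameter name. A short sketch with made-up base values (the DCE_BASE__INST0_SEG0/1 numbers below are illustrative only):

#define DCE_BASE__INST0_SEG0 0x00001260
#define DCE_BASE__INST0_SEG1 0x00034C00

#define BASE_INNER(seg) DCE_BASE__INST0_SEG ## seg
#define BASE(seg)       BASE_INNER(seg)

#define WHICH_SEG 1
/* BASE(WHICH_SEG) expands WHICH_SEG to 1 first, then pastes: */
static const unsigned long base1 = BASE(WHICH_SEG);  /* 0x00034C00 */
/* BASE_INNER(WHICH_SEG) would paste ...SEGWHICH_SEG and fail */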
seg 34 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h #define BASE(seg) \ seg 35 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h BASE_INNER(seg) seg 33 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h #define BASE_INNER(seg) \ seg 34 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h DCE_BASE__INST0_SEG ## seg seg 36 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h #define BASE(seg) \ seg 37 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h BASE_INNER(seg) seg 419 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg seg 421 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define BASE(seg) BASE_INNER(seg) seg 444 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define NBIO_BASE_INNER(seg) \ seg 445 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c NBIO_BASE__INST0_SEG ## seg seg 447 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define NBIO_BASE(seg) \ seg 448 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c NBIO_BASE_INNER(seg) seg 455 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define MMHUB_BASE_INNER(seg) \ seg 456 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c MMHUB_BASE__INST0_SEG ## seg seg 458 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c #define MMHUB_BASE(seg) \ seg 459 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c MMHUB_BASE_INNER(seg) seg 31 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h #define BASE_INNER(seg) \ seg 32 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h DCE_BASE__INST0_SEG ## seg seg 34 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h #define BASE(seg) \ seg 35 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h BASE_INNER(seg) seg 282 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg seg 284 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define BASE(seg) BASE_INNER(seg) seg 307 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define NBIO_BASE_INNER(seg) \ seg 308 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c NBIF0_BASE__INST0_SEG ## seg seg 310 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define NBIO_BASE(seg) \ seg 311 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c NBIO_BASE_INNER(seg) seg 318 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define MMHUB_BASE_INNER(seg) \ seg 319 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c MMHUB_BASE__INST0_SEG ## seg seg 321 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c #define MMHUB_BASE(seg) \ seg 322 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c MMHUB_BASE_INNER(seg) seg 53 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c #define BASE_INNER(seg) \ seg 54 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c DCE_BASE__INST0_SEG ## seg seg 57 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c #define BASE(seg) \ seg 58 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c BASE_INNER(seg) seg 44 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c #define BASE_INNER(seg) \ seg 45 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c DCE_BASE__INST0_SEG ## seg seg 48 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c #define BASE(seg) \ seg 49 drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c BASE_INNER(seg) seg 50 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c #define BASE_INNER(seg) \ seg 51 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c 
DCE_BASE__INST0_SEG ## seg seg 54 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c #define BASE(seg) \ seg 55 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c BASE_INNER(seg) seg 44 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c #define BASE_INNER(seg) \ seg 45 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c DCE_BASE__INST0_SEG ## seg seg 48 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c #define BASE(seg) \ seg 49 drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c BASE_INNER(seg) seg 54 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg seg 56 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c #define BASE(seg) BASE_INNER(seg) seg 51 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg seg 53 drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c #define BASE(seg) BASE_INNER(seg) seg 52 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg seg 54 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c #define BASE(seg) BASE_INNER(seg) seg 51 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg seg 53 drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c #define BASE(seg) BASE_INNER(seg) seg 94 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c #define BASE_INNER(seg) \ seg 95 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c DCE_BASE__INST0_SEG ## seg seg 97 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c #define BASE(seg) \ seg 98 drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c BASE_INNER(seg) seg 175 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c #define BASE_INNER(seg) \ seg 176 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c DCE_BASE__INST0_SEG ## seg seg 178 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c #define BASE(seg) \ seg 179 drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c BASE_INNER(seg) seg 176 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg seg 179 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c #define BASE(seg) \ seg 180 drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c BASE_INNER(seg) seg 172 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg seg 175 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c #define BASE(seg) \ seg 176 drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c BASE_INNER(seg) seg 93 drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c reg = adev->reg_offset[entry[i].hwip][entry[i].inst][entry[i].seg] seg 40 drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h uint32_t seg; seg 51 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c u8 seg = 0x00, off = 0x00, tmp; seg 62 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c nvkm_wri2cr(adap, bus->addr, 0x41, seg); seg 82 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c seg = msg->buf[0]; seg 116 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN4I_HDMI_DDC_ADDR_SEGMENT(seg) (((seg) & 0xff) << 24) seg 173 drivers/gpu/drm/sun4i/sun4i_hdmi.h #define SUN6I_HDMI_DDC_ADDR_SEGMENT(seg) (((seg) & 0xff) << 24) seg 1009 drivers/infiniband/core/mad.c struct ib_rmpp_segment *seg = NULL; seg 1019 
drivers/infiniband/core/mad.c seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); seg 1020 drivers/infiniband/core/mad.c if (!seg) { seg 1024 drivers/infiniband/core/mad.c seg->num = ++send_buf->seg_count; seg 1025 drivers/infiniband/core/mad.c list_add_tail(&seg->list, &send_wr->rmpp_list); seg 1030 drivers/infiniband/core/mad.c memset(seg->data + seg_size - pad, 0, pad); seg 378 drivers/infiniband/core/mad_rmpp.c static inline int get_last_flag(struct ib_mad_recv_buf *seg) seg 382 drivers/infiniband/core/mad_rmpp.c rmpp_mad = (struct ib_rmpp_mad *) seg->mad; seg 386 drivers/infiniband/core/mad_rmpp.c static inline int get_seg_num(struct ib_mad_recv_buf *seg) seg 390 drivers/infiniband/core/mad_rmpp.c rmpp_mad = (struct ib_rmpp_mad *) seg->mad; seg 395 drivers/infiniband/core/mad_rmpp.c struct ib_mad_recv_buf *seg) seg 397 drivers/infiniband/core/mad_rmpp.c if (seg->list.next == rmpp_list) seg 400 drivers/infiniband/core/mad_rmpp.c return container_of(seg->list.next, struct ib_mad_recv_buf, list); seg 420 drivers/infiniband/core/user_mad.c int left, seg; seg 429 drivers/infiniband/core/user_mad.c for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0; seg 430 drivers/infiniband/core/user_mad.c seg++, left -= msg->seg_size, buf += msg->seg_size) { seg 431 drivers/infiniband/core/user_mad.c if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf, seg 70 drivers/infiniband/hw/hns/hns_roce_mr.c unsigned long *seg) seg 80 drivers/infiniband/hw/hns/hns_roce_mr.c *seg = find_first_bit(buddy->bits[o], m); seg 81 drivers/infiniband/hw/hns/hns_roce_mr.c if (*seg < m) seg 89 drivers/infiniband/hw/hns/hns_roce_mr.c clear_bit(*seg, buddy->bits[o]); seg 94 drivers/infiniband/hw/hns/hns_roce_mr.c *seg <<= 1; seg 95 drivers/infiniband/hw/hns/hns_roce_mr.c set_bit(*seg ^ 1, buddy->bits[o]); seg 101 drivers/infiniband/hw/hns/hns_roce_mr.c *seg <<= order; seg 105 drivers/infiniband/hw/hns/hns_roce_mr.c static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg, seg 108 drivers/infiniband/hw/hns/hns_roce_mr.c seg >>= order; seg 112 drivers/infiniband/hw/hns/hns_roce_mr.c while (test_bit(seg ^ 1, buddy->bits[order])) { seg 113 drivers/infiniband/hw/hns/hns_roce_mr.c clear_bit(seg ^ 1, buddy->bits[order]); seg 115 drivers/infiniband/hw/hns/hns_roce_mr.c seg >>= 1; seg 119 drivers/infiniband/hw/hns/hns_roce_mr.c set_bit(seg, buddy->bits[order]); seg 178 drivers/infiniband/hw/hns/hns_roce_mr.c unsigned long *seg, u32 mtt_type) seg 208 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_buddy_alloc(buddy, order, seg); seg 212 drivers/infiniband/hw/hns/hns_roce_mr.c if (hns_roce_table_get_range(hr_dev, table, *seg, seg 213 drivers/infiniband/hw/hns/hns_roce_mr.c *seg + (1 << order) - 1)) { seg 214 drivers/infiniband/hw/hns/hns_roce_mr.c hns_roce_buddy_free(buddy, *seg, order); seg 4014 drivers/infiniband/hw/mlx5/qp.c static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, seg 4022 drivers/infiniband/hw/mlx5/qp.c *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx); seg 4032 drivers/infiniband/hw/mlx5/qp.c static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, seg 4035 drivers/infiniband/hw/mlx5/qp.c if (likely(*seg != *cur_edge)) seg 4038 drivers/infiniband/hw/mlx5/qp.c _handle_post_send_edge(sq, seg, wqe_sz, cur_edge); seg 4051 drivers/infiniband/hw/mlx5/qp.c void **seg, u32 *wqe_sz, const void *src, seg 4055 drivers/infiniband/hw/mlx5/qp.c size_t leftlen = *cur_edge - *seg; seg 4059 drivers/infiniband/hw/mlx5/qp.c memcpy(*seg, src, copysz); seg 4064 
drivers/infiniband/hw/mlx5/qp.c *seg += stride; seg 4066 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(sq, seg, *wqe_sz, cur_edge); seg 4096 drivers/infiniband/hw/mlx5/qp.c void **seg, int *size, void **cur_edge) seg 4098 drivers/infiniband/hw/mlx5/qp.c struct mlx5_wqe_eth_seg *eseg = *seg; seg 4126 drivers/infiniband/hw/mlx5/qp.c *seg += stride; seg 4129 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4132 drivers/infiniband/hw/mlx5/qp.c memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata, seg 4139 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_wqe_eth_seg); seg 4341 drivers/infiniband/hw/mlx5/qp.c static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, seg 4347 drivers/infiniband/hw/mlx5/qp.c memset(seg, 0, sizeof(*seg)); seg 4350 drivers/infiniband/hw/mlx5/qp.c seg->log2_page_size = ilog2(mr->ibmr.page_size); seg 4355 drivers/infiniband/hw/mlx5/qp.c seg->flags = get_umr_flags(access) | mr->access_mode; seg 4356 drivers/infiniband/hw/mlx5/qp.c seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); seg 4357 drivers/infiniband/hw/mlx5/qp.c seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); seg 4358 drivers/infiniband/hw/mlx5/qp.c seg->start_addr = cpu_to_be64(mr->ibmr.iova); seg 4359 drivers/infiniband/hw/mlx5/qp.c seg->len = cpu_to_be64(mr->ibmr.length); seg 4360 drivers/infiniband/hw/mlx5/qp.c seg->xlt_oct_size = cpu_to_be32(ndescs); seg 4363 drivers/infiniband/hw/mlx5/qp.c static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) seg 4365 drivers/infiniband/hw/mlx5/qp.c memset(seg, 0, sizeof(*seg)); seg 4366 drivers/infiniband/hw/mlx5/qp.c seg->status = MLX5_MKEY_STATUS_FREE; seg 4369 drivers/infiniband/hw/mlx5/qp.c static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, seg 4374 drivers/infiniband/hw/mlx5/qp.c memset(seg, 0, sizeof(*seg)); seg 4376 drivers/infiniband/hw/mlx5/qp.c seg->status = MLX5_MKEY_STATUS_FREE; seg 4378 drivers/infiniband/hw/mlx5/qp.c seg->flags = convert_access(umrwr->access_flags); seg 4380 drivers/infiniband/hw/mlx5/qp.c seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); seg 4383 drivers/infiniband/hw/mlx5/qp.c seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); seg 4385 drivers/infiniband/hw/mlx5/qp.c seg->start_addr = cpu_to_be64(umrwr->virt_addr); seg 4386 drivers/infiniband/hw/mlx5/qp.c seg->len = cpu_to_be64(umrwr->length); seg 4387 drivers/infiniband/hw/mlx5/qp.c seg->log2_page_size = umrwr->page_shift; seg 4388 drivers/infiniband/hw/mlx5/qp.c seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | seg 4438 drivers/infiniband/hw/mlx5/qp.c struct mlx5_wqe_inline_seg *seg; seg 4443 drivers/infiniband/hw/mlx5/qp.c seg = *wqe; seg 4444 drivers/infiniband/hw/mlx5/qp.c *wqe += sizeof(*seg); seg 4445 drivers/infiniband/hw/mlx5/qp.c offset = sizeof(*seg); seg 4475 drivers/infiniband/hw/mlx5/qp.c seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); seg 4477 drivers/infiniband/hw/mlx5/qp.c *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; seg 4592 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *qp, void **seg, int *size, seg 4630 drivers/infiniband/hw/mlx5/qp.c struct mlx5_klm *data_klm = *seg; seg 4656 drivers/infiniband/hw/mlx5/qp.c sblock_ctrl = *seg; seg 4685 drivers/infiniband/hw/mlx5/qp.c *seg += wqe_size; seg 4687 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4689 drivers/infiniband/hw/mlx5/qp.c bsf = *seg; seg 4694 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(*bsf); seg 4696 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, 
*size, cur_edge); seg 4701 drivers/infiniband/hw/mlx5/qp.c static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, seg 4708 drivers/infiniband/hw/mlx5/qp.c memset(seg, 0, sizeof(*seg)); seg 4710 drivers/infiniband/hw/mlx5/qp.c seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; seg 4711 drivers/infiniband/hw/mlx5/qp.c seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); seg 4712 drivers/infiniband/hw/mlx5/qp.c seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | seg 4714 drivers/infiniband/hw/mlx5/qp.c seg->len = cpu_to_be64(length); seg 4715 drivers/infiniband/hw/mlx5/qp.c seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); seg 4716 drivers/infiniband/hw/mlx5/qp.c seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); seg 4731 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *qp, void **seg, int *size, seg 4761 drivers/infiniband/hw/mlx5/qp.c set_sig_umr_segment(*seg, xlt_size); seg 4762 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); seg 4764 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4766 drivers/infiniband/hw/mlx5/qp.c set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, seg 4768 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_mkey_seg); seg 4770 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4772 drivers/infiniband/hw/mlx5/qp.c ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, seg 4782 drivers/infiniband/hw/mlx5/qp.c u32 psv_idx, void **seg, int *size) seg 4784 drivers/infiniband/hw/mlx5/qp.c struct mlx5_seg_set_psv *psv_seg = *seg; seg 4802 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(*psv_seg); seg 4810 drivers/infiniband/hw/mlx5/qp.c void **seg, int *size, void **cur_edge, seg 4842 drivers/infiniband/hw/mlx5/qp.c set_reg_umr_seg(*seg, mr, flags, atomic); seg 4843 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); seg 4845 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4847 drivers/infiniband/hw/mlx5/qp.c set_reg_mkey_seg(*seg, mr, wr->key, wr->access); seg 4848 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_mkey_seg); seg 4850 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4853 drivers/infiniband/hw/mlx5/qp.c memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, seg 4857 drivers/infiniband/hw/mlx5/qp.c set_reg_data_seg(*seg, mr, pd); seg 4858 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_wqe_data_seg); seg 4864 drivers/infiniband/hw/mlx5/qp.c static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size, seg 4867 drivers/infiniband/hw/mlx5/qp.c set_linv_umr_seg(*seg); seg 4868 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); seg 4870 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4871 drivers/infiniband/hw/mlx5/qp.c set_linv_mkey_seg(*seg); seg 4872 drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(struct mlx5_mkey_seg); seg 4874 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); seg 4896 drivers/infiniband/hw/mlx5/qp.c static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg, seg 4906 drivers/infiniband/hw/mlx5/qp.c *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); seg 4907 drivers/infiniband/hw/mlx5/qp.c *ctrl = *seg; seg 4908 drivers/infiniband/hw/mlx5/qp.c *(uint32_t *)(*seg + 8) = 0; seg 4914 
drivers/infiniband/hw/mlx5/qp.c *seg += sizeof(**ctrl); seg 4921 drivers/infiniband/hw/mlx5/qp.c static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, seg 4926 drivers/infiniband/hw/mlx5/qp.c return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq, seg 4933 drivers/infiniband/hw/mlx5/qp.c void *seg, u8 size, void *cur_edge, seg 4955 drivers/infiniband/hw/mlx5/qp.c seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB); seg 4956 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? seg 4982 drivers/infiniband/hw/mlx5/qp.c void *seg; seg 5018 drivers/infiniband/hw/mlx5/qp.c err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge, seg 5044 drivers/infiniband/hw/mlx5/qp.c xrc = seg; seg 5045 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(*xrc); seg 5053 drivers/infiniband/hw/mlx5/qp.c set_raddr_seg(seg, rdma_wr(wr)->remote_addr, seg 5055 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_raddr_seg); seg 5070 drivers/infiniband/hw/mlx5/qp.c set_linv_wr(qp, &seg, &size, &cur_edge); seg 5077 drivers/infiniband/hw/mlx5/qp.c err = set_reg_wr(qp, reg_wr(wr), &seg, &size, seg 5102 drivers/infiniband/hw/mlx5/qp.c err = set_reg_wr(qp, &reg_pi_wr, &seg, seg 5109 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, seg 5114 drivers/infiniband/hw/mlx5/qp.c err = begin_wqe(qp, &seg, &ctrl, wr, seg 5146 drivers/infiniband/hw/mlx5/qp.c err = set_pi_umr_wr(wr, qp, &seg, &size, seg 5153 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, cur_edge, idx, seg 5162 drivers/infiniband/hw/mlx5/qp.c err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, seg 5173 drivers/infiniband/hw/mlx5/qp.c &seg, &size); seg 5179 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, cur_edge, idx, seg 5183 drivers/infiniband/hw/mlx5/qp.c err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, seg 5194 drivers/infiniband/hw/mlx5/qp.c &seg, &size); seg 5200 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, cur_edge, idx, seg 5218 drivers/infiniband/hw/mlx5/qp.c set_raddr_seg(seg, rdma_wr(wr)->remote_addr, seg 5220 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_raddr_seg); seg 5238 drivers/infiniband/hw/mlx5/qp.c set_datagram_seg(seg, wr); seg 5239 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_datagram_seg); seg 5241 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); seg 5245 drivers/infiniband/hw/mlx5/qp.c set_datagram_seg(seg, wr); seg 5246 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_datagram_seg); seg 5248 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); seg 5254 drivers/infiniband/hw/mlx5/qp.c pad = seg; seg 5256 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_eth_pad); seg 5258 drivers/infiniband/hw/mlx5/qp.c set_eth_seg(wr, qp, &seg, &size, &cur_edge); seg 5259 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, seg 5271 drivers/infiniband/hw/mlx5/qp.c err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic))); seg 5274 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); seg 5276 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); seg 5277 drivers/infiniband/hw/mlx5/qp.c set_reg_mkey_segment(seg, wr); seg 5278 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_mkey_seg); seg 5280 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); seg 5288 drivers/infiniband/hw/mlx5/qp.c err = set_data_inl_seg(qp, wr,
&seg, &size, &cur_edge); seg 5296 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, seg 5300 drivers/infiniband/hw/mlx5/qp.c ((struct mlx5_wqe_data_seg *)seg, seg 5303 drivers/infiniband/hw/mlx5/qp.c seg += sizeof(struct mlx5_wqe_data_seg); seg 5309 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq, seg 88 drivers/infiniband/hw/mthca/mthca_mr.c u32 seg; seg 95 drivers/infiniband/hw/mthca/mthca_mr.c seg = find_first_bit(buddy->bits[o], m); seg 96 drivers/infiniband/hw/mthca/mthca_mr.c if (seg < m) seg 104 drivers/infiniband/hw/mthca/mthca_mr.c clear_bit(seg, buddy->bits[o]); seg 109 drivers/infiniband/hw/mthca/mthca_mr.c seg <<= 1; seg 110 drivers/infiniband/hw/mthca/mthca_mr.c set_bit(seg ^ 1, buddy->bits[o]); seg 116 drivers/infiniband/hw/mthca/mthca_mr.c seg <<= order; seg 118 drivers/infiniband/hw/mthca/mthca_mr.c return seg; seg 121 drivers/infiniband/hw/mthca/mthca_mr.c static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) seg 123 drivers/infiniband/hw/mthca/mthca_mr.c seg >>= order; seg 127 drivers/infiniband/hw/mthca/mthca_mr.c while (test_bit(seg ^ 1, buddy->bits[order])) { seg 128 drivers/infiniband/hw/mthca/mthca_mr.c clear_bit(seg ^ 1, buddy->bits[order]); seg 130 drivers/infiniband/hw/mthca/mthca_mr.c seg >>= 1; seg 134 drivers/infiniband/hw/mthca/mthca_mr.c set_bit(seg, buddy->bits[order]); seg 193 drivers/infiniband/hw/mthca/mthca_mr.c u32 seg = mthca_buddy_alloc(buddy, order); seg 195 drivers/infiniband/hw/mthca/mthca_mr.c if (seg == -1) seg 199 drivers/infiniband/hw/mthca/mthca_mr.c if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, seg 200 drivers/infiniband/hw/mthca/mthca_mr.c seg + (1 << order) - 1)) { seg 201 drivers/infiniband/hw/mthca/mthca_mr.c mthca_buddy_free(buddy, seg, order); seg 202 drivers/infiniband/hw/mthca/mthca_mr.c seg = -1; seg 205 drivers/infiniband/hw/mthca/mthca_mr.c return seg; seg 432 drivers/infiniband/sw/siw/siw_qp_tx.c int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv; seg 450 drivers/infiniband/sw/siw/siw_qp_tx.c seg = 1; seg 472 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base = seg 474 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_len = sge_len; seg 478 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base, seg 482 drivers/infiniband/sw/siw/siw_qp_tx.c seg++; seg 505 drivers/infiniband/sw/siw/siw_qp_tx.c page_array[seg] = p; seg 508 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base = kmap(p) + fp_off; seg 509 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_len = plen; seg 512 drivers/infiniband/sw/siw/siw_qp_tx.c kmap_mask |= BIT(seg); seg 517 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base, seg 528 drivers/infiniband/sw/siw/siw_qp_tx.c page_array[seg] = virt_to_page(va & PAGE_MASK); seg 541 drivers/infiniband/sw/siw/siw_qp_tx.c if (++seg > (int)MAX_ARRAY) { seg 560 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad]; seg 561 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad); seg 563 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent]; seg 564 drivers/infiniband/sw/siw/siw_qp_tx.c iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent; seg 585 drivers/infiniband/sw/siw/siw_qp_tx.c rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len); seg 592 drivers/infiniband/sw/siw/siw_qp_tx.c rv = kernel_sendmsg(s, &msg, iov, seg + 1, seg 160 drivers/iommu/dmar.c info->seg = 
pci_domain_nr(dev->bus); seg 228 drivers/iommu/dmar.c if (segment != info->seg) seg 281 drivers/iommu/dmar.c if (info->seg != segment) seg 48 drivers/isdn/mISDN/dsp_audio.c int seg; seg 65 drivers/isdn/mISDN/dsp_audio.c for (seg = 0; seg < 8; seg++) { seg 66 drivers/isdn/mISDN/dsp_audio.c if (pcm_val <= seg_end[seg]) seg 70 drivers/isdn/mISDN/dsp_audio.c return ((seg << 4) | seg 71 drivers/isdn/mISDN/dsp_audio.c ((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask; seg 78 drivers/isdn/mISDN/dsp_audio.c int seg; seg 82 drivers/isdn/mISDN/dsp_audio.c seg = (((int) alaw & 0x70) >> 4); seg 83 drivers/isdn/mISDN/dsp_audio.c if (seg) seg 84 drivers/isdn/mISDN/dsp_audio.c i = (i + 0x100) << (seg - 1); seg 84 drivers/net/ethernet/google/gve/gve.h struct gve_tx_seg_desc seg; /* subsequent descs for a packet */ seg 382 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.type_flags = GVE_TXD_SEG; seg 385 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.type_flags |= GVE_TXSF_IPV6; seg 386 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1; seg 387 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); seg 389 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.seg_len = cpu_to_be16(len); seg 390 drivers/net/ethernet/google/gve/gve_tx.c seg_desc->seg.seg_addr = cpu_to_be64(addr); seg 277 drivers/net/ethernet/intel/ice/ice_flex_pipe.c struct ice_generic_seg_hdr *seg; seg 279 drivers/net/ethernet/intel/ice/ice_flex_pipe.c seg = (struct ice_generic_seg_hdr *) seg 282 drivers/net/ethernet/intel/ice/ice_flex_pipe.c if (le32_to_cpu(seg->seg_type) == seg_type) seg 283 drivers/net/ethernet/intel/ice/ice_flex_pipe.c return seg; seg 564 drivers/net/ethernet/intel/ice/ice_flex_pipe.c struct ice_generic_seg_hdr *seg; seg 567 drivers/net/ethernet/intel/ice/ice_flex_pipe.c if (len < off + sizeof(*seg)) seg 570 drivers/net/ethernet/intel/ice/ice_flex_pipe.c seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); seg 573 drivers/net/ethernet/intel/ice/ice_flex_pipe.c if (len < off + le32_to_cpu(seg->seg_size)) seg 594 drivers/net/ethernet/intel/ice/ice_flex_pipe.c hw->seg = NULL; seg 659 drivers/net/ethernet/intel/ice/ice_flex_pipe.c struct ice_seg *seg; seg 685 drivers/net/ethernet/intel/ice/ice_flex_pipe.c seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg); seg 686 drivers/net/ethernet/intel/ice/ice_flex_pipe.c if (!seg) { seg 692 drivers/net/ethernet/intel/ice/ice_flex_pipe.c status = ice_download_pkg(hw, seg); seg 709 drivers/net/ethernet/intel/ice/ice_flex_pipe.c hw->seg = seg; seg 1249 drivers/net/ethernet/intel/ice/ice_flex_pipe.c if (!hw->seg) { seg 1256 drivers/net/ethernet/intel/ice/ice_flex_pipe.c sect = ice_pkg_enum_section(hw->seg, &state, sid); seg 490 drivers/net/ethernet/intel/ice/ice_type.h struct ice_seg *seg; seg 50 drivers/net/ethernet/mellanox/mlx4/mr.c u32 seg; seg 57 drivers/net/ethernet/mellanox/mlx4/mr.c seg = find_first_bit(buddy->bits[o], m); seg 58 drivers/net/ethernet/mellanox/mlx4/mr.c if (seg < m) seg 66 drivers/net/ethernet/mellanox/mlx4/mr.c clear_bit(seg, buddy->bits[o]); seg 71 drivers/net/ethernet/mellanox/mlx4/mr.c seg <<= 1; seg 72 drivers/net/ethernet/mellanox/mlx4/mr.c set_bit(seg ^ 1, buddy->bits[o]); seg 78 drivers/net/ethernet/mellanox/mlx4/mr.c seg <<= order; seg 80 drivers/net/ethernet/mellanox/mlx4/mr.c return seg; seg 83 drivers/net/ethernet/mellanox/mlx4/mr.c static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) seg 85 
drivers/net/ethernet/mellanox/mlx4/mr.c seg >>= order; seg 89 drivers/net/ethernet/mellanox/mlx4/mr.c while (test_bit(seg ^ 1, buddy->bits[order])) { seg 90 drivers/net/ethernet/mellanox/mlx4/mr.c clear_bit(seg ^ 1, buddy->bits[order]); seg 92 drivers/net/ethernet/mellanox/mlx4/mr.c seg >>= 1; seg 96 drivers/net/ethernet/mellanox/mlx4/mr.c set_bit(seg, buddy->bits[order]); seg 153 drivers/net/ethernet/mellanox/mlx4/mr.c u32 seg; seg 159 drivers/net/ethernet/mellanox/mlx4/mr.c seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order); seg 160 drivers/net/ethernet/mellanox/mlx4/mr.c if (seg == -1) seg 163 drivers/net/ethernet/mellanox/mlx4/mr.c offset = seg * (1 << log_mtts_per_seg); seg 167 drivers/net/ethernet/mellanox/mlx4/mr.c mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order); seg 408 drivers/net/ethernet/qlogic/qed/qed_cxt.c u32 seg) seg 417 drivers/net/ethernet/qlogic/qed/qed_cxt.c if (p_cfg->conn_cfg[i].tid_seg[seg].count) seg 418 drivers/net/ethernet/qlogic/qed/qed_cxt.c return &p_cfg->conn_cfg[i].tid_seg[seg]; seg 486 drivers/net/ethernet/qlogic/qed/qed_cxt.c u8 seg, seg 490 drivers/net/ethernet/qlogic/qed/qed_cxt.c struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; seg 2203 drivers/net/ethernet/qlogic/qed/qed_cxt.c u32 proto, seg, total_lines, i, shadow_line; seg 2212 drivers/net/ethernet/qlogic/qed/qed_cxt.c seg = QED_CXT_FCOE_TID_SEG; seg 2216 drivers/net/ethernet/qlogic/qed/qed_cxt.c seg = QED_CXT_ISCSI_TID_SEG; seg 2226 drivers/net/ethernet/qlogic/qed/qed_cxt.c p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; seg 2230 drivers/net/ethernet/qlogic/qed/qed_cxt.c p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; seg 2510 drivers/net/ethernet/qlogic/qed/qed_cxt.c u32 proto, seg; seg 2516 drivers/net/ethernet/qlogic/qed/qed_cxt.c seg = QED_CXT_FCOE_TID_SEG; seg 2520 drivers/net/ethernet/qlogic/qed/qed_cxt.c seg = QED_CXT_ISCSI_TID_SEG; seg 2530 drivers/net/ethernet/qlogic/qed/qed_cxt.c p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; seg 2533 drivers/net/ethernet/qlogic/qed/qed_cxt.c p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; seg 2537 drivers/net/ethernet/qlogic/qed/qed_cxt.c p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; seg 2316 drivers/net/ethernet/qlogic/qla3xxx.c int seg_cnt, seg = 0; seg 2337 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); seg 2338 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, len); seg 2339 drivers/net/ethernet/qlogic/qla3xxx.c seg++; seg 2349 drivers/net/ethernet/qlogic/qla3xxx.c completed_segs++, seg++) { seg 2357 drivers/net/ethernet/qlogic/qla3xxx.c if ((seg == 2 && seg_cnt > 3) || seg 2358 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 7 && seg_cnt > 8) || seg 2359 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 12 && seg_cnt > 13) || seg 2360 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 17 && seg_cnt > 18)) { seg 2377 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); seg 2378 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, seg 2382 drivers/net/ethernet/qlogic/qla3xxx.c seg++; seg 2399 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); seg 2400 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); seg 2412 drivers/net/ethernet/qlogic/qla3xxx.c seg = 1; seg 2415 drivers/net/ethernet/qlogic/qla3xxx.c for (i = 0; i < completed_segs; i++, seg++) { seg 2423 
drivers/net/ethernet/qlogic/qla3xxx.c if ((seg == 2 && seg_cnt > 3) || seg 2424 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 7 && seg_cnt > 8) || seg 2425 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 12 && seg_cnt > 13) || seg 2426 drivers/net/ethernet/qlogic/qla3xxx.c (seg == 17 && seg_cnt > 18)) { seg 2428 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[seg], mapaddr), seg 2429 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[seg], maplen), seg 2432 drivers/net/ethernet/qlogic/qla3xxx.c seg++; seg 2436 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[seg], mapaddr), seg 2437 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[seg], maplen), seg 51 drivers/net/wireless/rsi/rsi_91x_usb.c u8 *seg = dev->tx_buffer; seg 55 drivers/net/wireless/rsi/rsi_91x_usb.c memset(seg, 0, len + RSI_USB_TX_HEAD_ROOM); seg 56 drivers/net/wireless/rsi/rsi_91x_usb.c memcpy(seg + RSI_USB_TX_HEAD_ROOM, buf, len); seg 61 drivers/net/wireless/rsi/rsi_91x_usb.c (void *)seg, seg 333 drivers/pci/controller/pci-thunder-pem.c static void thunder_pem_reserve_range(struct device *dev, int seg, seg 340 drivers/pci/controller/pci-thunder-pem.c regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); seg 221 drivers/pci/pci.c int seg, bus, slot, func; seg 261 drivers/pci/pci.c ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot, seg 264 drivers/pci/pci.c seg = 0; seg 272 drivers/pci/pci.c ret = (seg == pci_domain_nr(dev->bus) && seg 204 drivers/pcmcia/omap_cf.c unsigned seg; seg 209 drivers/pcmcia/omap_cf.c seg = (int) pdev->dev.platform_data; seg 210 drivers/pcmcia/omap_cf.c if (seg == 0 || seg > 3) seg 234 drivers/pcmcia/omap_cf.c switch (seg) { seg 268 drivers/pcmcia/omap_cf.c omap_writew(~(1 << seg), CF_CFG); seg 270 drivers/pcmcia/omap_cf.c pr_info("%s: cs%d on irq %d\n", driver_name, seg, irq); seg 277 drivers/pcmcia/omap_cf.c seg, omap_readl(EMIFS_CCS(seg)), omap_readl(EMIFS_ACS(seg))); seg 278 drivers/pcmcia/omap_cf.c omap_writel(0x0004a1b3, EMIFS_CCS(seg)); /* synch mode 4 etc */ seg 279 drivers/pcmcia/omap_cf.c omap_writel(0x00000000, EMIFS_ACS(seg)); /* OE hold/setup */ seg 417 drivers/platform/mellanox/mlxbf-tmfifo.c u32 len, idx, seg; seg 424 drivers/platform/mellanox/mlxbf-tmfifo.c seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail, seg 426 drivers/platform/mellanox/mlxbf-tmfifo.c if (len <= seg) { seg 429 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg); seg 430 drivers/platform/mellanox/mlxbf-tmfifo.c addr += seg; seg 431 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy(cons->tx_buf.buf, addr, len - seg); seg 500 drivers/platform/mellanox/mlxbf-tmfifo.c int size, seg; seg 533 drivers/platform/mellanox/mlxbf-tmfifo.c seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail, seg 535 drivers/platform/mellanox/mlxbf-tmfifo.c if (seg >= sizeof(u64)) { seg 538 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy(&data, addr, seg); seg 539 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy((u8 *)&data + seg, cons->tx_buf.buf, seg 540 drivers/platform/mellanox/mlxbf-tmfifo.c sizeof(u64) - seg); seg 524 drivers/scsi/megaraid.c u32 seg; seg 851 drivers/scsi/megaraid.c (u32 *)&mbox->m_out.xferaddr, &seg); seg 7778 drivers/scsi/qla2xxx/qla_init.c uint32_t risc_addr, risc_size, fwclen, wlen, *seg; seg 7820 drivers/scsi/qla2xxx/qla_init.c seg = blob->segs; seg 7821 drivers/scsi/qla2xxx/qla_init.c while (*seg && rval == QLA_SUCCESS) { seg 7822 drivers/scsi/qla2xxx/qla_init.c risc_addr = *seg; seg 7823 
drivers/scsi/qla2xxx/qla_init.c *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr; seg 7863 drivers/scsi/qla2xxx/qla_init.c seg++; seg 88 drivers/scsi/xen-scsifront.c struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; seg 218 drivers/scsi/xen-scsifront.c ring_req->seg[i] = shadow->seg[i]; seg 404 drivers/scsi/xen-scsifront.c struct scsiif_request_segment *seg; seg 424 drivers/scsi/xen-scsifront.c seg = shadow->sg ? : shadow->seg; seg 436 drivers/scsi/xen-scsifront.c page = virt_to_page(seg); seg 437 drivers/scsi/xen-scsifront.c off = offset_in_page(seg); seg 449 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].gref = ref; seg 450 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].offset = (uint16_t)off; seg 451 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].length = (uint16_t)bytes; seg 485 drivers/scsi/xen-scsifront.c seg->gref = ref; seg 486 drivers/scsi/xen-scsifront.c seg->offset = (uint16_t)off; seg 487 drivers/scsi/xen-scsifront.c seg->length = (uint16_t)bytes; seg 490 drivers/scsi/xen-scsifront.c seg++; seg 166 drivers/ssb/pcmcia.c int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg) seg 172 drivers/ssb/pcmcia.c WARN_ON((seg != 0) && (seg != 1)); seg 174 drivers/ssb/pcmcia.c err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg); seg 180 drivers/ssb/pcmcia.c if (val == seg) seg 188 drivers/ssb/pcmcia.c bus->mapped_pcmcia_seg = seg; seg 59 drivers/ssb/ssb_private.h u8 seg); seg 75 drivers/ssb/ssb_private.h u8 seg) seg 138 drivers/staging/media/hantro/hantro_g1_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 144 drivers/staging/media/hantro/hantro_g1_vp8_dec.c if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) { seg 146 drivers/staging/media/hantro/hantro_g1_vp8_dec.c } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) { seg 148 drivers/staging/media/hantro/hantro_g1_vp8_dec.c u32 lf_level = clamp(lf->level + seg->lf_update[i], seg 156 drivers/staging/media/hantro/hantro_g1_vp8_dec.c seg->lf_update[i]); seg 181 drivers/staging/media/hantro/hantro_g1_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 185 drivers/staging/media/hantro/hantro_g1_vp8_dec.c if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) { seg 187 drivers/staging/media/hantro/hantro_g1_vp8_dec.c } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) { seg 189 drivers/staging/media/hantro/hantro_g1_vp8_dec.c u32 quant = clamp(q->y_ac_qi + seg->quant_update[i], seg 197 drivers/staging/media/hantro/hantro_g1_vp8_dec.c seg->quant_update[i]); seg 405 drivers/staging/media/hantro/hantro_g1_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 419 drivers/staging/media/hantro/hantro_g1_vp8_dec.c if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED) { seg 421 drivers/staging/media/hantro/hantro_g1_vp8_dec.c if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP) seg 279 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 285 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) { seg 287 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) { seg 289 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c u32 lf_level = clamp(lf->level + seg->lf_update[i], seg 297 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c seg->lf_update[i]); seg 319 
drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 323 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c if (!(seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED)) { seg 325 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c } else if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE) { seg 327 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c u32 quant = clamp(q->y_ac_qi + seg->quant_update[i], seg 335 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c seg->quant_update[i]); seg 483 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c const struct v4l2_vp8_segment_header *seg = &hdr->segment_header; seg 497 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED) { seg 499 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c if (seg->flags & V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP) seg 120 drivers/staging/wusbcore/wa-xfer.c static inline void wa_seg_init(struct wa_seg *seg) seg 122 drivers/staging/wusbcore/wa-xfer.c usb_init_urb(&seg->tr_urb); seg 125 drivers/staging/wusbcore/wa-xfer.c memset(((void *)seg) + sizeof(seg->tr_urb), 0, seg 126 drivers/staging/wusbcore/wa-xfer.c sizeof(*seg) - sizeof(seg->tr_urb)); seg 142 drivers/staging/wusbcore/wa-xfer.c struct wa_seg **seg; /* transfer segments */ seg 155 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg, int curr_iso_frame); seg 175 drivers/staging/wusbcore/wa-xfer.c if (xfer->seg) { seg 178 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = xfer->seg[cnt]; seg 179 drivers/staging/wusbcore/wa-xfer.c if (seg) { seg 180 drivers/staging/wusbcore/wa-xfer.c usb_free_urb(seg->isoc_pack_desc_urb); seg 181 drivers/staging/wusbcore/wa-xfer.c if (seg->dto_urb) { seg 182 drivers/staging/wusbcore/wa-xfer.c kfree(seg->dto_urb->sg); seg 183 drivers/staging/wusbcore/wa-xfer.c usb_free_urb(seg->dto_urb); seg 185 drivers/staging/wusbcore/wa-xfer.c usb_free_urb(&seg->tr_urb); seg 188 drivers/staging/wusbcore/wa-xfer.c kfree(xfer->seg); seg 327 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 336 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[cnt]; seg 337 drivers/staging/wusbcore/wa-xfer.c switch (seg->status) { seg 339 drivers/staging/wusbcore/wa-xfer.c if (found_short && seg->result > 0) { seg 342 drivers/staging/wusbcore/wa-xfer.c seg->result); seg 346 drivers/staging/wusbcore/wa-xfer.c urb->actual_length += seg->result; seg 348 drivers/staging/wusbcore/wa-xfer.c && seg->result < xfer->seg_size seg 353 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, found_short, seg 354 drivers/staging/wusbcore/wa-xfer.c seg->result, urb->actual_length); seg 357 drivers/staging/wusbcore/wa-xfer.c xfer->result = seg->result; seg 359 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg->result, seg 360 drivers/staging/wusbcore/wa-xfer.c seg->result); seg 363 drivers/staging/wusbcore/wa-xfer.c xfer->result = seg->result; seg 365 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg->result, seg 366 drivers/staging/wusbcore/wa-xfer.c seg->result); seg 370 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), cnt, seg->status); seg 389 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg, enum wa_seg_status status) seg 391 drivers/staging/wusbcore/wa-xfer.c seg->status = status; seg 456 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = xfer->seg[seg_index]; seg 458 drivers/staging/wusbcore/wa-xfer.c if ((seg->status == WA_SEG_DONE) || 
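The wa-xfer fragments above accumulate per-segment byte counts into urb->actual_length and treat a short segment that is followed by more data as a protocol error. A minimal sketch of that accounting, assuming invented demo_* names rather than the driver's own types:

    #include <stdbool.h>
    #include <stddef.h>

    struct demo_seg {
            size_t result;          /* bytes actually transferred */
    };

    /*
     * Sum per-segment results. Only the last segment carrying data may
     * be shorter than seg_size; data after a short segment means the
     * stream is corrupt, so report failure via *ok.
     */
    static size_t demo_sum_segments(const struct demo_seg *segs, int nsegs,
                                    size_t seg_size, bool *ok)
    {
            size_t total = 0;
            bool found_short = false;
            int i;

            for (i = 0; i < nsegs; i++) {
                    if (found_short && segs[i].result > 0) {
                            *ok = false;
                            return 0;
                    }
                    total += segs[i].result;
                    if (segs[i].result < seg_size && i < nsegs - 1)
                            found_short = true;
            }
            *ok = true;
            return total;
    }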
seg 459 drivers/staging/wusbcore/wa-xfer.c (seg->status == WA_SEG_ERROR)) { seg 658 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg) { seg 667 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count)); seg 668 drivers/staging/wusbcore/wa-xfer.c for (frame_index = 0; frame_index < seg->isoc_frame_count; seg 670 drivers/staging/wusbcore/wa-xfer.c int offset_index = frame_index + seg->isoc_frame_offset; seg 684 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = xfer->seg[0]; seg 686 drivers/staging/wusbcore/wa-xfer.c xfer_hdr0 = &seg->xfer_hdr; seg 710 drivers/staging/wusbcore/wa-xfer.c xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count); seg 712 drivers/staging/wusbcore/wa-xfer.c __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); seg 734 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = urb->context; seg 735 drivers/staging/wusbcore/wa-xfer.c struct wa_xfer *xfer = seg->xfer; seg 755 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index += seg->isoc_frame_count; seg 757 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index += 1; seg 758 drivers/staging/wusbcore/wa-xfer.c if (seg->isoc_frame_index < seg->isoc_frame_count) { seg 765 drivers/staging/wusbcore/wa-xfer.c if ((seg->isoc_frame_index + 1) >= seg 766 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count) seg 770 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, seg 778 drivers/staging/wusbcore/wa-xfer.c seg->result += urb->actual_length; seg 781 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index, seg->result); seg 782 drivers/staging/wusbcore/wa-xfer.c if (seg->status < WA_SEG_PENDING) seg 783 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_PENDING; seg 790 drivers/staging/wusbcore/wa-xfer.c __wa_populate_dto_urb_isoc(xfer, seg, seg 791 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset + seg->isoc_frame_index); seg 796 drivers/staging/wusbcore/wa-xfer.c result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); seg 799 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index, result); seg 819 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index, urb->status); seg 838 drivers/staging/wusbcore/wa-xfer.c if (seg->status != WA_SEG_ERROR) { seg 839 drivers/staging/wusbcore/wa-xfer.c seg->result = urb->status; seg 842 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); seg 871 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = urb->context; seg 872 drivers/staging/wusbcore/wa-xfer.c struct wa_xfer *xfer = seg->xfer; seg 886 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index); seg 887 drivers/staging/wusbcore/wa-xfer.c if (xfer->is_inbound && seg->status < WA_SEG_PENDING) seg 888 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_PENDING; seg 900 drivers/staging/wusbcore/wa-xfer.c wa_xfer_id(xfer), seg->index, urb->status); seg 906 drivers/staging/wusbcore/wa-xfer.c if (seg->status != WA_SEG_ERROR) { seg 907 drivers/staging/wusbcore/wa-xfer.c usb_unlink_urb(seg->dto_urb); seg 908 drivers/staging/wusbcore/wa-xfer.c seg->result = urb->status; seg 911 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, seg 944 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = urb->context; seg 945 drivers/staging/wusbcore/wa-xfer.c struct wa_xfer *xfer = seg->xfer; seg 959 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index); seg 961 drivers/staging/wusbcore/wa-xfer.c seg->status < WA_SEG_PENDING && seg 963 
drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_PENDING; seg 976 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg 984 drivers/staging/wusbcore/wa-xfer.c usb_unlink_urb(seg->isoc_pack_desc_urb); seg 985 drivers/staging/wusbcore/wa-xfer.c usb_unlink_urb(seg->dto_urb); seg 986 drivers/staging/wusbcore/wa-xfer.c seg->result = urb->status; seg 989 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); seg 1086 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg, int curr_iso_frame) seg 1088 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; seg 1089 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->sg = NULL; seg 1090 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->num_sgs = 0; seg 1092 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + seg 1096 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_buffer_length = seg->isoc_size; seg 1098 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_buffer_length = seg 1106 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size) seg 1111 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_dma = seg 1113 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; seg 1114 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->sg = NULL; seg 1115 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->num_sgs = 0; seg 1118 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_flags &= seg 1121 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->num_mapped_sgs = 0; seg 1124 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_buffer = seg 1127 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->sg = NULL; seg 1128 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->num_sgs = 0; seg 1130 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_buffer = NULL; seg 1138 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->sg = wa_xfer_create_subset_sg( seg 1141 drivers/staging/wusbcore/wa-xfer.c &(seg->dto_urb->num_sgs)); seg 1142 drivers/staging/wusbcore/wa-xfer.c if (!(seg->dto_urb->sg)) seg 1146 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb->transfer_buffer_length = buf_itr_size; seg 1162 drivers/staging/wusbcore/wa-xfer.c size_t alloc_size = sizeof(*xfer->seg[0]) seg 1163 drivers/staging/wusbcore/wa-xfer.c - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; seg 1166 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 1170 drivers/staging/wusbcore/wa-xfer.c xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); seg 1171 drivers/staging/wusbcore/wa-xfer.c if (xfer->seg == NULL) seg 1193 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, seg 1195 drivers/staging/wusbcore/wa-xfer.c if (seg == NULL) seg 1197 drivers/staging/wusbcore/wa-xfer.c wa_seg_init(seg); seg 1198 drivers/staging/wusbcore/wa-xfer.c seg->xfer = xfer; seg 1199 drivers/staging/wusbcore/wa-xfer.c seg->index = cnt; seg 1200 drivers/staging/wusbcore/wa-xfer.c usb_fill_bulk_urb(&seg->tr_urb, usb_dev, seg 1203 drivers/staging/wusbcore/wa-xfer.c &seg->xfer_hdr, xfer_hdr_size, seg 1204 drivers/staging/wusbcore/wa-xfer.c wa_seg_tr_cb, seg); seg 1208 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count = seg_isoc_frame_count; seg 1209 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset = isoc_frame_offset; seg 1210 drivers/staging/wusbcore/wa-xfer.c seg->isoc_size = seg_isoc_size; seg 1212 
drivers/staging/wusbcore/wa-xfer.c seg->isoc_pack_desc_urb = seg 1214 drivers/staging/wusbcore/wa-xfer.c if (seg->isoc_pack_desc_urb == NULL) seg 1222 drivers/staging/wusbcore/wa-xfer.c seg->isoc_pack_desc_urb, usb_dev, seg 1225 drivers/staging/wusbcore/wa-xfer.c (void *)(&seg->xfer_hdr) + seg 1228 drivers/staging/wusbcore/wa-xfer.c wa_seg_iso_pack_desc_cb, seg); seg 1236 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); seg 1237 drivers/staging/wusbcore/wa-xfer.c if (seg->dto_urb == NULL) seg 1240 drivers/staging/wusbcore/wa-xfer.c seg->dto_urb, usb_dev, seg 1243 drivers/staging/wusbcore/wa-xfer.c NULL, 0, wa_seg_dto_cb, seg); seg 1252 drivers/staging/wusbcore/wa-xfer.c __wa_populate_dto_urb_isoc(xfer, seg, seg 1253 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset); seg 1256 drivers/staging/wusbcore/wa-xfer.c result = __wa_populate_dto_urb(xfer, seg, seg 1265 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_READY; seg 1275 drivers/staging/wusbcore/wa-xfer.c usb_free_urb(xfer->seg[cnt]->dto_urb); seg 1277 drivers/staging/wusbcore/wa-xfer.c usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); seg 1279 drivers/staging/wusbcore/wa-xfer.c kfree(xfer->seg[cnt]); seg 1280 drivers/staging/wusbcore/wa-xfer.c xfer->seg[cnt] = NULL; seg 1315 drivers/staging/wusbcore/wa-xfer.c xfer_hdr0 = &xfer->seg[0]->xfer_hdr; seg 1323 drivers/staging/wusbcore/wa-xfer.c cpu_to_le32(xfer->seg[0]->isoc_size); seg 1326 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = xfer->seg[cnt]; seg 1329 drivers/staging/wusbcore/wa-xfer.c xfer_hdr = &seg->xfer_hdr; seg 1340 drivers/staging/wusbcore/wa-xfer.c cpu_to_le32(seg->isoc_size); seg 1342 drivers/staging/wusbcore/wa-xfer.c cpu_to_le32(seg->isoc_frame_count); seg 1343 drivers/staging/wusbcore/wa-xfer.c __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); seg 1344 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_READY; seg 1353 drivers/staging/wusbcore/wa-xfer.c xfer_hdr = &xfer->seg[cnt]->xfer_hdr; seg 1360 drivers/staging/wusbcore/wa-xfer.c xfer->seg[cnt]->status = WA_SEG_READY; seg 1377 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg, int *dto_done) seg 1390 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_SUBMITTED; seg 1391 drivers/staging/wusbcore/wa-xfer.c result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC); seg 1394 drivers/staging/wusbcore/wa-xfer.c __func__, xfer, seg->index, result); seg 1399 drivers/staging/wusbcore/wa-xfer.c if (seg->isoc_pack_desc_urb) { seg 1401 drivers/staging/wusbcore/wa-xfer.c result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); seg 1402 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index = 0; seg 1405 drivers/staging/wusbcore/wa-xfer.c __func__, xfer, seg->index, result); seg 1411 drivers/staging/wusbcore/wa-xfer.c if (seg->dto_urb) { seg 1414 drivers/staging/wusbcore/wa-xfer.c result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); seg 1417 drivers/staging/wusbcore/wa-xfer.c __func__, xfer, seg->index, result); seg 1427 drivers/staging/wusbcore/wa-xfer.c && (seg->isoc_frame_count > 1)) seg 1434 drivers/staging/wusbcore/wa-xfer.c usb_unlink_urb(seg->isoc_pack_desc_urb); seg 1436 drivers/staging/wusbcore/wa-xfer.c usb_unlink_urb(&seg->tr_urb); seg 1438 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_ERROR; seg 1439 drivers/staging/wusbcore/wa-xfer.c seg->result = result; seg 1455 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 1465 drivers/staging/wusbcore/wa-xfer.c seg = list_first_entry(&(rpipe->seg_list), struct wa_seg, seg 1467 
drivers/staging/wusbcore/wa-xfer.c list_del(&seg->list_node); seg 1468 drivers/staging/wusbcore/wa-xfer.c xfer = seg->xfer; seg 1475 drivers/staging/wusbcore/wa-xfer.c result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); seg 1480 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg 1547 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 1566 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[cnt]; seg 1575 drivers/staging/wusbcore/wa-xfer.c result = __wa_seg_submit(rpipe, xfer, seg, seg 1593 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_DELAYED; seg 1594 drivers/staging/wusbcore/wa-xfer.c list_add_tail(&seg->list_node, &rpipe->seg_list); seg 1923 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 1968 drivers/staging/wusbcore/wa-xfer.c if (!list_empty(&xfer->list_node) && xfer->seg == NULL) seg 1971 drivers/staging/wusbcore/wa-xfer.c if (xfer->seg == NULL) /* still hasn't reached */ seg 1981 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[cnt]; seg 1983 drivers/staging/wusbcore/wa-xfer.c __func__, wa_xfer_id(xfer), cnt, seg->status); seg 1984 drivers/staging/wusbcore/wa-xfer.c switch (seg->status) { seg 1988 drivers/staging/wusbcore/wa-xfer.c xfer, cnt, seg->status); seg 1998 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_ABORTED; seg 1999 drivers/staging/wusbcore/wa-xfer.c seg->result = -ENOENT; seg 2000 drivers/staging/wusbcore/wa-xfer.c list_del(&seg->list_node); seg 2032 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_ABORTED; seg 2132 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *current_seg = xfer->seg[index]; seg 2163 drivers/staging/wusbcore/wa-xfer.c struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg) seg 2165 drivers/staging/wusbcore/wa-xfer.c int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset; seg 2181 drivers/staging/wusbcore/wa-xfer.c seg_index = seg->isoc_frame_index; seg 2190 drivers/staging/wusbcore/wa-xfer.c if (seg_index < seg->isoc_frame_count) { seg 2211 drivers/staging/wusbcore/wa-xfer.c buf_in_urb->context = seg; seg 2214 drivers/staging/wusbcore/wa-xfer.c return seg_index - seg->isoc_frame_index; seg 2222 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = xfer->seg[seg_idx]; seg 2264 drivers/staging/wusbcore/wa-xfer.c buf_in_urb->context = seg; seg 2283 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 2295 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[seg_idx]; seg 2299 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status); seg 2300 drivers/staging/wusbcore/wa-xfer.c if (seg->status == WA_SEG_ABORTED seg 2301 drivers/staging/wusbcore/wa-xfer.c || seg->status == WA_SEG_ERROR) /* already handled */ seg 2303 drivers/staging/wusbcore/wa-xfer.c if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ seg 2304 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ seg 2305 drivers/staging/wusbcore/wa-xfer.c if (seg->status != WA_SEG_PENDING) { seg 2308 drivers/staging/wusbcore/wa-xfer.c xfer, seg_idx, seg->status); seg 2309 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ seg 2312 drivers/staging/wusbcore/wa-xfer.c seg->result = wa_xfer_status_to_errno(usb_status); seg 2314 drivers/staging/wusbcore/wa-xfer.c xfer, xfer->id, seg->index, usb_status); seg 2315 drivers/staging/wusbcore/wa-xfer.c seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ? 
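The fragments just above convert a hardware status byte into an errno (wa_xfer_status_to_errno) and pick the segment's final state by masking off the top bit. A rough sketch of that pattern, with invented status codes (the DEMO_STATUS_* values stand in for the real WA_XFER_STATUS_* ones):

    #include <errno.h>

    enum demo_seg_status { DEMO_SEG_PENDING, DEMO_SEG_ABORTED, DEMO_SEG_ERROR };

    #define DEMO_STATUS_SUCCESS 0x00
    #define DEMO_STATUS_HALTED  0x01
    #define DEMO_STATUS_ABORTED 0x02

    static int demo_status_to_errno(unsigned char status)
    {
            switch (status & 0x7F) {        /* bit 7 is a warning flag */
            case DEMO_STATUS_SUCCESS:
                    return 0;
            case DEMO_STATUS_ABORTED:
                    return -ENOENT;
            case DEMO_STATUS_HALTED:
                    return -EPIPE;
            default:
                    return -EINVAL;
            }
    }

    static enum demo_seg_status demo_seg_state(unsigned char status)
    {
            return ((status & 0x7F) == DEMO_STATUS_ABORTED) ?
                    DEMO_SEG_ABORTED : DEMO_SEG_ERROR;
    }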
seg 2328 drivers/staging/wusbcore/wa-xfer.c wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg 2339 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_DTI_PENDING; seg 2352 drivers/staging/wusbcore/wa-xfer.c seg->result = bytes_transferred; seg 2354 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); seg 2372 drivers/staging/wusbcore/wa-xfer.c seg->result = result; seg 2377 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_ERROR; seg 2381 drivers/staging/wusbcore/wa-xfer.c wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status); seg 2437 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg; seg 2461 drivers/staging/wusbcore/wa-xfer.c seg = xfer->seg[wa->dti_isoc_xfer_seg]; seg 2464 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count); seg 2479 drivers/staging/wusbcore/wa-xfer.c for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) { seg 2483 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset + seg_index; seg 2504 drivers/staging/wusbcore/wa-xfer.c seg->status = WA_SEG_DTI_PENDING; seg 2507 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index = first_frame_index; seg 2515 drivers/staging/wusbcore/wa-xfer.c buf_in_urb, xfer, seg); seg 2517 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index += urb_frame_count; seg 2525 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset + seg->isoc_frame_index; seg 2528 drivers/staging/wusbcore/wa-xfer.c while ((seg->isoc_frame_index < seg 2529 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count) && seg 2531 drivers/staging/wusbcore/wa-xfer.c ++(seg->isoc_frame_index); seg 2537 drivers/staging/wusbcore/wa-xfer.c && (seg->isoc_frame_index < seg 2538 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_count)); seg 2551 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); seg 2584 drivers/staging/wusbcore/wa-xfer.c struct wa_seg *seg = urb->context; seg 2585 drivers/staging/wusbcore/wa-xfer.c struct wa_xfer *xfer = seg->xfer; seg 2614 drivers/staging/wusbcore/wa-xfer.c seg_index = seg->isoc_frame_index; seg 2615 drivers/staging/wusbcore/wa-xfer.c while (seg_index < seg->isoc_frame_count) { seg 2617 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_offset + seg_index; seg 2622 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index = seg_index; seg 2634 drivers/staging/wusbcore/wa-xfer.c seg->result += urb->actual_length; seg 2640 drivers/staging/wusbcore/wa-xfer.c xfer, seg); seg 2642 drivers/staging/wusbcore/wa-xfer.c seg->isoc_frame_index += urb_frame_count; seg 2663 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg 2664 drivers/staging/wusbcore/wa-xfer.c seg->result); seg 2666 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, seg 2688 drivers/staging/wusbcore/wa-xfer.c xfer, wa_xfer_id(xfer), seg->index, seg 2696 drivers/staging/wusbcore/wa-xfer.c seg->result = urb->status; seg 2699 drivers/staging/wusbcore/wa-xfer.c done = __wa_xfer_mark_seg_as_done(xfer, seg, seg 176 drivers/usb/early/xhci-dbc.c xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring) seg 178 drivers/usb/early/xhci-dbc.c seg->trbs = xdbc_get_page(&seg->dma); seg 179 drivers/usb/early/xhci-dbc.c if (!seg->trbs) seg 182 drivers/usb/early/xhci-dbc.c ring->segment = seg; seg 189 drivers/usb/early/xhci-dbc.c struct xdbc_segment *seg = ring->segment; seg 191 drivers/usb/early/xhci-dbc.c if (!seg) seg 194 drivers/usb/early/xhci-dbc.c memblock_free(seg->dma, PAGE_SIZE); seg 200 
drivers/usb/early/xhci-dbc.c struct xdbc_segment *seg = ring->segment; seg 203 drivers/usb/early/xhci-dbc.c memset(seg->trbs, 0, PAGE_SIZE); seg 205 drivers/usb/early/xhci-dbc.c ring->enqueue = seg->trbs; seg 206 drivers/usb/early/xhci-dbc.c ring->dequeue = seg->trbs; seg 210 drivers/usb/early/xhci-dbc.c link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1]; seg 211 drivers/usb/early/xhci-dbc.c link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma)); seg 212 drivers/usb/early/xhci-dbc.c link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma)); seg 195 drivers/usb/host/xhci-debugfs.c struct xhci_segment *seg) seg 202 drivers/usb/host/xhci-debugfs.c trb = &seg->trbs[i]; seg 203 drivers/usb/host/xhci-debugfs.c dma = seg->dma + i * sizeof(*trb); seg 216 drivers/usb/host/xhci-debugfs.c struct xhci_segment *seg = ring->first_seg; seg 219 drivers/usb/host/xhci-debugfs.c xhci_ring_dump_segment(s, seg); seg 220 drivers/usb/host/xhci-debugfs.c seg = seg->next; seg 33 drivers/usb/host/xhci-mem.c struct xhci_segment *seg; seg 38 drivers/usb/host/xhci-mem.c seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev)); seg 39 drivers/usb/host/xhci-mem.c if (!seg) seg 42 drivers/usb/host/xhci-mem.c seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma); seg 43 drivers/usb/host/xhci-mem.c if (!seg->trbs) { seg 44 drivers/usb/host/xhci-mem.c kfree(seg); seg 49 drivers/usb/host/xhci-mem.c seg->bounce_buf = kzalloc_node(max_packet, flags, seg 51 drivers/usb/host/xhci-mem.c if (!seg->bounce_buf) { seg 52 drivers/usb/host/xhci-mem.c dma_pool_free(xhci->segment_pool, seg->trbs, dma); seg 53 drivers/usb/host/xhci-mem.c kfree(seg); seg 60 drivers/usb/host/xhci-mem.c seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); seg 62 drivers/usb/host/xhci-mem.c seg->dma = dma; seg 63 drivers/usb/host/xhci-mem.c seg->next = NULL; seg 65 drivers/usb/host/xhci-mem.c return seg; seg 68 drivers/usb/host/xhci-mem.c static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) seg 70 drivers/usb/host/xhci-mem.c if (seg->trbs) { seg 71 drivers/usb/host/xhci-mem.c dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg 72 drivers/usb/host/xhci-mem.c seg->trbs = NULL; seg 74 drivers/usb/host/xhci-mem.c kfree(seg->bounce_buf); seg 75 drivers/usb/host/xhci-mem.c kfree(seg); seg 81 drivers/usb/host/xhci-mem.c struct xhci_segment *seg; seg 83 drivers/usb/host/xhci-mem.c seg = first->next; seg 84 drivers/usb/host/xhci-mem.c while (seg != first) { seg 85 drivers/usb/host/xhci-mem.c struct xhci_segment *next = seg->next; seg 86 drivers/usb/host/xhci-mem.c xhci_segment_free(xhci, seg); seg 87 drivers/usb/host/xhci-mem.c seg = next; seg 186 drivers/usb/host/xhci-mem.c struct xhci_segment *seg, seg 192 drivers/usb/host/xhci-mem.c key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); seg 207 drivers/usb/host/xhci-mem.c struct xhci_segment *seg) seg 211 drivers/usb/host/xhci-mem.c key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT); seg 223 drivers/usb/host/xhci-mem.c struct xhci_segment *seg; seg 230 drivers/usb/host/xhci-mem.c seg = first_seg; seg 233 drivers/usb/host/xhci-mem.c ring, seg, mem_flags); seg 236 drivers/usb/host/xhci-mem.c if (seg == last_seg) seg 238 drivers/usb/host/xhci-mem.c seg = seg->next; seg 239 drivers/usb/host/xhci-mem.c } while (seg != first_seg); seg 244 drivers/usb/host/xhci-mem.c failed_seg = seg; seg 245 drivers/usb/host/xhci-mem.c seg = first_seg; seg 247 drivers/usb/host/xhci-mem.c xhci_remove_segment_mapping(trb_address_map, seg); seg 248 drivers/usb/host/xhci-mem.c if (seg == 
seg 250 drivers/usb/host/xhci-mem.c seg = seg->next;
seg 251 drivers/usb/host/xhci-mem.c } while (seg != first_seg);
seg 258 drivers/usb/host/xhci-mem.c struct xhci_segment *seg;
seg 263 drivers/usb/host/xhci-mem.c seg = ring->first_seg;
seg 265 drivers/usb/host/xhci-mem.c xhci_remove_segment_mapping(ring->trb_address_map, seg);
seg 266 drivers/usb/host/xhci-mem.c seg = seg->next;
seg 267 drivers/usb/host/xhci-mem.c } while (seg != ring->first_seg);
seg 1804 drivers/usb/host/xhci-mem.c struct xhci_segment *seg;
seg 1815 drivers/usb/host/xhci-mem.c seg = evt_ring->first_seg;
seg 1818 drivers/usb/host/xhci-mem.c entry->seg_addr = cpu_to_le64(seg->dma);
seg 1821 drivers/usb/host/xhci-mem.c seg = seg->next;
seg 1949 drivers/usb/host/xhci-mem.c struct xhci_segment *seg;
seg 1954 drivers/usb/host/xhci-mem.c seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
seg 1955 drivers/usb/host/xhci-mem.c if (seg != result_seg) {
seg 1967 drivers/usb/host/xhci-mem.c result_seg, seg);
seg 66 drivers/usb/host/xhci-ring.c dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
seg 71 drivers/usb/host/xhci-ring.c if (!seg || !trb || trb < seg->trbs)
seg 74 drivers/usb/host/xhci-ring.c segment_offset = trb - seg->trbs;
seg 77 drivers/usb/host/xhci-ring.c return seg->dma + (segment_offset * sizeof(*trb));
seg 90 drivers/usb/host/xhci-ring.c static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
seg 92 drivers/usb/host/xhci-ring.c return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
seg 96 drivers/usb/host/xhci-ring.c struct xhci_segment *seg, union xhci_trb *trb)
seg 98 drivers/usb/host/xhci-ring.c return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
seg 141 drivers/usb/host/xhci-ring.c struct xhci_segment **seg,
seg 145 drivers/usb/host/xhci-ring.c *seg = (*seg)->next;
seg 146 drivers/usb/host/xhci-ring.c *trb = ((*seg)->trbs);
seg 627 drivers/usb/host/xhci-ring.c struct xhci_segment *seg = td->start_seg;
seg 640 drivers/usb/host/xhci-ring.c next_trb(xhci, ep_ring, &seg, &trb);
seg 682 drivers/usb/host/xhci-ring.c struct xhci_segment *seg = td->bounce_seg;
seg 686 drivers/usb/host/xhci-ring.c if (!ring || !seg || !urb)
seg 690 drivers/usb/host/xhci-ring.c dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
seg 695 drivers/usb/host/xhci-ring.c dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
seg 698 drivers/usb/host/xhci-ring.c len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
seg 699 drivers/usb/host/xhci-ring.c seg->bounce_len, seg->bounce_offs);
seg 700 drivers/usb/host/xhci-ring.c if (len != seg->bounce_len)
seg 702 drivers/usb/host/xhci-ring.c len, seg->bounce_len);
seg 703 drivers/usb/host/xhci-ring.c seg->bounce_len = 0;
seg 704 drivers/usb/host/xhci-ring.c seg->bounce_offs = 0;
seg 2021 drivers/usb/host/xhci-ring.c struct xhci_segment *seg = ring->deq_seg;
seg 2023 drivers/usb/host/xhci-ring.c for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
seg 3227 drivers/usb/host/xhci-ring.c u32 *trb_buff_len, struct xhci_segment *seg)
seg 3265 drivers/usb/host/xhci-ring.c seg->bounce_buf, new_buff_len, enqd_len);
seg 3270 drivers/usb/host/xhci-ring.c seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
seg 3273 drivers/usb/host/xhci-ring.c seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
seg 3277 drivers/usb/host/xhci-ring.c if (dma_mapping_error(dev, seg->bounce_dma)) {
seg 3283 drivers/usb/host/xhci-ring.c seg->bounce_len = new_buff_len;
seg 3284 drivers/usb/host/xhci-ring.c seg->bounce_offs = enqd_len;
seg 43 drivers/usb/host/xhci.c struct xhci_segment *seg = ring->first_seg;
seg 48 drivers/usb/host/xhci.c if (seg == td->start_seg)
seg 50 drivers/usb/host/xhci.c seg = seg->next;
seg 51 drivers/usb/host/xhci.c } while (seg && seg != ring->first_seg);
seg 851 drivers/usb/host/xhci.c struct xhci_segment *seg;
seg 854 drivers/usb/host/xhci.c seg = ring->deq_seg;
seg 856 drivers/usb/host/xhci.c memset(seg->trbs, 0,
seg 858 drivers/usb/host/xhci.c seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
seg 860 drivers/usb/host/xhci.c seg = seg->next;
seg 861 drivers/usb/host/xhci.c } while (seg != ring->deq_seg);
seg 2080 drivers/usb/host/xhci.h dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
seg 1035 drivers/vhost/net.c int seg = 0;
seg 1045 drivers/vhost/net.c if (unlikely(seg >= UIO_MAXIOV)) {
seg 1049 drivers/vhost/net.c r = vhost_get_vq_desc(vq, vq->iov + seg,
seg 1050 drivers/vhost/net.c ARRAY_SIZE(vq->iov) - seg, &out,
seg 1071 drivers/vhost/net.c len = iov_length(vq->iov + seg, in);
seg 1075 drivers/vhost/net.c seg += in;
seg 1078 drivers/vhost/net.c *iovcount = seg;
seg 26 drivers/xen/dbgp.c dbgp.u.pci.seg = pci_domain_nr(pdev->bus);
seg 898 drivers/xen/gntdev.c struct gntdev_grant_copy_segment *seg,
seg 908 drivers/xen/gntdev.c if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
seg 912 drivers/xen/gntdev.c if (seg->flags & GNTCOPY_source_gref) {
seg 913 drivers/xen/gntdev.c if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
seg 916 drivers/xen/gntdev.c if (seg->flags & GNTCOPY_dest_gref) {
seg 917 drivers/xen/gntdev.c if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
seg 924 drivers/xen/gntdev.c while (copied < seg->len) {
seg 937 drivers/xen/gntdev.c len = seg->len - copied;
seg 942 drivers/xen/gntdev.c if (seg->flags & GNTCOPY_source_gref) {
seg 943 drivers/xen/gntdev.c op->source.u.ref = seg->source.foreign.ref;
seg 944 drivers/xen/gntdev.c op->source.domid = seg->source.foreign.domid;
seg 945 drivers/xen/gntdev.c op->source.offset = seg->source.foreign.offset + copied;
seg 948 drivers/xen/gntdev.c virt = seg->source.virt + copied;
seg 961 drivers/xen/gntdev.c if (seg->flags & GNTCOPY_dest_gref) {
seg 962 drivers/xen/gntdev.c op->dest.u.ref = seg->dest.foreign.ref;
seg 963 drivers/xen/gntdev.c op->dest.domid = seg->dest.foreign.domid;
seg 964 drivers/xen/gntdev.c op->dest.offset = seg->dest.foreign.offset + copied;
seg 967 drivers/xen/gntdev.c virt = seg->dest.virt + copied;
seg 1004 drivers/xen/gntdev.c struct gntdev_grant_copy_segment seg;
seg 1006 drivers/xen/gntdev.c if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
seg 1011 drivers/xen/gntdev.c ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
seg 50 drivers/xen/pci.c .add.seg = pci_domain_nr(pci_dev->bus),
seg 157 drivers/xen/pci.c .seg = pci_domain_nr(pci_dev->bus),
seg 117 drivers/xen/xen-pciback/pci_stub.c .seg = pci_domain_nr(dev->bus),
seg 402 drivers/xen/xen-pciback/pci_stub.c .seg = pci_domain_nr(dev->bus),
seg 253 drivers/xen/xen-scsiback.c static unsigned long vaddr(struct vscsibk_pend *req, int seg)
seg 255 drivers/xen/xen-scsiback.c return vaddr_page(req->pages[seg]);
seg 441 drivers/xen/xen-scsiback.c struct scsiif_request_segment *seg, struct page **pg,
seg 455 drivers/xen/xen-scsiback.c flags, seg[i].gref, info->domid);
seg 478 drivers/xen/xen-scsiback.c struct scsiif_request_segment *seg;
seg 500 drivers/xen/xen-scsiback.c err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
seg 508 drivers/xen/xen-scsiback.c n_segs = ring_req->seg[i].length /
seg 510 drivers/xen/xen-scsiback.c if ((unsigned)ring_req->seg[i].offset +
seg 511 drivers/xen/xen-scsiback.c (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
seg 513 drivers/xen/xen-scsiback.c ring_req->seg[i].length)
seg 539 drivers/xen/xen-scsiback.c seg = ring_req->seg;
seg 540 drivers/xen/xen-scsiback.c err = scsiback_gnttab_data_map_list(pending_req, seg,
seg 546 drivers/xen/xen-scsiback.c seg = (struct scsiif_request_segment *)(
seg 547 drivers/xen/xen-scsiback.c vaddr(pending_req, i) + ring_req->seg[i].offset);
seg 548 drivers/xen/xen-scsiback.c n_segs = ring_req->seg[i].length /
seg 550 drivers/xen/xen-scsiback.c err = scsiback_gnttab_data_map_list(pending_req, seg,
seg 557 drivers/xen/xen-scsiback.c end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
seg 558 drivers/xen/xen-scsiback.c seg = (struct scsiif_request_segment *)end_seg;
seg 559 drivers/xen/xen-scsiback.c end_seg += ring_req->seg[0].length;
seg 564 drivers/xen/xen-scsiback.c sg_set_page(sg, pg[i], seg->length, seg->offset);
seg 565 drivers/xen/xen-scsiback.c pending_req->data_len += seg->length;
seg 566 drivers/xen/xen-scsiback.c seg++;
seg 567 drivers/xen/xen-scsiback.c if (nr_sgl && (unsigned long)seg >= end_seg) {
seg 570 drivers/xen/xen-scsiback.c ring_req->seg[i_seg].offset;
seg 571 drivers/xen/xen-scsiback.c seg = (struct scsiif_request_segment *)end_seg;
seg 572 drivers/xen/xen-scsiback.c end_seg += ring_req->seg[i_seg].length;
seg 737 fs/binfmt_elf_fdpic.c struct elf32_fdpic_loadseg *seg;
seg 753 fs/binfmt_elf_fdpic.c size = sizeof(*loadmap) + nloads * sizeof(*seg);
seg 764 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
seg 785 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
seg 786 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
seg 787 fs/binfmt_elf_fdpic.c if (params->hdr.e_entry >= seg->p_vaddr &&
seg 788 fs/binfmt_elf_fdpic.c params->hdr.e_entry < seg->p_vaddr + seg->p_memsz) {
seg 790 fs/binfmt_elf_fdpic.c (params->hdr.e_entry - seg->p_vaddr) +
seg 791 fs/binfmt_elf_fdpic.c seg->addr;
seg 810 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
seg 811 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
seg 812 fs/binfmt_elf_fdpic.c if (phdr->p_vaddr >= seg->p_vaddr &&
seg 814 fs/binfmt_elf_fdpic.c seg->p_vaddr + seg->p_memsz) {
seg 816 fs/binfmt_elf_fdpic.c (phdr->p_vaddr - seg->p_vaddr) +
seg 817 fs/binfmt_elf_fdpic.c seg->addr +
seg 831 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
seg 832 fs/binfmt_elf_fdpic.c for (loop = loadmap->nsegs; loop > 0; loop--, seg++) {
seg 833 fs/binfmt_elf_fdpic.c if (phdr->p_vaddr >= seg->p_vaddr &&
seg 835 fs/binfmt_elf_fdpic.c seg->p_vaddr + seg->p_memsz) {
seg 840 fs/binfmt_elf_fdpic.c (phdr->p_vaddr - seg->p_vaddr) +
seg 841 fs/binfmt_elf_fdpic.c seg->addr;
seg 868 fs/binfmt_elf_fdpic.c seg = mseg + 1;
seg 871 fs/binfmt_elf_fdpic.c if (seg->p_vaddr - mseg->p_vaddr == seg->addr - mseg->addr) {
seg 873 fs/binfmt_elf_fdpic.c if (load_addr == (seg->addr & PAGE_MASK)) {
seg 877 fs/binfmt_elf_fdpic.c mseg->p_memsz += seg->addr & ~PAGE_MASK;
seg 878 fs/binfmt_elf_fdpic.c mseg->p_memsz += seg->p_memsz;
seg 885 fs/binfmt_elf_fdpic.c if (mseg != seg)
seg 886 fs/binfmt_elf_fdpic.c *mseg = *seg;
seg 895 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
seg 896 fs/binfmt_elf_fdpic.c for (loop = 0; loop < loadmap->nsegs; loop++, seg++)
seg 899 fs/binfmt_elf_fdpic.c seg->addr, seg->addr + seg->p_memsz - 1,
seg 900 fs/binfmt_elf_fdpic.c seg->p_vaddr, seg->p_memsz);
seg 920 fs/binfmt_elf_fdpic.c struct elf32_fdpic_loadseg *seg;
seg 926 fs/binfmt_elf_fdpic.c seg = params->loadmap->segs;
seg 960 fs/binfmt_elf_fdpic.c seg->addr = maddr + (phdr->p_vaddr - base);
seg 961 fs/binfmt_elf_fdpic.c seg->p_vaddr = phdr->p_vaddr;
seg 962 fs/binfmt_elf_fdpic.c seg->p_memsz = phdr->p_memsz;
seg 964 fs/binfmt_elf_fdpic.c ret = read_code(file, seg->addr, phdr->p_offset,
seg 971 fs/binfmt_elf_fdpic.c params->elfhdr_addr = seg->addr;
seg 975 fs/binfmt_elf_fdpic.c if (clear_user((void *) (seg->addr + phdr->p_filesz),
seg 983 fs/binfmt_elf_fdpic.c mm->start_code = seg->addr;
seg 984 fs/binfmt_elf_fdpic.c mm->end_code = seg->addr +
seg 988 fs/binfmt_elf_fdpic.c mm->start_data = seg->addr;
seg 989 fs/binfmt_elf_fdpic.c mm->end_data = seg->addr + phdr->p_memsz;
seg 993 fs/binfmt_elf_fdpic.c seg++;
seg 1008 fs/binfmt_elf_fdpic.c struct elf32_fdpic_loadseg *seg;
seg 1017 fs/binfmt_elf_fdpic.c seg = params->loadmap->segs;
seg 1097 fs/binfmt_elf_fdpic.c seg->addr = maddr + disp;
seg 1098 fs/binfmt_elf_fdpic.c seg->p_vaddr = phdr->p_vaddr;
seg 1099 fs/binfmt_elf_fdpic.c seg->p_memsz = phdr->p_memsz;
seg 1103 fs/binfmt_elf_fdpic.c params->elfhdr_addr = seg->addr;
seg 1169 fs/binfmt_elf_fdpic.c seg++;
seg 8704 fs/btrfs/inode.c int seg;
seg 8723 fs/btrfs/inode.c for (seg = 0; seg < iter->nr_segs; seg++) {
seg 8724 fs/btrfs/inode.c for (i = seg + 1; i < iter->nr_segs; i++) {
seg 8725 fs/btrfs/inode.c if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
seg 38 fs/cachefiles/key.c int loop, len, max, seg, mark, print;
seg 97 fs/cachefiles/key.c seg = 250;
seg 99 fs/cachefiles/key.c if (seg <= 0) {
seg 103 fs/cachefiles/key.c seg = 252;
seg 116 fs/cachefiles/key.c seg = 252;
seg 118 fs/cachefiles/key.c if (seg <= 0) {
seg 122 fs/cachefiles/key.c seg = 252;
seg 2528 fs/f2fs/segment.c struct curseg_info *seg, block_t start)
seg 2530 fs/f2fs/segment.c struct seg_entry *se = get_seg_entry(sbi, seg->segno);
seg 2542 fs/f2fs/segment.c seg->next_blkoff = pos;
seg 2551 fs/f2fs/segment.c struct curseg_info *seg)
seg 2553 fs/f2fs/segment.c if (seg->alloc_type == SSR)
seg 2554 fs/f2fs/segment.c __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
seg 2556 fs/f2fs/segment.c seg->next_blkoff++;
seg 31 fs/f2fs/segment.h #define IS_CURSEG(sbi, seg) \
seg 32 fs/f2fs/segment.h (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
seg 33 fs/f2fs/segment.h ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
seg 34 fs/f2fs/segment.h ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
seg 35 fs/f2fs/segment.h ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
seg 36 fs/f2fs/segment.h ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
seg 37 fs/f2fs/segment.h ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
seg 27 fs/nfsd/blocklayout.c struct nfsd4_layout_seg *seg = &args->lg_seg;
seg 35 fs/nfsd/blocklayout.c if (seg->offset & (block_size - 1)) {
seg 50 fs/nfsd/blocklayout.c error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length,
seg 51 fs/nfsd/blocklayout.c &iomap, seg->iomode != IOMODE_READ,
seg 66 fs/nfsd/blocklayout.c if (seg->iomode == IOMODE_READ)
seg 73 fs/nfsd/blocklayout.c if (seg->iomode & IOMODE_RW) {
seg 88 fs/nfsd/blocklayout.c if (seg->iomode == IOMODE_READ) {
seg 105 fs/nfsd/blocklayout.c seg->offset = iomap.offset;
seg 106 fs/nfsd/blocklayout.c seg->length = iomap.length;
seg 112 fs/nfsd/blocklayout.c seg->length = 0;
seg 115 fs/nfsd/blocklayout.c seg->length = 0;
seg 25 fs/nfsd/flexfilelayout.c struct nfsd4_layout_seg *seg = &args->lg_seg;
seg 52 fs/nfsd/flexfilelayout.c if (seg->iomode == IOMODE_READ) {
seg 67 fs/nfsd/flexfilelayout.c seg->offset = 0;
seg 68 fs/nfsd/flexfilelayout.c seg->length = NFS4_MAX_UINT64;
seg 70 fs/nfsd/flexfilelayout.c dprintk("GET: 0x%llx:0x%llx %d\n", seg->offset, seg->length,
seg 71 fs/nfsd/flexfilelayout.c seg->iomode);
seg 75 fs/nfsd/flexfilelayout.c seg->length = 0;
seg 340 fs/nfsd/nfs4layouts.c layout_end(struct nfsd4_layout_seg *seg)
seg 342 fs/nfsd/nfs4layouts.c u64 end = seg->offset + seg->length;
seg 343 fs/nfsd/nfs4layouts.c return end >= seg->offset ? end : NFS4_MAX_UINT64;
seg 404 fs/nfsd/nfs4layouts.c struct nfsd4_layout_seg *seg = &lgp->lg_seg;
seg 415 fs/nfsd/nfs4layouts.c if (layouts_try_merge(&lp->lo_seg, seg))
seg 424 fs/nfsd/nfs4layouts.c memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg));
seg 433 fs/nfsd/nfs4layouts.c if (layouts_try_merge(&lp->lo_seg, seg))
seg 464 fs/nfsd/nfs4layouts.c nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
seg 470 fs/nfsd/nfs4layouts.c if (seg->offset <= lo->offset) {
seg 471 fs/nfsd/nfs4layouts.c if (layout_end(seg) >= end) {
seg 475 fs/nfsd/nfs4layouts.c lo->offset = layout_end(seg);
seg 478 fs/nfsd/nfs4layouts.c if (layout_end(seg) < end) {
seg 482 fs/nfsd/nfs4layouts.c end = seg->offset;
seg 1688 fs/nfsd/nfs4proc.c const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
seg 1707 fs/nfsd/nfs4proc.c if (new_size <= seg->offset) {
seg 1711 fs/nfsd/nfs4proc.c if (new_size > seg->offset + seg->length) {
seg 164 fs/nilfs2/sufile.c __u64 *seg;
seg 172 fs/nilfs2/sufile.c for (seg = segnumv; seg < segnumv + nsegs; seg++) {
seg 173 fs/nilfs2/sufile.c if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
seg 176 fs/nilfs2/sufile.c __func__, (unsigned long long)*seg);
seg 189 fs/nilfs2/sufile.c seg = segnumv;
seg 190 fs/nilfs2/sufile.c blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
seg 196 fs/nilfs2/sufile.c dofunc(sufile, *seg, header_bh, bh);
seg 198 fs/nilfs2/sufile.c if (++seg >= segnumv + nsegs)
seg 201 fs/nilfs2/sufile.c blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
seg 214 fs/nilfs2/sufile.c n = seg - segnumv;
seg 769 fs/read_write.c unsigned long seg;
seg 813 fs/read_write.c for (seg = 0; seg < nr_segs; seg++) {
seg 814 fs/read_write.c void __user *buf = iov[seg].iov_base;
seg 815 fs/read_write.c ssize_t len = (ssize_t)iov[seg].iov_len;
seg 830 fs/read_write.c iov[seg].iov_len = len;
seg 848 fs/read_write.c int seg;
seg 883 fs/read_write.c for (seg = 0; seg < nr_segs; seg++) {
seg 90 include/asm-generic/uaccess.h #define segment_eq(a, b) ((a).seg == (b).seg)
seg 163 include/linux/bvec.h struct bio_vec *seg)
seg 168 include/linux/bvec.h seg->bv_page = bvec->bv_page + last_page;
seg 172 include/linux/bvec.h seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
seg 173 include/linux/bvec.h seg->bv_len = bvec->bv_len;
seg 175 include/linux/bvec.h seg->bv_offset = 0;
seg 176 include/linux/bvec.h seg->bv_len = total - last_page * PAGE_SIZE;
seg 64 include/linux/dmar.h u16 seg;
seg 98 include/linux/uio.h unsigned long seg;
seg 101 include/linux/uio.h for (seg = 0; seg < nr_segs; seg++)
seg 102 include/linux/uio.h ret += iov[seg].iov_len;
seg 202 include/xen/interface/io/blkif.h struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
seg 211 include/xen/interface/io/vscsiif.h struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
seg 254 include/xen/interface/physdev.h uint16_t seg;
seg 279 include/xen/interface/physdev.h uint16_t seg;
seg 63 ipc/msgutil.c struct msg_msgseg *seg;
seg 68 ipc/msgutil.c seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
seg 69 ipc/msgutil.c if (seg == NULL)
seg 71 ipc/msgutil.c *pseg = seg;
seg 72 ipc/msgutil.c seg->next = NULL;
seg 73 ipc/msgutil.c pseg = &seg->next;
seg 87 ipc/msgutil.c struct msg_msgseg *seg;
seg 99 ipc/msgutil.c for (seg = msg->next; seg != NULL; seg = seg->next) {
seg 103 ipc/msgutil.c if (copy_from_user(seg + 1, src, alen))
seg 153 ipc/msgutil.c struct msg_msgseg *seg;
seg 159 ipc/msgutil.c for (seg = msg->next; seg != NULL; seg = seg->next) {
seg 163 ipc/msgutil.c if (copy_to_user(dest, seg + 1, alen))
seg 171 ipc/msgutil.c struct msg_msgseg *seg;
seg 175 ipc/msgutil.c seg = msg->next;
seg 177 ipc/msgutil.c while (seg != NULL) {
seg 178 ipc/msgutil.c struct msg_msgseg *tmp = seg->next;
seg 181 ipc/msgutil.c kfree(seg);
seg 182 ipc/msgutil.c seg = tmp;
seg 94 kernel/rcu/rcu_segcblist.h static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
seg 96 kernel/rcu/rcu_segcblist.h return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
seg 154 mm/swap.c int seg;
seg 156 mm/swap.c for (seg = 0; seg < nr_segs; seg++) {
seg 157 mm/swap.c if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
seg 158 mm/swap.c return seg;
seg 160 mm/swap.c pages[seg] = kmap_to_page(kiov[seg].iov_base);
seg 161 mm/swap.c get_page(pages[seg]);
seg 164 mm/swap.c return seg;
seg 216 net/caif/cfrfml.c u8 seg;
seg 241 net/caif/cfrfml.c seg = 1;
seg 244 net/caif/cfrfml.c if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
seg 273 net/caif/cfrfml.c seg = 0;
seg 276 net/caif/cfrfml.c if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
seg 2377 net/core/skbuff.c int seg;
seg 2396 net/core/skbuff.c for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
seg 2397 net/core/skbuff.c const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
seg 192 net/ipv4/udp_offload.c struct sk_buff *segs, *seg;
seg 224 net/ipv4/udp_offload.c seg = segs;
seg 225 net/ipv4/udp_offload.c uh = udp_hdr(seg);
seg 228 net/ipv4/udp_offload.c skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
seg 229 net/ipv4/udp_offload.c skb_shinfo(seg)->tx_flags |=
seg 238 net/ipv4/udp_offload.c seg->destructor = sock_wfree;
seg 239 net/ipv4/udp_offload.c seg->sk = sk;
seg 240 net/ipv4/udp_offload.c sum_truesize += seg->truesize;
seg 243 net/ipv4/udp_offload.c if (!seg->next)
seg 249 net/ipv4/udp_offload.c if (seg->ip_summed == CHECKSUM_PARTIAL)
seg 250 net/ipv4/udp_offload.c gso_reset_checksum(seg, ~check);
seg 252 net/ipv4/udp_offload.c uh->check = gso_make_checksum(seg, ~check) ? :
seg 255 net/ipv4/udp_offload.c seg = seg->next;
seg 256 net/ipv4/udp_offload.c uh = udp_hdr(seg);
seg 260 net/ipv4/udp_offload.c newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
seg 261 net/ipv4/udp_offload.c seg->data_len);
seg 267 net/ipv4/udp_offload.c if (seg->ip_summed == CHECKSUM_PARTIAL)
seg 268 net/ipv4/udp_offload.c gso_reset_checksum(seg, ~check);
seg 270 net/ipv4/udp_offload.c uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
seg 323 net/sunrpc/xprtrdma/frwr_ops.c struct rpcrdma_mr_seg *seg,
seg 336 net/sunrpc/xprtrdma/frwr_ops.c if (seg->mr_page)
seg 338 net/sunrpc/xprtrdma/frwr_ops.c seg->mr_page,
seg 339 net/sunrpc/xprtrdma/frwr_ops.c seg->mr_len,
seg 340 net/sunrpc/xprtrdma/frwr_ops.c offset_in_page(seg->mr_offset));
seg 342 net/sunrpc/xprtrdma/frwr_ops.c sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
seg 343 net/sunrpc/xprtrdma/frwr_ops.c seg->mr_len);
seg 345 net/sunrpc/xprtrdma/frwr_ops.c ++seg;
seg 349 net/sunrpc/xprtrdma/frwr_ops.c if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
seg 350 net/sunrpc/xprtrdma/frwr_ops.c offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
seg 383 net/sunrpc/xprtrdma/frwr_ops.c return seg;
seg 194 net/sunrpc/xprtrdma/rpc_rdma.c rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
seg 204 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_page = NULL;
seg 205 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_offset = base;
seg 206 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
seg 207 net/sunrpc/xprtrdma/rpc_rdma.c remaining -= seg->mr_len;
seg 208 net/sunrpc/xprtrdma/rpc_rdma.c base += seg->mr_len;
seg 209 net/sunrpc/xprtrdma/rpc_rdma.c ++seg;
seg 213 net/sunrpc/xprtrdma/rpc_rdma.c return seg;
seg 226 net/sunrpc/xprtrdma/rpc_rdma.c struct rpcrdma_mr_seg *seg)
seg 234 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
seg 249 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_page = *ppages;
seg 250 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_offset = (char *)page_base;
seg 251 net/sunrpc/xprtrdma/rpc_rdma.c seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
seg 252 net/sunrpc/xprtrdma/rpc_rdma.c len -= seg->mr_len;
seg 254 net/sunrpc/xprtrdma/rpc_rdma.c ++seg;
seg 274 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
seg 347 net/sunrpc/xprtrdma/rpc_rdma.c struct rpcrdma_mr_seg *seg,
seg 361 net/sunrpc/xprtrdma/rpc_rdma.c return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
seg 391 net/sunrpc/xprtrdma/rpc_rdma.c struct rpcrdma_mr_seg *seg;
seg 402 net/sunrpc/xprtrdma/rpc_rdma.c seg = req->rl_segments;
seg 404 net/sunrpc/xprtrdma/rpc_rdma.c rtype, seg);
seg 409 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
seg 410 net/sunrpc/xprtrdma/rpc_rdma.c if (IS_ERR(seg))
seg 411 net/sunrpc/xprtrdma/rpc_rdma.c return PTR_ERR(seg);
seg 446 net/sunrpc/xprtrdma/rpc_rdma.c struct rpcrdma_mr_seg *seg;
seg 454 net/sunrpc/xprtrdma/rpc_rdma.c seg = req->rl_segments;
seg 457 net/sunrpc/xprtrdma/rpc_rdma.c wtype, seg);
seg 470 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
seg 471 net/sunrpc/xprtrdma/rpc_rdma.c if (IS_ERR(seg))
seg 472 net/sunrpc/xprtrdma/rpc_rdma.c return PTR_ERR(seg);
seg 509 net/sunrpc/xprtrdma/rpc_rdma.c struct rpcrdma_mr_seg *seg;
seg 517 net/sunrpc/xprtrdma/rpc_rdma.c seg = req->rl_segments;
seg 518 net/sunrpc/xprtrdma/rpc_rdma.c nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
seg 531 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
seg 532 net/sunrpc/xprtrdma/rpc_rdma.c if (IS_ERR(seg))
seg 533 net/sunrpc/xprtrdma/rpc_rdma.c return PTR_ERR(seg);
seg 410 net/sunrpc/xprtrdma/svc_rdma_rw.c __be32 *seg;
seg 413 net/sunrpc/xprtrdma/svc_rdma_rw.c seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
seg 422 net/sunrpc/xprtrdma/svc_rdma_rw.c seg_handle = be32_to_cpup(seg);
seg 423 net/sunrpc/xprtrdma/svc_rdma_rw.c seg_length = be32_to_cpup(seg + 1);
seg 424 net/sunrpc/xprtrdma/svc_rdma_rw.c xdr_decode_hyper(seg + 2, &seg_offset);
seg 445 net/sunrpc/xprtrdma/svc_rdma_rw.c seg += 4;
seg 555 net/sunrpc/xprtrdma/xprt_rdma.h struct rpcrdma_mr_seg *seg,
seg 86 sound/core/oss/mulaw.c int seg;
seg 101 sound/core/oss/mulaw.c seg = val_seg(pcm_val);
seg 107 sound/core/oss/mulaw.c uval = (seg << 4) | ((pcm_val >> (seg + 3)) & 0xF);
seg 1920 sound/pci/cs46xx/dsp_spos.c struct dsp_segment_desc *seg;
seg 1923 sound/pci/cs46xx/dsp_spos.c seg = get_segment_desc(module, SEGTYPE_SP_PARAMETER);
seg 1924 sound/pci/cs46xx/dsp_spos.c err = dsp_load_parameter(chip, seg);
seg 1928 sound/pci/cs46xx/dsp_spos.c seg = get_segment_desc(module, SEGTYPE_SP_SAMPLE);
seg 1929 sound/pci/cs46xx/dsp_spos.c err = dsp_load_sample(chip, seg);
seg 1933 sound/pci/cs46xx/dsp_spos.c seg = get_segment_desc(module, SEGTYPE_SP_PROGRAM);
seg 1934 sound/pci/cs46xx/dsp_spos.c if (!seg)
seg 1937 sound/pci/cs46xx/dsp_spos.c doffset = seg->offset * 4 + module->load_address * 4
seg 1939 sound/pci/cs46xx/dsp_spos.c dsize = seg->size * 4;
seg 3120 tools/lib/bpf/libbpf.c const struct btf_ext_info *seg;
seg 3144 tools/lib/bpf/libbpf.c seg = &obj->btf_ext->offset_reloc_info;
seg 3145 tools/lib/bpf/libbpf.c for_each_btf_ext_sec(seg, sec) {
seg 3162 tools/lib/bpf/libbpf.c for_each_btf_ext_rec(seg, sec, i, rec) {
seg 81 tools/lib/bpf/libbpf_internal.h #define for_each_btf_ext_sec(seg, sec) \
seg 82 tools/lib/bpf/libbpf_internal.h for (sec = (seg)->info; \
seg 83 tools/lib/bpf/libbpf_internal.h (void *)sec < (seg)->info + (seg)->len; \
seg 85 tools/lib/bpf/libbpf_internal.h (seg)->rec_size * sec->num_info)
seg 87 tools/lib/bpf/libbpf_internal.h #define for_each_btf_ext_rec(seg, sec, i, rec) \
seg 90 tools/lib/bpf/libbpf_internal.h i++, rec = (void *)rec + (seg)->rec_size)
seg 289 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c struct ip6_addr_t *seg;
seg 303 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh));
seg 307 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c seg->lo = bpf_cpu_to_be64(4 - lo);
seg 308 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c seg->hi = bpf_cpu_to_be64(hi);
seg 309 tools/testing/selftests/bpf/progs/test_lwt_seg6local.c seg = (struct ip6_addr_t *)((char *)seg + sizeof(*seg));
seg 2073 virt/kvm/kvm_main.c int seg;
seg 2077 virt/kvm/kvm_main.c while ((seg = next_segment(len, offset)) != 0) {
seg 2078 virt/kvm/kvm_main.c ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
seg 2082 virt/kvm/kvm_main.c len -= seg;
seg 2083 virt/kvm/kvm_main.c data += seg;
seg 2093 virt/kvm/kvm_main.c int seg;
seg 2097 virt/kvm/kvm_main.c while ((seg = next_segment(len, offset)) != 0) {
seg 2098 virt/kvm/kvm_main.c ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
seg 2102 virt/kvm/kvm_main.c len -= seg;
seg 2103 virt/kvm/kvm_main.c data += seg;
seg 2187 virt/kvm/kvm_main.c int seg;
seg 2191 virt/kvm/kvm_main.c while ((seg = next_segment(len, offset)) != 0) {
seg 2192 virt/kvm/kvm_main.c ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
seg 2196 virt/kvm/kvm_main.c len -= seg;
seg 2197 virt/kvm/kvm_main.c data += seg;
seg 2208 virt/kvm/kvm_main.c int seg;
seg 2212 virt/kvm/kvm_main.c while ((seg = next_segment(len, offset)) != 0) {
seg 2213 virt/kvm/kvm_main.c ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
seg 2217 virt/kvm/kvm_main.c len -= seg;
seg 2218 virt/kvm/kvm_main.c data += seg;
seg 2342 virt/kvm/kvm_main.c int seg;
seg 2346 virt/kvm/kvm_main.c while ((seg = next_segment(len, offset)) != 0) {
seg 2347 virt/kvm/kvm_main.c ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
seg 2351 virt/kvm/kvm_main.c len -= seg;