/linux-4.1.27/tools/testing/selftests/memory-hotplug/

  Makefile:
    5: TEST_PROGS := mem-on-off-test.sh
    6: override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]"
    10: @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"

/linux-4.1.27/tools/testing/selftests/cpu-hotplug/

  Makefile:
    3: TEST_PROGS := cpu-on-off-test.sh
    8: @/bin/bash ./cpu-on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"

/linux-4.1.27/arch/sh/include/mach-sdk7786/mach/

  irq.h:
    4: /* arch/sh/boards/mach-sdk7786/irq.c */

/linux-4.1.27/tools/testing/selftests/firmware/

  Makefile:
    6: TEST_PROGS := fw_filesystem.sh fw_userhelper.sh

/linux-4.1.27/arch/sh/kernel/vsyscall/

  vsyscall-syscall.S:
    7: .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"

  vsyscall.lds.S:
    9: OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
    13: OUTPUT_ARCH(sh)

/linux-4.1.27/arch/sh/include/cpu-sh4a/cpu/

  serial.h:
    4: /* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */

/linux-4.1.27/tools/testing/selftests/efivarfs/

  Makefile:
    7: TEST_PROGS := efivarfs.sh

/linux-4.1.27/drivers/scsi/
H A D | aha1542.c | 185 static int aha1542_test_port(struct Scsi_Host *sh) aha1542_test_port() argument 191 if (inb(STATUS(sh->io_port)) == 0xff) aha1542_test_port() 197 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ aha1542_test_port() 199 outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); aha1542_test_port() 204 if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) aha1542_test_port() 208 if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) aha1542_test_port() 214 aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_test_port() 217 if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) aha1542_test_port() 219 inquiry_result[i] = inb(DATA(sh->io_port)); aha1542_test_port() 223 if (inb(STATUS(sh->io_port)) & DF) aha1542_test_port() 227 if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) aha1542_test_port() 231 outb(IRST, CONTROL(sh->io_port)); aha1542_test_port() 238 struct Scsi_Host *sh = dev_id; aha1542_interrupt() local 239 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_interrupt() 251 flag = inb(INTRFLAGS(sh->io_port)); aha1542_interrupt() 252 shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); aha1542_interrupt() 263 printk("status %02x\n", inb(STATUS(sh->io_port))); aha1542_interrupt() 268 spin_lock_irqsave(sh->host_lock, flags); aha1542_interrupt() 270 flag = inb(INTRFLAGS(sh->io_port)); aha1542_interrupt() 284 aha1542_intr_reset(sh->io_port); aha1542_interrupt() 299 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_interrupt() 302 shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); aha1542_interrupt() 313 shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", aha1542_interrupt() 321 shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); aha1542_interrupt() 327 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_interrupt() 328 shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); aha1542_interrupt() 329 shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, aha1542_interrupt() 355 shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, aha1542_interrupt() 370 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) aha1542_queuecommand() argument 372 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_queuecommand() 396 shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", aha1542_queuecommand() 411 spin_lock_irqsave(sh->host_lock, flags); aha1542_queuecommand() 433 shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); aha1542_queuecommand() 464 shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr); 484 spin_unlock_irqrestore(sh->host_lock, flags); 490 static void setup_mailboxes(struct Scsi_Host *sh) setup_mailboxes() argument 492 struct aha1542_hostdata *aha1542 = shost_priv(sh); setup_mailboxes() 503 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ setup_mailboxes() 505 if (aha1542_out(sh->io_port, mb_cmd, 5)) setup_mailboxes() 506 shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); setup_mailboxes() 507 aha1542_intr_reset(sh->io_port); setup_mailboxes() 510 static int aha1542_getconfig(struct Scsi_Host *sh) aha1542_getconfig() argument 514 i = inb(STATUS(sh->io_port)); aha1542_getconfig() 516 i = inb(DATA(sh->io_port)); aha1542_getconfig() 518 aha1542_outb(sh->io_port, CMD_RETCONF); aha1542_getconfig() 519 aha1542_in(sh->io_port, inquiry_result, 3, 0); aha1542_getconfig() 520 if 
(!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) aha1542_getconfig() 521 shost_printk(KERN_ERR, sh, "error querying board settings\n"); aha1542_getconfig() 522 aha1542_intr_reset(sh->io_port); aha1542_getconfig() 525 sh->dma_channel = 7; aha1542_getconfig() 528 sh->dma_channel = 6; aha1542_getconfig() 531 sh->dma_channel = 5; aha1542_getconfig() 534 sh->dma_channel = 0; aha1542_getconfig() 539 sh->dma_channel = 0xFF; aha1542_getconfig() 542 shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); aha1542_getconfig() 547 sh->irq = 15; aha1542_getconfig() 550 sh->irq = 14; aha1542_getconfig() 553 sh->irq = 12; aha1542_getconfig() 556 sh->irq = 11; aha1542_getconfig() 559 sh->irq = 10; aha1542_getconfig() 562 sh->irq = 9; aha1542_getconfig() 565 shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); aha1542_getconfig() 568 sh->this_id = inquiry_result[2] & 7; aha1542_getconfig() 575 static int aha1542_mbenable(struct Scsi_Host *sh) aha1542_mbenable() argument 583 aha1542_outb(sh->io_port, CMD_EXTBIOS); aha1542_mbenable() 584 if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) aha1542_mbenable() 586 if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) aha1542_mbenable() 588 aha1542_intr_reset(sh->io_port); aha1542_mbenable() 598 if (aha1542_out(sh->io_port, mbenable_cmd, 3)) aha1542_mbenable() 603 shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); aha1542_mbenable() 605 aha1542_intr_reset(sh->io_port); aha1542_mbenable() 610 static int aha1542_query(struct Scsi_Host *sh) aha1542_query() argument 612 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_query() 615 i = inb(STATUS(sh->io_port)); aha1542_query() 617 i = inb(DATA(sh->io_port)); aha1542_query() 619 aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_query() 620 aha1542_in(sh->io_port, inquiry_result, 4, 0); aha1542_query() 621 if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) aha1542_query() 622 shost_printk(KERN_ERR, sh, "error querying card type\n"); aha1542_query() 623 aha1542_intr_reset(sh->io_port); aha1542_query() 634 shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); aha1542_query() 641 aha1542->bios_translation = aha1542_mbenable(sh); aha1542_query() 665 static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) aha1542_set_bus_times() argument 670 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times() 671 if (aha1542_out(sh->io_port, oncmd, 2)) aha1542_set_bus_times() 678 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times() 679 if (aha1542_out(sh->io_port, offcmd, 2)) aha1542_set_bus_times() 686 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times() 687 if (aha1542_out(sh->io_port, dmacmd, 2)) aha1542_set_bus_times() 690 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times() 693 shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); aha1542_set_bus_times() 694 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times() 701 struct Scsi_Host *sh; aha1542_hw_init() local 711 sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); aha1542_hw_init() 712 if (!sh) aha1542_hw_init() 714 aha1542 = shost_priv(sh); aha1542_hw_init() 716 sh->unique_id = base_io; aha1542_hw_init() 717 sh->io_port = base_io; aha1542_hw_init() 718 sh->n_io_port = AHA1542_REGION_SIZE; aha1542_hw_init() 722 if (!aha1542_test_port(sh)) aha1542_hw_init() 725 aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); aha1542_hw_init() 726 if (aha1542_query(sh)) 
aha1542_hw_init() 728 if (aha1542_getconfig(sh) == -1) aha1542_hw_init() 731 if (sh->dma_channel != 0xFF) aha1542_hw_init() 732 snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); aha1542_hw_init() 733 shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", aha1542_hw_init() 734 sh->this_id, base_io, sh->irq, dma_info); aha1542_hw_init() 736 shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); aha1542_hw_init() 738 setup_mailboxes(sh); aha1542_hw_init() 740 if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { aha1542_hw_init() 741 shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); aha1542_hw_init() 744 if (sh->dma_channel != 0xFF) { aha1542_hw_init() 745 if (request_dma(sh->dma_channel, "aha1542")) { aha1542_hw_init() 746 shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); aha1542_hw_init() 749 if (sh->dma_channel == 0 || sh->dma_channel >= 5) { aha1542_hw_init() 750 set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); aha1542_hw_init() 751 enable_dma(sh->dma_channel); aha1542_hw_init() 755 if (scsi_add_host(sh, pdev)) aha1542_hw_init() 758 scsi_scan_host(sh); aha1542_hw_init() 760 return sh; aha1542_hw_init() 762 if (sh->dma_channel != 0xff) aha1542_hw_init() 763 free_dma(sh->dma_channel); aha1542_hw_init() 765 free_irq(sh->irq, sh); aha1542_hw_init() 767 scsi_host_put(sh); aha1542_hw_init() 774 static int aha1542_release(struct Scsi_Host *sh) aha1542_release() argument 776 scsi_remove_host(sh); aha1542_release() 777 if (sh->dma_channel != 0xff) aha1542_release() 778 free_dma(sh->dma_channel); aha1542_release() 779 if (sh->irq) aha1542_release() 780 free_irq(sh->irq, sh); aha1542_release() 781 if (sh->io_port && sh->n_io_port) aha1542_release() 782 release_region(sh->io_port, sh->n_io_port); aha1542_release() 783 scsi_host_put(sh); aha1542_release() 794 struct Scsi_Host *sh = cmd->device->host; aha1542_dev_reset() local 795 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_dev_reset() 803 spin_lock_irqsave(sh->host_lock, flags); aha1542_dev_reset() 840 aha1542_outb(sh->io_port, CMD_START_SCSI); aha1542_dev_reset() 841 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_dev_reset() 851 struct Scsi_Host *sh = cmd->device->host; aha1542_reset() local 852 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_reset() 856 spin_lock_irqsave(sh->host_lock, flags); aha1542_reset() 867 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_reset() 907 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_reset() 960 struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); aha1542_isa_match() local 962 if (!sh) aha1542_isa_match() 965 dev_set_drvdata(pdev, sh); aha1542_isa_match() 996 struct Scsi_Host *sh; aha1542_pnp_probe() local 1013 sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); aha1542_pnp_probe() 1014 if (!sh) aha1542_pnp_probe() 1017 pnp_set_drvdata(pdev, sh); aha1542_pnp_probe()
H A D | eata_pio.c | 113 static int eata_pio_release(struct Scsi_Host *sh) eata_pio_release() argument 115 hostdata *hd = SD(sh); eata_pio_release() 116 if (sh->irq && reg_IRQ[sh->irq] == 1) eata_pio_release() 117 free_irq(sh->irq, NULL); eata_pio_release() 119 reg_IRQ[sh->irq]--; eata_pio_release() 120 if (SD(sh)->channel == 0) { eata_pio_release() 121 if (sh->io_port && sh->n_io_port) eata_pio_release() 122 release_region(sh->io_port, sh->n_io_port); eata_pio_release() 166 struct Scsi_Host *sh; eata_pio_int_handler() local 171 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) eata_pio_int_handler() 173 if (sh->irq != irq) eata_pio_int_handler() 175 if (inb(sh->base + HA_RSTATUS) & HA_SBUSY) eata_pio_int_handler() 181 hd = SD(sh); eata_pio_int_handler() 309 struct Scsi_Host *sh; eata_pio_queue_lck() local 315 sh = cmd->device->host; eata_pio_queue_lck() 316 base = sh->base; eata_pio_queue_lck() 324 DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n", sh->can_queue, x, y)); eata_pio_queue_lck() 399 "slot %d irq %d\n", sh->base, cmd, y, sh->irq)); eata_pio_queue_lck() 662 struct Scsi_Host *sh; register_pio_HBA() local 685 sh = scsi_register(&driver_template, size); register_pio_HBA() 686 if (sh == NULL) register_pio_HBA() 690 if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) { register_pio_HBA() 706 hd = SD(sh); register_pio_HBA() 711 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); register_pio_HBA() 712 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); register_pio_HBA() 713 SD(sh)->revision[0] = buff[32]; register_pio_HBA() 714 SD(sh)->revision[1] = buff[33]; register_pio_HBA() 715 SD(sh)->revision[2] = buff[34]; register_pio_HBA() 716 SD(sh)->revision[3] = '.'; register_pio_HBA() 717 SD(sh)->revision[4] = buff[35]; register_pio_HBA() 718 SD(sh)->revision[5] = 0; register_pio_HBA() 722 SD(sh)->EATA_revision = 'a'; register_pio_HBA() 725 SD(sh)->EATA_revision = 'b'; register_pio_HBA() 728 SD(sh)->EATA_revision = 'c'; register_pio_HBA() 731 SD(sh)->EATA_revision = 'z'; register_pio_HBA() 733 SD(sh)->EATA_revision = '?'; register_pio_HBA() 752 SD(sh)->cplen = cplen; register_pio_HBA() 753 SD(sh)->cppadlen = cppadlen; register_pio_HBA() 754 SD(sh)->hostid = gc->scsi_id[3]; register_pio_HBA() 755 SD(sh)->devflags = 1 << gc->scsi_id[3]; register_pio_HBA() 756 SD(sh)->moresupport = gc->MORE_support; register_pio_HBA() 757 sh->unique_id = base; register_pio_HBA() 758 sh->base = base; register_pio_HBA() 759 sh->io_port = base; register_pio_HBA() 760 sh->n_io_port = 9; register_pio_HBA() 761 sh->irq = gc->IRQ; register_pio_HBA() 762 sh->dma_channel = PIO; register_pio_HBA() 763 sh->this_id = gc->scsi_id[3]; register_pio_HBA() 764 sh->can_queue = 1; register_pio_HBA() 765 sh->cmd_per_lun = 1; register_pio_HBA() 766 sh->sg_tablesize = SG_ALL; register_pio_HBA() 772 sh->max_id = 8; register_pio_HBA() 773 sh->max_lun = 8; register_pio_HBA() 783 SD(hd->prev)->next = sh; register_pio_HBA() 784 last_HBA = sh; register_pio_HBA() 786 first_HBA = sh; register_pio_HBA()
H A D | u14-34f.c | 609 static struct Scsi_Host *sh[MAX_BOARDS + 1]; variable in typeref:struct:Scsi_Host 630 #define HD(board) ((struct hostdata *) &sh[board]->hostdata) 763 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { board_inquiry() 771 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); board_inquiry() 774 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); board_inquiry() 777 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); board_inquiry() 891 sh[j] = scsi_register(tpnt, sizeof(struct hostdata)); port_detect() 894 if (sh[j] == NULL) { port_detect() 899 sh[j]->io_port = port_base; port_detect() 900 sh[j]->unique_id = port_base; port_detect() 901 sh[j]->n_io_port = REGION_SIZE; port_detect() 902 sh[j]->base = bios_segment_table[config_1.bios_segment]; port_detect() 903 sh[j]->irq = irq; port_detect() 904 sh[j]->sg_tablesize = MAX_SGLIST; port_detect() 905 sh[j]->this_id = config_2.ha_scsi_id; port_detect() 906 sh[j]->can_queue = MAX_MAILBOXES; port_detect() 907 sh[j]->cmd_per_lun = MAX_CMD_PER_LUN; port_detect() 913 sys_mask = inb(sh[j]->io_port + REG_SYS_MASK); port_detect() 914 lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK); port_detect() 920 if (sh[j]->this_id == 0) sh[j]->this_id = -1; port_detect() 923 if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK); port_detect() 932 if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST; port_detect() 935 sh[j]->unchecked_isa_dma = FALSE; port_detect() 936 sh[j]->dma_channel = NO_DMA; port_detect() 942 sh[j]->unchecked_isa_dma = TRUE; port_detect() 951 sh[j]->dma_channel = dma_channel; port_detect() 956 sh[j]->max_channel = MAX_CHANNEL - 1; port_detect() 957 sh[j]->max_id = MAX_TARGET; port_detect() 958 sh[j]->max_lun = MAX_LUN; port_detect() 967 sh[j]->hostt->use_clustering = DISABLE_CLUSTERING; port_detect() 968 sh[j]->sg_tablesize = MAX_SAFE_SGLIST; port_detect() 977 for (i = 0; i < sh[j]->can_queue; i++) port_detect() 981 for (i = 0; i < sh[j]->can_queue; i++) port_detect() 983 sh[j]->sg_tablesize * sizeof(struct sg_list), port_detect() 984 (sh[j]->unchecked_isa_dma ? 
GFP_DMA : 0) | GFP_ATOMIC))) { port_detect() 1005 BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base, port_detect() 1006 sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); port_detect() 1008 if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) port_detect() 1010 BN(j), sh[j]->max_id, sh[j]->max_lun); port_detect() 1012 for (i = 0; i <= sh[j]->max_channel; i++) port_detect() 1014 BN(j), i, sh[j]->this_id); port_detect() 1029 u14_34f_release(sh[j]); port_detect() 1099 for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL; u14_34f_detect() 1265 for (k = 0; k < sh[j]->can_queue; k++, i++) { u14_34f_queuecommand_lck() 1267 if (i >= sh[j]->can_queue) i = 0; u14_34f_queuecommand_lck() 1275 if (k == sh[j]->can_queue) { u14_34f_queuecommand_lck() 1313 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_queuecommand_lck() 1322 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); u14_34f_queuecommand_lck() 1325 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); u14_34f_queuecommand_lck() 1346 if (i >= sh[j]->can_queue) u14_34f_eh_abort() 1349 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_abort() 1366 if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED) u14_34f_eh_abort() 1404 spin_lock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1411 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1415 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_host_reset() 1417 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1423 for (c = 0; c <= sh[j]->max_channel; c++) u14_34f_eh_host_reset() 1424 for (k = 0; k < sh[j]->max_id; k++) { u14_34f_eh_host_reset() 1429 for (i = 0; i < sh[j]->can_queue; i++) { u14_34f_eh_host_reset() 1464 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_host_reset() 1466 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1470 outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR); u14_34f_eh_host_reset() 1479 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1482 spin_lock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1486 for (i = 0; i < sh[j]->can_queue; i++) { u14_34f_eh_host_reset() 1526 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset() 1684 for (k = 0; k < sh[j]->can_queue; k++) { flush_dev() 1702 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { flush_dev() 1711 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); flush_dev() 1712 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); flush_dev() 1723 int irq = sh[j]->irq; ihdlr() 1726 if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none; ihdlr() 1734 if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) { ihdlr() 1735 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); ihdlr() 1741 ret = inl(sh[j]->io_port + REG_ICM); ihdlr() 1744 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); ihdlr() 1747 for (i = 0; i < sh[j]->can_queue; i++) ihdlr() 1750 if (i >= sh[j]->can_queue) ihdlr() 1862 for (c = 0; c <= sh[j]->max_channel; c++) ihdlr() 1863 for (k = 0; k < sh[j]->max_id; k++) ihdlr() 1936 spin_lock_irqsave(sh[j]->host_lock, spin_flags); do_interrupt_handler() 1938 spin_unlock_irqrestore(sh[j]->host_lock, spin_flags); do_interrupt_handler() 1945 for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++); u14_34f_release() 1947 if (sh[j] == NULL) u14_34f_release() 1950 for (i = 0; i < sh[j]->can_queue; i++) u14_34f_release() 1953 for (i = 0; i < sh[j]->can_queue; i++) u14_34f_release() 1957 free_irq(sh[j]->irq, &sha[j]); u14_34f_release() 1959 if (sh[j]->dma_channel != NO_DMA) u14_34f_release() 1960 free_dma(sh[j]->dma_channel); u14_34f_release() 1962 
release_region(sh[j]->io_port, sh[j]->n_io_port); u14_34f_release() 1963 scsi_unregister(sh[j]); u14_34f_release()
H A D | wd719x.c | 201 static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) wd719x_queuecommand() argument 206 struct wd719x *wd = shost_priv(sh); wd719x_queuecommand() 212 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_queuecommand() 218 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand() 221 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_queuecommand() 225 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand() 263 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand() 288 spin_unlock_irqrestore(wd->sh->host_lock, flags); 297 spin_unlock_irqrestore(wd->sh->host_lock, flags); 474 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_abort() 477 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_abort() 493 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_reset() 496 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_reset() 521 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_host_reset() 533 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_host_reset() 658 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_interrupt() 666 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_interrupt() 703 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_interrupt() 803 static int wd719x_board_found(struct Scsi_Host *sh) wd719x_board_found() argument 805 struct wd719x *wd = shost_priv(sh); wd719x_board_found() 812 sh->base = pci_resource_start(wd->pdev, 0); wd719x_board_found() 816 wd->sh = sh; wd719x_board_found() 817 sh->irq = wd->pdev->irq; wd719x_board_found() 853 sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK; wd719x_board_found() 856 card_types[wd->type], sh->base, sh->irq, sh->this_id); wd719x_board_found() 892 struct Scsi_Host *sh; wd719x_pci_probe() local 914 sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); wd719x_pci_probe() 915 if (!sh) wd719x_pci_probe() 918 wd = shost_priv(sh); wd719x_pci_probe() 924 err = wd719x_board_found(sh); wd719x_pci_probe() 928 err = scsi_add_host(sh, &wd->pdev->dev); wd719x_pci_probe() 932 scsi_scan_host(sh); wd719x_pci_probe() 934 pci_set_drvdata(pdev, sh); wd719x_pci_probe() 942 scsi_host_put(sh); wd719x_pci_probe() 954 struct Scsi_Host *sh = pci_get_drvdata(pdev); wd719x_pci_remove() local 955 struct wd719x *wd = shost_priv(sh); wd719x_pci_remove() 957 scsi_remove_host(sh); wd719x_pci_remove() 963 scsi_host_put(sh); wd719x_pci_remove()
/linux-4.1.27/drivers/md/
H A D | raid5.c | 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 188 static inline int raid6_d0(struct stripe_head *sh) raid6_d0() argument 190 if (sh->ddf_layout) raid6_d0() 194 if (sh->qd_idx == sh->disks - 1) raid6_d0() 197 return sh->qd_idx + 1; raid6_d0() 210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, raid6_idx_to_slot() argument 215 if (sh->ddf_layout) raid6_idx_to_slot() 217 if (idx == sh->pd_idx) raid6_idx_to_slot() 219 if (idx == sh->qd_idx) raid6_idx_to_slot() 221 if (!sh->ddf_layout) raid6_idx_to_slot() 243 static int stripe_operations_active(struct stripe_head *sh) stripe_operations_active() argument 245 return sh->check_state || sh->reconstruct_state || stripe_operations_active() 246 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || stripe_operations_active() 247 test_bit(STRIPE_COMPUTE_RUN, &sh->state); stripe_operations_active() 250 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) raid5_wakeup_stripe_thread() argument 252 struct r5conf *conf = sh->raid_conf; raid5_wakeup_stripe_thread() 255 int i, cpu = sh->cpu; raid5_wakeup_stripe_thread() 259 sh->cpu = cpu; raid5_wakeup_stripe_thread() 262 if (list_empty(&sh->lru)) { raid5_wakeup_stripe_thread() 265 list_add_tail(&sh->lru, &group->handle_list); raid5_wakeup_stripe_thread() 267 sh->group = group; raid5_wakeup_stripe_thread() 275 group = conf->worker_groups + cpu_to_group(sh->cpu); raid5_wakeup_stripe_thread() 279 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); raid5_wakeup_stripe_thread() 286 queue_work_on(sh->cpu, raid5_wq, raid5_wakeup_stripe_thread() 293 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, do_release_stripe() argument 296 BUG_ON(!list_empty(&sh->lru)); do_release_stripe() 298 if (test_bit(STRIPE_HANDLE, &sh->state)) { do_release_stripe() 299 if (test_bit(STRIPE_DELAYED, &sh->state) && do_release_stripe() 300 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) do_release_stripe() 301 list_add_tail(&sh->lru, &conf->delayed_list); do_release_stripe() 302 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && do_release_stripe() 303 sh->bm_seq - conf->seq_write > 0) do_release_stripe() 304 list_add_tail(&sh->lru, &conf->bitmap_list); do_release_stripe() 306 clear_bit(STRIPE_DELAYED, &sh->state); do_release_stripe() 307 clear_bit(STRIPE_BIT_DELAY, &sh->state); do_release_stripe() 309 list_add_tail(&sh->lru, &conf->handle_list); do_release_stripe() 311 raid5_wakeup_stripe_thread(sh); do_release_stripe() 317 BUG_ON(stripe_operations_active(sh)); do_release_stripe() 318 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) do_release_stripe() 323 if (!test_bit(STRIPE_EXPANDING, &sh->state)) do_release_stripe() 324 list_add_tail(&sh->lru, temp_inactive_list); do_release_stripe() 328 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, __release_stripe() argument 331 if (atomic_dec_and_test(&sh->count)) __release_stripe() 332 do_release_stripe(conf, sh, temp_inactive_list); __release_stripe() 386 struct stripe_head *sh; release_stripe_list() local 395 sh = llist_entry(head, struct stripe_head, release_list); release_stripe_list() 397 /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ release_stripe_list() 399 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); release_stripe_list() 405 hash = sh->hash_lock_index; release_stripe_list() 406 __release_stripe(conf, sh, &temp_inactive_list[hash]); release_stripe_list() 413 static void release_stripe(struct stripe_head *sh) release_stripe() argument 415 
struct r5conf *conf = sh->raid_conf; release_stripe() 423 if (atomic_add_unless(&sh->count, -1, 1)) release_stripe() 427 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) release_stripe() 429 wakeup = llist_add(&sh->release_list, &conf->released_stripes); release_stripe() 436 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { release_stripe() 438 hash = sh->hash_lock_index; release_stripe() 439 do_release_stripe(conf, sh, &list); release_stripe() 446 static inline void remove_hash(struct stripe_head *sh) remove_hash() argument 449 (unsigned long long)sh->sector); remove_hash() 451 hlist_del_init(&sh->hash); remove_hash() 454 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) insert_hash() argument 456 struct hlist_head *hp = stripe_hash(conf, sh->sector); insert_hash() 459 (unsigned long long)sh->sector); insert_hash() 461 hlist_add_head(&sh->hash, hp); insert_hash() 467 struct stripe_head *sh = NULL; get_free_stripe() local 473 sh = list_entry(first, struct stripe_head, lru); get_free_stripe() 475 remove_hash(sh); get_free_stripe() 477 BUG_ON(hash != sh->hash_lock_index); get_free_stripe() 481 return sh; get_free_stripe() 484 static void shrink_buffers(struct stripe_head *sh) shrink_buffers() argument 488 int num = sh->raid_conf->pool_size; shrink_buffers() 491 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); shrink_buffers() 492 p = sh->dev[i].page; shrink_buffers() 495 sh->dev[i].page = NULL; shrink_buffers() 500 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) grow_buffers() argument 503 int num = sh->raid_conf->pool_size; grow_buffers() 511 sh->dev[i].page = page; grow_buffers() 512 sh->dev[i].orig_page = page; grow_buffers() 517 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 519 struct stripe_head *sh); 521 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) init_stripe() argument 523 struct r5conf *conf = sh->raid_conf; init_stripe() 526 BUG_ON(atomic_read(&sh->count) != 0); init_stripe() 527 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); init_stripe() 528 BUG_ON(stripe_operations_active(sh)); init_stripe() 529 BUG_ON(sh->batch_head); init_stripe() 535 sh->generation = conf->generation - previous; init_stripe() 536 sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; init_stripe() 537 sh->sector = sector; init_stripe() 538 stripe_set_idx(sector, conf, previous, sh); init_stripe() 539 sh->state = 0; init_stripe() 541 for (i = sh->disks; i--; ) { init_stripe() 542 struct r5dev *dev = &sh->dev[i]; init_stripe() 547 (unsigned long long)sh->sector, i, dev->toread, init_stripe() 553 raid5_build_block(sh, i, previous); init_stripe() 557 sh->overwrite_disks = 0; init_stripe() 558 insert_hash(conf, sh); init_stripe() 559 sh->cpu = smp_processor_id(); init_stripe() 560 set_bit(STRIPE_BATCH_READY, &sh->state); init_stripe() 566 struct stripe_head *sh; __find_stripe() local 569 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) __find_stripe() 570 if (sh->sector == sector && sh->generation == generation) __find_stripe() 571 return sh; __find_stripe() 662 struct stripe_head *sh; get_active_stripe() local 673 sh = __find_stripe(conf, sector, conf->generation - previous); get_active_stripe() 674 if (!sh) { get_active_stripe() 676 sh = get_free_stripe(conf, hash); get_active_stripe() 677 if (!sh && llist_empty(&conf->released_stripes) && get_active_stripe() 682 if (noblock && sh == NULL) get_active_stripe() 684 if (!sh) { get_active_stripe() 698 init_stripe(sh, sector, previous); get_active_stripe() 699 atomic_inc(&sh->count); get_active_stripe() 701 } else if (!atomic_inc_not_zero(&sh->count)) { get_active_stripe() 703 if (!atomic_read(&sh->count)) { get_active_stripe() 704 if (!test_bit(STRIPE_HANDLE, &sh->state)) get_active_stripe() 706 BUG_ON(list_empty(&sh->lru) && get_active_stripe() 707 !test_bit(STRIPE_EXPANDING, &sh->state)); get_active_stripe() 708 list_del_init(&sh->lru); get_active_stripe() 709 if (sh->group) { get_active_stripe() 710 sh->group->stripes_cnt--; get_active_stripe() 711 sh->group = NULL; get_active_stripe() 714 atomic_inc(&sh->count); get_active_stripe() 717 } while (sh == NULL); get_active_stripe() 720 return sh; get_active_stripe() 723 static bool is_full_stripe_write(struct stripe_head *sh) is_full_stripe_write() argument 725 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); is_full_stripe_write() 726 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); is_full_stripe_write() 749 static bool stripe_can_batch(struct stripe_head *sh) stripe_can_batch() argument 751 return test_bit(STRIPE_BATCH_READY, &sh->state) && stripe_can_batch() 752 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && stripe_can_batch() 753 is_full_stripe_write(sh); stripe_can_batch() 757 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) stripe_add_to_batch_list() argument 764 if (!stripe_can_batch(sh)) stripe_add_to_batch_list() 767 tmp_sec = sh->sector; stripe_add_to_batch_list() 770 head_sector = sh->sector - STRIPE_SECTORS; stripe_add_to_batch_list() 798 lock_two_stripes(head, sh); stripe_add_to_batch_list() 800 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) stripe_add_to_batch_list() 803 if (sh->batch_head) stripe_add_to_batch_list() 807 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) stripe_add_to_batch_list() 809 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) stripe_add_to_batch_list() 824 list_add(&sh->batch_list, &head->batch_list); stripe_add_to_batch_list() 827 sh->batch_head = head->batch_head; stripe_add_to_batch_list() 830 sh->batch_head = head->batch_head; stripe_add_to_batch_list() 832 list_add_tail(&sh->batch_list, &head->batch_list); stripe_add_to_batch_list() 836 if 
(test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) stripe_add_to_batch_list() 841 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { stripe_add_to_batch_list() 842 int seq = sh->bm_seq; stripe_add_to_batch_list() 843 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && stripe_add_to_batch_list() 844 sh->batch_head->bm_seq > seq) stripe_add_to_batch_list() 845 seq = sh->batch_head->bm_seq; stripe_add_to_batch_list() 846 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); stripe_add_to_batch_list() 847 sh->batch_head->bm_seq = seq; stripe_add_to_batch_list() 850 atomic_inc(&sh->count); stripe_add_to_batch_list() 852 unlock_two_stripes(head, sh); stripe_add_to_batch_list() 860 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) use_new_offset() argument 870 if (sh->generation == conf->generation - 1) use_new_offset() 883 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) ops_run_io() argument 885 struct r5conf *conf = sh->raid_conf; ops_run_io() 886 int i, disks = sh->disks; ops_run_io() 887 struct stripe_head *head_sh = sh; ops_run_io() 897 sh = head_sh; ops_run_io() 898 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { ops_run_io() 899 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) ops_run_io() 903 if (test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_io() 905 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) ops_run_io() 908 &sh->dev[i].flags)) { ops_run_io() 913 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) ops_run_io() 917 bi = &sh->dev[i].req; ops_run_io() 918 rbi = &sh->dev[i].rreq; /* For writing to replacement */ ops_run_io() 958 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, ops_run_io() 992 set_bit(STRIPE_IO_STARTED, &sh->state); ops_run_io() 1000 bi->bi_private = sh; ops_run_io() 1003 __func__, (unsigned long long)sh->sector, ops_run_io() 1005 atomic_inc(&sh->count); ops_run_io() 1006 if (sh != head_sh) ops_run_io() 1008 if (use_new_offset(conf, sh)) ops_run_io() 1009 bi->bi_iter.bi_sector = (sh->sector ops_run_io() 1012 bi->bi_iter.bi_sector = (sh->sector ops_run_io() 1017 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) ops_run_io() 1018 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); ops_run_io() 1019 sh->dev[i].vec.bv_page = sh->dev[i].page; ops_run_io() 1031 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); ops_run_io() 1036 sh->dev[i].sector); ops_run_io() 1044 set_bit(STRIPE_IO_STARTED, &sh->state); ops_run_io() 1051 rbi->bi_private = sh; ops_run_io() 1055 __func__, (unsigned long long)sh->sector, ops_run_io() 1057 atomic_inc(&sh->count); ops_run_io() 1058 if (sh != head_sh) ops_run_io() 1060 if (use_new_offset(conf, sh)) ops_run_io() 1061 rbi->bi_iter.bi_sector = (sh->sector ops_run_io() 1064 rbi->bi_iter.bi_sector = (sh->sector ops_run_io() 1066 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) ops_run_io() 1067 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); ops_run_io() 1068 sh->dev[i].rvec.bv_page = sh->dev[i].page; ops_run_io() 1082 sh->dev[i].sector); ops_run_io() 1087 set_bit(STRIPE_DEGRADED, &sh->state); ops_run_io() 1089 bi->bi_rw, i, (unsigned long long)sh->sector); ops_run_io() 1090 clear_bit(R5_LOCKED, &sh->dev[i].flags); ops_run_io() 1091 set_bit(STRIPE_HANDLE, &sh->state); ops_run_io() 1096 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_io() 1098 if (sh != head_sh) ops_run_io() 1106 struct stripe_head *sh) async_copy_data() 1144 if (sh->raid_conf->skip_copy && bio_for_each_segment() 1168 struct stripe_head *sh = stripe_head_ref; 
ops_complete_biofill() local 1173 (unsigned long long)sh->sector); ops_complete_biofill() 1176 for (i = sh->disks; i--; ) { ops_complete_biofill() 1177 struct r5dev *dev = &sh->dev[i]; ops_complete_biofill() 1201 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); ops_complete_biofill() 1205 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_biofill() 1206 release_stripe(sh); ops_complete_biofill() 1209 static void ops_run_biofill(struct stripe_head *sh) ops_run_biofill() argument 1215 BUG_ON(sh->batch_head); ops_run_biofill() 1217 (unsigned long long)sh->sector); ops_run_biofill() 1219 for (i = sh->disks; i--; ) { ops_run_biofill() 1220 struct r5dev *dev = &sh->dev[i]; ops_run_biofill() 1223 spin_lock_irq(&sh->stripe_lock); ops_run_biofill() 1226 spin_unlock_irq(&sh->stripe_lock); ops_run_biofill() 1230 dev->sector, tx, sh); ops_run_biofill() 1236 atomic_inc(&sh->count); ops_run_biofill() 1237 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); ops_run_biofill() 1241 static void mark_target_uptodate(struct stripe_head *sh, int target) mark_target_uptodate() argument 1248 tgt = &sh->dev[target]; mark_target_uptodate() 1256 struct stripe_head *sh = stripe_head_ref; ops_complete_compute() local 1259 (unsigned long long)sh->sector); ops_complete_compute() 1262 mark_target_uptodate(sh, sh->ops.target); ops_complete_compute() 1263 mark_target_uptodate(sh, sh->ops.target2); ops_complete_compute() 1265 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); ops_complete_compute() 1266 if (sh->check_state == check_state_compute_run) ops_complete_compute() 1267 sh->check_state = check_state_compute_result; ops_complete_compute() 1268 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_compute() 1269 release_stripe(sh); ops_complete_compute() 1273 static addr_conv_t *to_addr_conv(struct stripe_head *sh, to_addr_conv() argument 1279 return addr + sizeof(struct page *) * (sh->disks + 2); to_addr_conv() 1292 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute5() argument 1294 int disks = sh->disks; ops_run_compute5() 1296 int target = sh->ops.target; ops_run_compute5() 1297 struct r5dev *tgt = &sh->dev[target]; ops_run_compute5() 1304 BUG_ON(sh->batch_head); ops_run_compute5() 1307 __func__, (unsigned long long)sh->sector, target); ops_run_compute5() 1312 xor_srcs[count++] = sh->dev[i].page; ops_run_compute5() 1314 atomic_inc(&sh->count); ops_run_compute5() 1317 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); ops_run_compute5() 1327 * @srcs - (struct page *) array of size sh->disks 1328 * @sh - stripe_head to parse 1336 struct stripe_head *sh, set_syndrome_sources() 1339 int disks = sh->disks; set_syndrome_sources() 1340 int syndrome_disks = sh->ddf_layout ? 
disks : (disks - 2); set_syndrome_sources() 1341 int d0_idx = raid6_d0(sh); set_syndrome_sources() 1351 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); set_syndrome_sources() 1352 struct r5dev *dev = &sh->dev[i]; set_syndrome_sources() 1354 if (i == sh->qd_idx || i == sh->pd_idx || set_syndrome_sources() 1360 srcs[slot] = sh->dev[i].page; set_syndrome_sources() 1368 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute6_1() argument 1370 int disks = sh->disks; ops_run_compute6_1() 1373 int qd_idx = sh->qd_idx; ops_run_compute6_1() 1381 BUG_ON(sh->batch_head); ops_run_compute6_1() 1382 if (sh->ops.target < 0) ops_run_compute6_1() 1383 target = sh->ops.target2; ops_run_compute6_1() 1384 else if (sh->ops.target2 < 0) ops_run_compute6_1() 1385 target = sh->ops.target; ops_run_compute6_1() 1391 __func__, (unsigned long long)sh->sector, target); ops_run_compute6_1() 1393 tgt = &sh->dev[target]; ops_run_compute6_1() 1397 atomic_inc(&sh->count); ops_run_compute6_1() 1400 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); ops_run_compute6_1() 1404 ops_complete_compute, sh, ops_run_compute6_1() 1405 to_addr_conv(sh, percpu, 0)); ops_run_compute6_1() 1413 blocks[count++] = sh->dev[i].page; ops_run_compute6_1() 1417 NULL, ops_complete_compute, sh, ops_run_compute6_1() 1418 to_addr_conv(sh, percpu, 0)); ops_run_compute6_1() 1426 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute6_2() argument 1428 int i, count, disks = sh->disks; ops_run_compute6_2() 1429 int syndrome_disks = sh->ddf_layout ? disks : disks-2; ops_run_compute6_2() 1430 int d0_idx = raid6_d0(sh); ops_run_compute6_2() 1432 int target = sh->ops.target; ops_run_compute6_2() 1433 int target2 = sh->ops.target2; ops_run_compute6_2() 1434 struct r5dev *tgt = &sh->dev[target]; ops_run_compute6_2() 1435 struct r5dev *tgt2 = &sh->dev[target2]; ops_run_compute6_2() 1440 BUG_ON(sh->batch_head); ops_run_compute6_2() 1442 __func__, (unsigned long long)sh->sector, target, target2); ops_run_compute6_2() 1455 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); ops_run_compute6_2() 1457 blocks[slot] = sh->dev[i].page; ops_run_compute6_2() 1470 __func__, (unsigned long long)sh->sector, faila, failb); ops_run_compute6_2() 1472 atomic_inc(&sh->count); ops_run_compute6_2() 1479 ops_complete_compute, sh, ops_run_compute6_2() 1480 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2() 1486 int qd_idx = sh->qd_idx; ops_run_compute6_2() 1498 blocks[count++] = sh->dev[i].page; ops_run_compute6_2() 1500 dest = sh->dev[data_target].page; ops_run_compute6_2() 1504 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2() 1508 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); ops_run_compute6_2() 1510 ops_complete_compute, sh, ops_run_compute6_2() 1511 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2() 1517 ops_complete_compute, sh, ops_run_compute6_2() 1518 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2() 1535 struct stripe_head *sh = stripe_head_ref; ops_complete_prexor() local 1538 (unsigned long long)sh->sector); ops_complete_prexor() 1542 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_prexor5() argument 1545 int disks = sh->disks; ops_run_prexor5() 1547 int count = 0, pd_idx = sh->pd_idx, i; ops_run_prexor5() 1551 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; ops_run_prexor5() 1553 BUG_ON(sh->batch_head); ops_run_prexor5() 1555 (unsigned long long)sh->sector); ops_run_prexor5() 1558 struct r5dev *dev = 
&sh->dev[i]; ops_run_prexor5() 1565 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); ops_run_prexor5() 1572 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_prexor6() argument 1580 (unsigned long long)sh->sector); ops_run_prexor6() 1582 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); ops_run_prexor6() 1585 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); ops_run_prexor6() 1592 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) ops_run_biodrain() argument 1594 int disks = sh->disks; ops_run_biodrain() 1596 struct stripe_head *head_sh = sh; ops_run_biodrain() 1599 (unsigned long long)sh->sector); ops_run_biodrain() 1605 sh = head_sh; ops_run_biodrain() 1610 dev = &sh->dev[i]; ops_run_biodrain() 1611 spin_lock_irq(&sh->stripe_lock); ops_run_biodrain() 1614 sh->overwrite_disks = 0; ops_run_biodrain() 1617 spin_unlock_irq(&sh->stripe_lock); ops_run_biodrain() 1630 dev->sector, tx, sh); ops_run_biodrain() 1641 sh = list_first_entry(&sh->batch_list, ops_run_biodrain() 1644 if (sh == head_sh) ops_run_biodrain() 1656 struct stripe_head *sh = stripe_head_ref; ops_complete_reconstruct() local 1657 int disks = sh->disks; ops_complete_reconstruct() 1658 int pd_idx = sh->pd_idx; ops_complete_reconstruct() 1659 int qd_idx = sh->qd_idx; ops_complete_reconstruct() 1664 (unsigned long long)sh->sector); ops_complete_reconstruct() 1667 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); ops_complete_reconstruct() 1668 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); ops_complete_reconstruct() 1669 discard |= test_bit(R5_Discard, &sh->dev[i].flags); ops_complete_reconstruct() 1673 struct r5dev *dev = &sh->dev[i]; ops_complete_reconstruct() 1685 if (sh->reconstruct_state == reconstruct_state_drain_run) ops_complete_reconstruct() 1686 sh->reconstruct_state = reconstruct_state_drain_result; ops_complete_reconstruct() 1687 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) ops_complete_reconstruct() 1688 sh->reconstruct_state = reconstruct_state_prexor_drain_result; ops_complete_reconstruct() 1690 BUG_ON(sh->reconstruct_state != reconstruct_state_run); ops_complete_reconstruct() 1691 sh->reconstruct_state = reconstruct_state_result; ops_complete_reconstruct() 1694 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_reconstruct() 1695 release_stripe(sh); ops_complete_reconstruct() 1699 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_reconstruct5() argument 1702 int disks = sh->disks; ops_run_reconstruct5() 1705 int count, pd_idx = sh->pd_idx, i; ops_run_reconstruct5() 1710 struct stripe_head *head_sh = sh; ops_run_reconstruct5() 1714 (unsigned long long)sh->sector); ops_run_reconstruct5() 1716 for (i = 0; i < sh->disks; i++) { ops_run_reconstruct5() 1719 if (!test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_reconstruct5() 1722 if (i >= sh->disks) { ops_run_reconstruct5() 1723 atomic_inc(&sh->count); ops_run_reconstruct5() 1724 set_bit(R5_Discard, &sh->dev[pd_idx].flags); ops_run_reconstruct5() 1725 ops_complete_reconstruct(sh); ops_run_reconstruct5() 1736 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; ops_run_reconstruct5() 1738 struct r5dev *dev = &sh->dev[i]; ops_run_reconstruct5() 1743 xor_dest = sh->dev[pd_idx].page; ops_run_reconstruct5() 1745 struct r5dev *dev = &sh->dev[i]; ops_run_reconstruct5() 1757 list_first_entry(&sh->batch_list, ops_run_reconstruct5() 1765 to_addr_conv(sh, percpu, j)); ops_run_reconstruct5() 1769 to_addr_conv(sh, percpu, j)); ops_run_reconstruct5() 
1778 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_reconstruct5() 1785 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_reconstruct6() argument 1791 struct stripe_head *head_sh = sh; ops_run_reconstruct6() 1796 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); ops_run_reconstruct6() 1798 for (i = 0; i < sh->disks; i++) { ops_run_reconstruct6() 1799 if (sh->pd_idx == i || sh->qd_idx == i) ops_run_reconstruct6() 1801 if (!test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_reconstruct6() 1804 if (i >= sh->disks) { ops_run_reconstruct6() 1805 atomic_inc(&sh->count); ops_run_reconstruct6() 1806 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); ops_run_reconstruct6() 1807 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); ops_run_reconstruct6() 1808 ops_complete_reconstruct(sh); ops_run_reconstruct6() 1815 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { ops_run_reconstruct6() 1823 count = set_syndrome_sources(blocks, sh, synflags); ops_run_reconstruct6() 1825 list_first_entry(&sh->batch_list, ops_run_reconstruct6() 1831 head_sh, to_addr_conv(sh, percpu, j)); ops_run_reconstruct6() 1834 to_addr_conv(sh, percpu, j)); ops_run_reconstruct6() 1838 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_reconstruct6() 1846 struct stripe_head *sh = stripe_head_ref; ops_complete_check() local 1849 (unsigned long long)sh->sector); ops_complete_check() 1851 sh->check_state = check_state_check_result; ops_complete_check() 1852 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_check() 1853 release_stripe(sh); ops_complete_check() 1856 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_check_p() argument 1858 int disks = sh->disks; ops_run_check_p() 1859 int pd_idx = sh->pd_idx; ops_run_check_p() 1860 int qd_idx = sh->qd_idx; ops_run_check_p() 1869 (unsigned long long)sh->sector); ops_run_check_p() 1871 BUG_ON(sh->batch_head); ops_run_check_p() 1873 xor_dest = sh->dev[pd_idx].page; ops_run_check_p() 1878 xor_srcs[count++] = sh->dev[i].page; ops_run_check_p() 1882 to_addr_conv(sh, percpu, 0)); ops_run_check_p() 1884 &sh->ops.zero_sum_result, &submit); ops_run_check_p() 1886 atomic_inc(&sh->count); ops_run_check_p() 1887 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); ops_run_check_p() 1891 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) ops_run_check_pq() argument 1898 (unsigned long long)sh->sector, checkp); ops_run_check_pq() 1900 BUG_ON(sh->batch_head); ops_run_check_pq() 1901 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); ops_run_check_pq() 1905 atomic_inc(&sh->count); ops_run_check_pq() 1907 sh, to_addr_conv(sh, percpu, 0)); ops_run_check_pq() 1909 &sh->ops.zero_sum_result, percpu->spare_page, &submit); ops_run_check_pq() 1912 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) raid_run_ops() argument 1914 int overlap_clear = 0, i, disks = sh->disks; raid_run_ops() 1916 struct r5conf *conf = sh->raid_conf; raid_run_ops() 1924 ops_run_biofill(sh); raid_run_ops() 1930 tx = ops_run_compute5(sh, percpu); raid_run_ops() 1932 if (sh->ops.target2 < 0 || sh->ops.target < 0) raid_run_ops() 1933 tx = ops_run_compute6_1(sh, percpu); raid_run_ops() 1935 tx = ops_run_compute6_2(sh, percpu); raid_run_ops() 1944 tx = ops_run_prexor5(sh, percpu, tx); raid_run_ops() 1946 tx = ops_run_prexor6(sh, percpu, tx); raid_run_ops() 1950 tx = ops_run_biodrain(sh, tx); raid_run_ops() 1956 
ops_run_reconstruct5(sh, percpu, tx); raid_run_ops() 1958 ops_run_reconstruct6(sh, percpu, tx); raid_run_ops() 1962 if (sh->check_state == check_state_run) raid_run_ops() 1963 ops_run_check_p(sh, percpu); raid_run_ops() 1964 else if (sh->check_state == check_state_run_q) raid_run_ops() 1965 ops_run_check_pq(sh, percpu, 0); raid_run_ops() 1966 else if (sh->check_state == check_state_run_pq) raid_run_ops() 1967 ops_run_check_pq(sh, percpu, 1); raid_run_ops() 1972 if (overlap_clear && !sh->batch_head) raid_run_ops() 1974 struct r5dev *dev = &sh->dev[i]; raid_run_ops() 1976 wake_up(&sh->raid_conf->wait_for_overlap); raid_run_ops() 1983 struct stripe_head *sh; alloc_stripe() local 1985 sh = kmem_cache_zalloc(sc, gfp); alloc_stripe() 1986 if (sh) { alloc_stripe() 1987 spin_lock_init(&sh->stripe_lock); alloc_stripe() 1988 spin_lock_init(&sh->batch_lock); alloc_stripe() 1989 INIT_LIST_HEAD(&sh->batch_list); alloc_stripe() 1990 INIT_LIST_HEAD(&sh->lru); alloc_stripe() 1991 atomic_set(&sh->count, 1); alloc_stripe() 1993 return sh; alloc_stripe() 1997 struct stripe_head *sh; grow_one_stripe() local 1999 sh = alloc_stripe(conf->slab_cache, gfp); grow_one_stripe() 2000 if (!sh) grow_one_stripe() 2003 sh->raid_conf = conf; grow_one_stripe() 2005 if (grow_buffers(sh, gfp)) { grow_one_stripe() 2006 shrink_buffers(sh); grow_one_stripe() 2007 kmem_cache_free(conf->slab_cache, sh); grow_one_stripe() 2010 sh->hash_lock_index = grow_one_stripe() 2015 release_stripe(sh); grow_one_stripe() 2259 struct stripe_head *sh; drop_one_stripe() local 2263 sh = get_free_stripe(conf, hash); drop_one_stripe() 2265 if (!sh) drop_one_stripe() 2267 BUG_ON(atomic_read(&sh->count)); drop_one_stripe() 2268 shrink_buffers(sh); drop_one_stripe() 2269 kmem_cache_free(conf->slab_cache, sh); drop_one_stripe() 2288 struct stripe_head *sh = bi->bi_private; raid5_end_read_request() local 2289 struct r5conf *conf = sh->raid_conf; raid5_end_read_request() 2290 int disks = sh->disks, i; raid5_end_read_request() 2297 if (bi == &sh->dev[i].req) raid5_end_read_request() 2301 (unsigned long long)sh->sector, i, atomic_read(&sh->count), raid5_end_read_request() 2307 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) raid5_end_read_request() 2317 if (use_new_offset(conf, sh)) raid5_end_read_request() 2318 s = sh->sector + rdev->new_data_offset; raid5_end_read_request() 2320 s = sh->sector + rdev->data_offset; raid5_end_read_request() 2322 set_bit(R5_UPTODATE, &sh->dev[i].flags); raid5_end_read_request() 2323 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { raid5_end_read_request() 2336 clear_bit(R5_ReadError, &sh->dev[i].flags); raid5_end_read_request() 2337 clear_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_read_request() 2338 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) raid5_end_read_request() 2339 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request() 2348 clear_bit(R5_UPTODATE, &sh->dev[i].flags); raid5_end_read_request() 2350 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) raid5_end_read_request() 2367 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { raid5_end_read_request() 2385 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) raid5_end_read_request() 2388 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { raid5_end_read_request() 2389 set_bit(R5_ReadError, &sh->dev[i].flags); raid5_end_read_request() 2390 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request() 2392 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request() 2394 clear_bit(R5_ReadError, &sh->dev[i].flags); 
raid5_end_read_request() 2395 clear_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_read_request() 2399 rdev, sh->sector, STRIPE_SECTORS, 0))) raid5_end_read_request() 2404 clear_bit(R5_LOCKED, &sh->dev[i].flags); raid5_end_read_request() 2405 set_bit(STRIPE_HANDLE, &sh->state); raid5_end_read_request() 2406 release_stripe(sh); raid5_end_read_request() 2411 struct stripe_head *sh = bi->bi_private; raid5_end_write_request() local 2412 struct r5conf *conf = sh->raid_conf; raid5_end_write_request() 2413 int disks = sh->disks, i; raid5_end_write_request() 2421 if (bi == &sh->dev[i].req) { raid5_end_write_request() 2425 if (bi == &sh->dev[i].rreq) { raid5_end_write_request() 2439 (unsigned long long)sh->sector, i, atomic_read(&sh->count), raid5_end_write_request() 2449 else if (is_badblock(rdev, sh->sector, raid5_end_write_request() 2452 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); raid5_end_write_request() 2455 set_bit(STRIPE_DEGRADED, &sh->state); raid5_end_write_request() 2457 set_bit(R5_WriteError, &sh->dev[i].flags); raid5_end_write_request() 2461 } else if (is_badblock(rdev, sh->sector, raid5_end_write_request() 2464 set_bit(R5_MadeGood, &sh->dev[i].flags); raid5_end_write_request() 2465 if (test_bit(R5_ReadError, &sh->dev[i].flags)) raid5_end_write_request() 2470 set_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_write_request() 2475 if (sh->batch_head && !uptodate && !replacement) raid5_end_write_request() 2476 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); raid5_end_write_request() 2478 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) raid5_end_write_request() 2479 clear_bit(R5_LOCKED, &sh->dev[i].flags); raid5_end_write_request() 2480 set_bit(STRIPE_HANDLE, &sh->state); raid5_end_write_request() 2481 release_stripe(sh); raid5_end_write_request() 2483 if (sh->batch_head && sh != sh->batch_head) raid5_end_write_request() 2484 release_stripe(sh->batch_head); raid5_end_write_request() 2487 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2489 static void raid5_build_block(struct stripe_head *sh, int i, int previous) raid5_build_block() argument 2491 struct r5dev *dev = &sh->dev[i]; raid5_build_block() 2496 dev->req.bi_private = sh; raid5_build_block() 2501 dev->rreq.bi_private = sh; raid5_build_block() 2504 dev->sector = compute_blocknr(sh, i, previous); raid5_build_block() 2538 struct stripe_head *sh) raid5_compute_sector() 2726 if (sh) { raid5_compute_sector() 2727 sh->pd_idx = pd_idx; raid5_compute_sector() 2728 sh->qd_idx = qd_idx; raid5_compute_sector() 2729 sh->ddf_layout = ddf_layout; raid5_compute_sector() 2738 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) compute_blocknr() argument 2740 struct r5conf *conf = sh->raid_conf; compute_blocknr() 2741 int raid_disks = sh->disks; compute_blocknr() 2743 sector_t new_sector = sh->sector, check; compute_blocknr() 2758 if (i == sh->pd_idx) compute_blocknr() 2766 if (i > sh->pd_idx) compute_blocknr() 2771 if (i < sh->pd_idx) compute_blocknr() 2773 i -= (sh->pd_idx + 1); compute_blocknr() 2785 if (i == sh->qd_idx) compute_blocknr() 2792 if (sh->pd_idx == raid_disks-1) compute_blocknr() 2794 else if (i > sh->pd_idx) compute_blocknr() 2799 if (sh->pd_idx == raid_disks-1) compute_blocknr() 2803 if (i < sh->pd_idx) compute_blocknr() 2805 i -= (sh->pd_idx + 2); compute_blocknr() 2815 if (sh->pd_idx == 0) compute_blocknr() 2819 if (i < sh->pd_idx) compute_blocknr() 2821 i -= (sh->pd_idx + 1); compute_blocknr() 2826 if (i > sh->pd_idx) compute_blocknr() 2831 if (i < 
sh->pd_idx) compute_blocknr() 2833 i -= (sh->pd_idx + 1); compute_blocknr() 2849 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx compute_blocknr() 2850 || sh2.qd_idx != sh->qd_idx) { compute_blocknr() 2859 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, schedule_reconstruction() argument 2862 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; schedule_reconstruction() 2863 struct r5conf *conf = sh->raid_conf; schedule_reconstruction() 2869 struct r5dev *dev = &sh->dev[i]; schedule_reconstruction() 2887 sh->reconstruct_state = reconstruct_state_drain_run; schedule_reconstruction() 2890 sh->reconstruct_state = reconstruct_state_run; schedule_reconstruction() 2895 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) schedule_reconstruction() 2898 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || schedule_reconstruction() 2899 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); schedule_reconstruction() 2901 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || schedule_reconstruction() 2902 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); schedule_reconstruction() 2905 struct r5dev *dev = &sh->dev[i]; schedule_reconstruction() 2921 sh->reconstruct_state = reconstruct_state_prexor_drain_run; schedule_reconstruction() 2930 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); schedule_reconstruction() 2931 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); schedule_reconstruction() 2935 int qd_idx = sh->qd_idx; schedule_reconstruction() 2936 struct r5dev *dev = &sh->dev[qd_idx]; schedule_reconstruction() 2944 __func__, (unsigned long long)sh->sector, schedule_reconstruction() 2953 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, add_stripe_bio() argument 2957 struct r5conf *conf = sh->raid_conf; add_stripe_bio() 2962 (unsigned long long)sh->sector); add_stripe_bio() 2972 spin_lock_irq(&sh->stripe_lock); add_stripe_bio() 2974 if (sh->batch_head) add_stripe_bio() 2977 bip = &sh->dev[dd_idx].towrite; add_stripe_bio() 2981 bip = &sh->dev[dd_idx].toread; add_stripe_bio() 2991 clear_bit(STRIPE_BATCH_READY, &sh->state); add_stripe_bio() 3001 sector_t sector = sh->dev[dd_idx].sector; add_stripe_bio() 3002 for (bi=sh->dev[dd_idx].towrite; add_stripe_bio() 3003 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && add_stripe_bio() 3005 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { add_stripe_bio() 3009 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) add_stripe_bio() 3010 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) add_stripe_bio() 3011 sh->overwrite_disks++; add_stripe_bio() 3016 (unsigned long long)sh->sector, dd_idx); add_stripe_bio() 3031 set_bit(STRIPE_BITMAP_PENDING, &sh->state); add_stripe_bio() 3032 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio() 3033 bitmap_startwrite(conf->mddev->bitmap, sh->sector, add_stripe_bio() 3035 spin_lock_irq(&sh->stripe_lock); add_stripe_bio() 3036 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); add_stripe_bio() 3037 if (!sh->batch_head) { add_stripe_bio() 3038 sh->bm_seq = conf->seq_flush+1; add_stripe_bio() 3039 set_bit(STRIPE_BIT_DELAY, &sh->state); add_stripe_bio() 3042 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio() 3044 if (stripe_can_batch(sh)) add_stripe_bio() 3045 stripe_add_to_batch_list(conf, sh); add_stripe_bio() 3049 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); add_stripe_bio() 3050 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio() 3057 struct stripe_head *sh) stripe_set_idx() 3069 &dd_idx, sh); stripe_set_idx() 3073 
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, handle_failed_stripe() argument 3078 BUG_ON(sh->batch_head); handle_failed_stripe() 3083 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { handle_failed_stripe() 3095 sh->sector, handle_failed_stripe() 3101 spin_lock_irq(&sh->stripe_lock); handle_failed_stripe() 3103 bi = sh->dev[i].towrite; handle_failed_stripe() 3104 sh->dev[i].towrite = NULL; handle_failed_stripe() 3105 sh->overwrite_disks = 0; handle_failed_stripe() 3106 spin_unlock_irq(&sh->stripe_lock); handle_failed_stripe() 3110 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) handle_failed_stripe() 3114 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe() 3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe() 3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_failed_stripe() 3129 bi = sh->dev[i].written; handle_failed_stripe() 3130 sh->dev[i].written = NULL; handle_failed_stripe() 3131 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { handle_failed_stripe() 3132 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); handle_failed_stripe() 3133 sh->dev[i].page = sh->dev[i].orig_page; handle_failed_stripe() 3138 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe() 3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe() 3152 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && handle_failed_stripe() 3153 (!test_bit(R5_Insync, &sh->dev[i].flags) || handle_failed_stripe() 3154 test_bit(R5_ReadError, &sh->dev[i].flags))) { handle_failed_stripe() 3155 spin_lock_irq(&sh->stripe_lock); handle_failed_stripe() 3156 bi = sh->dev[i].toread; handle_failed_stripe() 3157 sh->dev[i].toread = NULL; handle_failed_stripe() 3158 spin_unlock_irq(&sh->stripe_lock); handle_failed_stripe() 3159 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) handle_failed_stripe() 3162 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe() 3164 r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe() 3174 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_failed_stripe() 3179 clear_bit(R5_LOCKED, &sh->dev[i].flags); handle_failed_stripe() 3182 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) handle_failed_stripe() 3188 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, handle_failed_sync() argument 3194 BUG_ON(sh->batch_head); handle_failed_sync() 3195 clear_bit(STRIPE_SYNCING, &sh->state); handle_failed_sync() 3196 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) handle_failed_sync() 3216 && !rdev_set_badblocks(rdev, sh->sector, handle_failed_sync() 3223 && !rdev_set_badblocks(rdev, sh->sector, handle_failed_sync() 3234 static int want_replace(struct stripe_head *sh, int disk_idx) want_replace() argument 3239 rdev = sh->raid_conf->disks[disk_idx].replacement; want_replace() 3243 && (rdev->recovery_offset <= sh->sector want_replace() 3244 || rdev->mddev->recovery_cp <= sh->sector)) want_replace() 3257 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, need_this_block() argument 3260 struct r5dev *dev = &sh->dev[disk_idx]; need_this_block() 3261 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], need_this_block() 3262 &sh->dev[s->failed_num[1]] }; need_this_block() 3279 (s->replacing && want_replace(sh, disk_idx))) need_this_block() 3304 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) need_this_block() 3332 if (sh->raid_conf->level != 6 && need_this_block() 3333 sh->sector < sh->raid_conf->mddev->recovery_cp) need_this_block() 3337 if 
(s->failed_num[i] != sh->pd_idx && need_this_block() 3338 s->failed_num[i] != sh->qd_idx && need_this_block() 3347 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, fetch_block() argument 3350 struct r5dev *dev = &sh->dev[disk_idx]; fetch_block() 3353 if (need_this_block(sh, s, disk_idx, disks)) { fetch_block() 3359 BUG_ON(sh->batch_head); fetch_block() 3367 (unsigned long long)sh->sector, disk_idx); fetch_block() 3368 set_bit(STRIPE_COMPUTE_RUN, &sh->state); fetch_block() 3371 sh->ops.target = disk_idx; fetch_block() 3372 sh->ops.target2 = -1; /* no 2nd target */ fetch_block() 3391 &sh->dev[other].flags)) fetch_block() 3396 (unsigned long long)sh->sector, fetch_block() 3398 set_bit(STRIPE_COMPUTE_RUN, &sh->state); fetch_block() 3400 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); fetch_block() 3401 set_bit(R5_Wantcompute, &sh->dev[other].flags); fetch_block() 3402 sh->ops.target = disk_idx; fetch_block() 3403 sh->ops.target2 = other; fetch_block() 3422 static void handle_stripe_fill(struct stripe_head *sh, handle_stripe_fill() argument 3432 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && handle_stripe_fill() 3433 !sh->reconstruct_state) handle_stripe_fill() 3435 if (fetch_block(sh, s, i, disks)) handle_stripe_fill() 3437 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_fill() 3448 struct stripe_head *sh, int disks, struct bio **return_bi) handle_stripe_clean_event() 3453 struct stripe_head *head_sh = sh; handle_stripe_clean_event() 3457 if (sh->dev[i].written) { handle_stripe_clean_event() 3458 dev = &sh->dev[i]; handle_stripe_clean_event() 3487 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_stripe_clean_event() 3489 !test_bit(STRIPE_DEGRADED, &sh->state), handle_stripe_clean_event() 3492 sh = list_first_entry(&sh->batch_list, handle_stripe_clean_event() 3495 if (sh != head_sh) { handle_stripe_clean_event() 3496 dev = &sh->dev[i]; handle_stripe_clean_event() 3500 sh = head_sh; handle_stripe_clean_event() 3501 dev = &sh->dev[i]; handle_stripe_clean_event() 3508 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { handle_stripe_clean_event() 3510 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); handle_stripe_clean_event() 3511 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); handle_stripe_clean_event() 3512 if (sh->qd_idx >= 0) { handle_stripe_clean_event() 3513 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); handle_stripe_clean_event() 3514 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); handle_stripe_clean_event() 3517 clear_bit(STRIPE_DISCARD, &sh->state); handle_stripe_clean_event() 3524 hash = sh->hash_lock_index; handle_stripe_clean_event() 3526 remove_hash(sh); handle_stripe_clean_event() 3529 sh = list_first_entry(&sh->batch_list, handle_stripe_clean_event() 3531 if (sh != head_sh) handle_stripe_clean_event() 3534 sh = head_sh; handle_stripe_clean_event() 3536 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) handle_stripe_clean_event() 3537 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_clean_event() 3541 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) handle_stripe_clean_event() 3550 struct stripe_head *sh, handle_stripe_dirtying() 3565 (recovery_cp < MaxSector && sh->sector >= recovery_cp && handle_stripe_dirtying() 3571 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", handle_stripe_dirtying() 3573 (unsigned long long)sh->sector); handle_stripe_dirtying() 3576 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying() 3577 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) 
&& handle_stripe_dirtying() 3588 i != sh->pd_idx && i != sh->qd_idx && handle_stripe_dirtying() 3599 (unsigned long long)sh->sector, rmw, rcw); handle_stripe_dirtying() 3600 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying() 3606 (unsigned long long)sh->sector, rmw); handle_stripe_dirtying() 3608 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying() 3609 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && handle_stripe_dirtying() 3615 &sh->state)) { handle_stripe_dirtying() 3622 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying() 3623 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying() 3633 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying() 3635 i != sh->pd_idx && i != sh->qd_idx && handle_stripe_dirtying() 3642 &sh->state)) { handle_stripe_dirtying() 3650 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying() 3651 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying() 3657 (unsigned long long)sh->sector, handle_stripe_dirtying() 3658 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); handle_stripe_dirtying() 3662 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) handle_stripe_dirtying() 3663 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying() 3675 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && handle_stripe_dirtying() 3677 !test_bit(STRIPE_BIT_DELAY, &sh->state))) handle_stripe_dirtying() 3678 schedule_reconstruction(sh, s, rcw == 0, 0); handle_stripe_dirtying() 3681 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, handle_parity_checks5() argument 3686 BUG_ON(sh->batch_head); handle_parity_checks5() 3687 set_bit(STRIPE_HANDLE, &sh->state); handle_parity_checks5() 3689 switch (sh->check_state) { handle_parity_checks5() 3694 sh->check_state = check_state_run; handle_parity_checks5() 3696 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); handle_parity_checks5() 3700 dev = &sh->dev[s->failed_num[0]]; handle_parity_checks5() 3703 sh->check_state = check_state_idle; handle_parity_checks5() 3705 dev = &sh->dev[sh->pd_idx]; handle_parity_checks5() 3708 if (test_bit(STRIPE_INSYNC, &sh->state)) handle_parity_checks5() 3719 clear_bit(STRIPE_DEGRADED, &sh->state); handle_parity_checks5() 3720 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5() 3725 sh->check_state = check_state_idle; handle_parity_checks5() 3737 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) handle_parity_checks5() 3741 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5() 3746 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5() 3748 sh->check_state = check_state_compute_run; handle_parity_checks5() 3749 set_bit(STRIPE_COMPUTE_RUN, &sh->state); handle_parity_checks5() 3752 &sh->dev[sh->pd_idx].flags); handle_parity_checks5() 3753 sh->ops.target = sh->pd_idx; handle_parity_checks5() 3754 sh->ops.target2 = -1; handle_parity_checks5() 3763 __func__, sh->check_state, handle_parity_checks5() 3764 (unsigned long long) sh->sector); handle_parity_checks5() 3769 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, handle_parity_checks6() argument 3773 int pd_idx = sh->pd_idx; handle_parity_checks6() 3774 int qd_idx = sh->qd_idx; handle_parity_checks6() 3777 BUG_ON(sh->batch_head); handle_parity_checks6() 3778 set_bit(STRIPE_HANDLE, &sh->state); handle_parity_checks6() 3788 switch (sh->check_state) { handle_parity_checks6() 3796 sh->check_state = check_state_run; handle_parity_checks6() 3802 if (sh->check_state == check_state_run) handle_parity_checks6() 3803 
sh->check_state = check_state_run_pq; handle_parity_checks6() 3805 sh->check_state = check_state_run_q; handle_parity_checks6() 3809 sh->ops.zero_sum_result = 0; handle_parity_checks6() 3811 if (sh->check_state == check_state_run) { handle_parity_checks6() 3813 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); handle_parity_checks6() 3816 if (sh->check_state >= check_state_run && handle_parity_checks6() 3817 sh->check_state <= check_state_run_pq) { handle_parity_checks6() 3829 sh->check_state = check_state_idle; handle_parity_checks6() 3832 if (test_bit(STRIPE_INSYNC, &sh->state)) handle_parity_checks6() 3840 dev = &sh->dev[s->failed_num[1]]; handle_parity_checks6() 3846 dev = &sh->dev[s->failed_num[0]]; handle_parity_checks6() 3851 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { handle_parity_checks6() 3852 dev = &sh->dev[pd_idx]; handle_parity_checks6() 3857 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { handle_parity_checks6() 3858 dev = &sh->dev[qd_idx]; handle_parity_checks6() 3863 clear_bit(STRIPE_DEGRADED, &sh->state); handle_parity_checks6() 3865 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6() 3872 sh->check_state = check_state_idle; handle_parity_checks6() 3878 if (sh->ops.zero_sum_result == 0) { handle_parity_checks6() 3881 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6() 3887 sh->check_state = check_state_compute_result; handle_parity_checks6() 3898 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6() 3900 int *target = &sh->ops.target; handle_parity_checks6() 3902 sh->ops.target = -1; handle_parity_checks6() 3903 sh->ops.target2 = -1; handle_parity_checks6() 3904 sh->check_state = check_state_compute_run; handle_parity_checks6() 3905 set_bit(STRIPE_COMPUTE_RUN, &sh->state); handle_parity_checks6() 3907 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { handle_parity_checks6() 3909 &sh->dev[pd_idx].flags); handle_parity_checks6() 3911 target = &sh->ops.target2; handle_parity_checks6() 3914 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { handle_parity_checks6() 3916 &sh->dev[qd_idx].flags); handle_parity_checks6() 3927 __func__, sh->check_state, handle_parity_checks6() 3928 (unsigned long long) sh->sector); handle_parity_checks6() 3933 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) handle_stripe_expansion() argument 3941 BUG_ON(sh->batch_head); handle_stripe_expansion() 3942 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); handle_stripe_expansion() 3943 for (i = 0; i < sh->disks; i++) handle_stripe_expansion() 3944 if (i != sh->pd_idx && i != sh->qd_idx) { handle_stripe_expansion() 3949 sector_t bn = compute_blocknr(sh, i, 1); handle_stripe_expansion() 3969 sh->dev[i].page, 0, 0, STRIPE_SIZE, handle_stripe_expansion() 4004 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) analyse_stripe() argument 4006 struct r5conf *conf = sh->raid_conf; analyse_stripe() 4007 int disks = sh->disks; analyse_stripe() 4014 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; analyse_stripe() 4015 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; analyse_stripe() 4027 dev = &sh->dev[i]; analyse_stripe() 4038 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) analyse_stripe() 4067 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && analyse_stripe() 4068 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, analyse_stripe() 4080 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, analyse_stripe() 4107 else if (sh->sector + STRIPE_SECTORS <= 
rdev->recovery_offset) analyse_stripe() 4166 if (test_bit(STRIPE_SYNCING, &sh->state)) { analyse_stripe() 4176 sh->sector >= conf->mddev->recovery_cp || analyse_stripe() 4185 static int clear_batch_ready(struct stripe_head *sh) clear_batch_ready() argument 4192 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) clear_batch_ready() 4193 return (sh->batch_head && sh->batch_head != sh); clear_batch_ready() 4194 spin_lock(&sh->stripe_lock); clear_batch_ready() 4195 if (!sh->batch_head) { clear_batch_ready() 4196 spin_unlock(&sh->stripe_lock); clear_batch_ready() 4204 if (sh->batch_head != sh) { clear_batch_ready() 4205 spin_unlock(&sh->stripe_lock); clear_batch_ready() 4208 spin_lock(&sh->batch_lock); clear_batch_ready() 4209 list_for_each_entry(tmp, &sh->batch_list, batch_list) clear_batch_ready() 4211 spin_unlock(&sh->batch_lock); clear_batch_ready() 4212 spin_unlock(&sh->stripe_lock); clear_batch_ready() 4224 struct stripe_head *sh, *next; break_stripe_batch_list() local 4228 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { break_stripe_batch_list() 4230 list_del_init(&sh->batch_list); break_stripe_batch_list() 4232 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | break_stripe_batch_list() 4248 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | break_stripe_batch_list() 4253 sh->check_state = head_sh->check_state; break_stripe_batch_list() 4254 sh->reconstruct_state = head_sh->reconstruct_state; break_stripe_batch_list() 4255 for (i = 0; i < sh->disks; i++) { break_stripe_batch_list() 4256 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) break_stripe_batch_list() 4258 sh->dev[i].flags = head_sh->dev[i].flags & break_stripe_batch_list() 4261 spin_lock_irq(&sh->stripe_lock); break_stripe_batch_list() 4262 sh->batch_head = NULL; break_stripe_batch_list() 4263 spin_unlock_irq(&sh->stripe_lock); break_stripe_batch_list() 4265 sh->state & handle_flags) break_stripe_batch_list() 4266 set_bit(STRIPE_HANDLE, &sh->state); break_stripe_batch_list() 4267 release_stripe(sh); break_stripe_batch_list() 4282 static void handle_stripe(struct stripe_head *sh) handle_stripe() argument 4285 struct r5conf *conf = sh->raid_conf; handle_stripe() 4288 int disks = sh->disks; handle_stripe() 4291 clear_bit(STRIPE_HANDLE, &sh->state); handle_stripe() 4292 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { handle_stripe() 4295 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe() 4299 if (clear_batch_ready(sh) ) { handle_stripe() 4300 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); handle_stripe() 4304 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) handle_stripe() 4305 break_stripe_batch_list(sh, 0); handle_stripe() 4307 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { handle_stripe() 4308 spin_lock(&sh->stripe_lock); handle_stripe() 4310 if (!test_bit(STRIPE_DISCARD, &sh->state) && handle_stripe() 4311 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { handle_stripe() 4312 set_bit(STRIPE_SYNCING, &sh->state); handle_stripe() 4313 clear_bit(STRIPE_INSYNC, &sh->state); handle_stripe() 4314 clear_bit(STRIPE_REPLACED, &sh->state); handle_stripe() 4316 spin_unlock(&sh->stripe_lock); handle_stripe() 4318 clear_bit(STRIPE_DELAYED, &sh->state); handle_stripe() 4322 (unsigned long long)sh->sector, sh->state, handle_stripe() 4323 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, handle_stripe() 4324 sh->check_state, sh->reconstruct_state); handle_stripe() 4326 analyse_stripe(sh, &s); handle_stripe() 4329 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe() 
4336 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe() 4344 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { handle_stripe() 4346 set_bit(STRIPE_BIOFILL_RUN, &sh->state); handle_stripe() 4357 sh->check_state = 0; handle_stripe() 4358 sh->reconstruct_state = 0; handle_stripe() 4359 break_stripe_batch_list(sh, 0); handle_stripe() 4361 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); handle_stripe() 4363 handle_failed_sync(conf, sh, &s); handle_stripe() 4370 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) handle_stripe() 4372 if (sh->reconstruct_state == reconstruct_state_drain_result || handle_stripe() 4373 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { handle_stripe() 4374 sh->reconstruct_state = reconstruct_state_idle; handle_stripe() 4379 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && handle_stripe() 4380 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); handle_stripe() 4381 BUG_ON(sh->qd_idx >= 0 && handle_stripe() 4382 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && handle_stripe() 4383 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); handle_stripe() 4385 struct r5dev *dev = &sh->dev[i]; handle_stripe() 4387 (i == sh->pd_idx || i == sh->qd_idx || handle_stripe() 4396 ((i == sh->pd_idx || i == sh->qd_idx) && handle_stripe() 4398 set_bit(STRIPE_INSYNC, &sh->state); handle_stripe() 4401 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) handle_stripe() 4409 pdev = &sh->dev[sh->pd_idx]; handle_stripe() 4410 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) handle_stripe() 4411 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); handle_stripe() 4412 qdev = &sh->dev[sh->qd_idx]; handle_stripe() 4413 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) handle_stripe() 4414 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) handle_stripe() 4426 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); handle_stripe() 4437 handle_stripe_fill(sh, &s, disks); handle_stripe() 4445 if (s.to_write && !sh->reconstruct_state && !sh->check_state) handle_stripe() 4446 handle_stripe_dirtying(conf, sh, &s, disks); handle_stripe() 4453 if (sh->check_state || handle_stripe() 4455 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && handle_stripe() 4456 !test_bit(STRIPE_INSYNC, &sh->state))) { handle_stripe() 4458 handle_parity_checks6(conf, sh, &s, disks); handle_stripe() 4460 handle_parity_checks5(conf, sh, &s, disks); handle_stripe() 4464 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) handle_stripe() 4465 && !test_bit(STRIPE_REPLACED, &sh->state)) { handle_stripe() 4468 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { handle_stripe() 4469 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); handle_stripe() 4470 set_bit(R5_WantReplace, &sh->dev[i].flags); handle_stripe() 4471 set_bit(R5_LOCKED, &sh->dev[i].flags); handle_stripe() 4475 set_bit(STRIPE_INSYNC, &sh->state); handle_stripe() 4476 set_bit(STRIPE_REPLACED, &sh->state); handle_stripe() 4479 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && handle_stripe() 4480 test_bit(STRIPE_INSYNC, &sh->state)) { handle_stripe() 4482 clear_bit(STRIPE_SYNCING, &sh->state); handle_stripe() 4483 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) handle_stripe() 4492 struct r5dev *dev = &sh->dev[s.failed_num[i]]; handle_stripe() 4512 if (sh->reconstruct_state == reconstruct_state_result) { handle_stripe() 4514 = get_active_stripe(conf, sh->sector, 1, 1, 1); handle_stripe() 4516 /* sh cannot be written until sh_src has been read. 
handle_stripe() 4517 * so arrange for sh to be delayed a little handle_stripe() 4519 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe() 4520 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe() 4530 sh->reconstruct_state = reconstruct_state_idle; handle_stripe() 4531 clear_bit(STRIPE_EXPANDING, &sh->state); handle_stripe() 4533 set_bit(R5_Wantwrite, &sh->dev[i].flags); handle_stripe() 4534 set_bit(R5_LOCKED, &sh->dev[i].flags); handle_stripe() 4539 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && handle_stripe() 4540 !sh->reconstruct_state) { handle_stripe() 4542 sh->disks = conf->raid_disks; handle_stripe() 4543 stripe_set_idx(sh->sector, conf, 0, sh); handle_stripe() 4544 schedule_reconstruction(sh, &s, 1, 1); handle_stripe() 4545 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { handle_stripe() 4546 clear_bit(STRIPE_EXPAND_READY, &sh->state); handle_stripe() 4553 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) handle_stripe() 4554 handle_stripe_expansion(conf, sh); handle_stripe() 4574 struct r5dev *dev = &sh->dev[i]; handle_stripe() 4578 if (!rdev_set_badblocks(rdev, sh->sector, handle_stripe() 4585 rdev_clear_badblocks(rdev, sh->sector, handle_stripe() 4594 rdev_clear_badblocks(rdev, sh->sector, handle_stripe() 4601 raid_run_ops(sh, s.ops_request); handle_stripe() 4603 ops_run_io(sh, &s); handle_stripe() 4618 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); handle_stripe() 4626 struct stripe_head *sh; raid5_activate_delayed() local 4627 sh = list_entry(l, struct stripe_head, lru); raid5_activate_delayed() 4629 clear_bit(STRIPE_DELAYED, &sh->state); raid5_activate_delayed() 4630 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) raid5_activate_delayed() 4632 list_add_tail(&sh->lru, &conf->hold_list); raid5_activate_delayed() 4633 raid5_wakeup_stripe_thread(sh); raid5_activate_delayed() 4646 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); activate_bit_delay() local 4648 list_del_init(&sh->lru); activate_bit_delay() 4649 atomic_inc(&sh->count); activate_bit_delay() 4650 hash = sh->hash_lock_index; activate_bit_delay() 4651 __release_stripe(conf, sh, &temp_inactive_list[hash]); activate_bit_delay() 4908 struct stripe_head *sh = NULL, *tmp; __get_priority_stripe() local 4934 sh = list_entry(handle_list->next, typeof(*sh), lru); __get_priority_stripe() 4938 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { __get_priority_stripe() 4958 sh = tmp; __get_priority_stripe() 4963 if (sh) { __get_priority_stripe() 4971 if (!sh) __get_priority_stripe() 4976 sh->group = NULL; __get_priority_stripe() 4978 list_del_init(&sh->lru); __get_priority_stripe() 4979 BUG_ON(atomic_inc_return(&sh->count) != 1); __get_priority_stripe() 4980 return sh; __get_priority_stripe() 4993 struct stripe_head *sh; raid5_unplug() local 5002 sh = list_first_entry(&cb->list, struct stripe_head, lru); raid5_unplug() 5003 list_del_init(&sh->lru); raid5_unplug() 5010 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); raid5_unplug() 5015 hash = sh->hash_lock_index; raid5_unplug() 5016 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); raid5_unplug() 5029 struct stripe_head *sh) release_stripe_plug() 5037 release_stripe(sh); release_stripe_plug() 5050 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) release_stripe_plug() 5051 list_add_tail(&sh->lru, &cb->list); release_stripe_plug() 5053 release_stripe(sh); release_stripe_plug() 5060 struct stripe_head *sh; make_discard_request() local 5088 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 
make_discard_request() 5091 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); make_discard_request() 5092 if (test_bit(STRIPE_SYNCING, &sh->state)) { make_discard_request() 5093 release_stripe(sh); make_discard_request() 5097 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); make_discard_request() 5098 spin_lock_irq(&sh->stripe_lock); make_discard_request() 5100 if (d == sh->pd_idx || d == sh->qd_idx) make_discard_request() 5102 if (sh->dev[d].towrite || sh->dev[d].toread) { make_discard_request() 5103 set_bit(R5_Overlap, &sh->dev[d].flags); make_discard_request() 5104 spin_unlock_irq(&sh->stripe_lock); make_discard_request() 5105 release_stripe(sh); make_discard_request() 5110 set_bit(STRIPE_DISCARD, &sh->state); make_discard_request() 5112 sh->overwrite_disks = 0; make_discard_request() 5114 if (d == sh->pd_idx || d == sh->qd_idx) make_discard_request() 5116 sh->dev[d].towrite = bi; make_discard_request() 5117 set_bit(R5_OVERWRITE, &sh->dev[d].flags); make_discard_request() 5119 sh->overwrite_disks++; make_discard_request() 5121 spin_unlock_irq(&sh->stripe_lock); make_discard_request() 5127 sh->sector, make_discard_request() 5130 sh->bm_seq = conf->seq_flush + 1; make_discard_request() 5131 set_bit(STRIPE_BIT_DELAY, &sh->state); make_discard_request() 5134 set_bit(STRIPE_HANDLE, &sh->state); make_discard_request() 5135 clear_bit(STRIPE_DELAYED, &sh->state); make_discard_request() 5136 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) make_discard_request() 5138 release_stripe_plug(mddev, sh); make_discard_request() 5154 struct stripe_head *sh; make_request() local 5233 sh = get_active_stripe(conf, new_sector, previous, make_request() 5235 if (sh) { make_request() 5241 * 'sh', we know that if that happens, make_request() 5254 release_stripe(sh); make_request() 5264 release_stripe(sh); make_request() 5271 release_stripe(sh); make_request() 5287 if (test_bit(STRIPE_EXPANDING, &sh->state) || make_request() 5288 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { make_request() 5294 release_stripe(sh); make_request() 5299 set_bit(STRIPE_HANDLE, &sh->state); make_request() 5300 clear_bit(STRIPE_DELAYED, &sh->state); make_request() 5301 if ((!sh->batch_head || sh == sh->batch_head) && make_request() 5303 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) make_request() 5305 release_stripe_plug(mddev, sh); make_request() 5340 struct stripe_head *sh; reshape_request() local 5472 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); reshape_request() 5473 set_bit(STRIPE_EXPANDING, &sh->state); reshape_request() 5478 for (j=sh->disks; j--;) { reshape_request() 5480 if (j == sh->pd_idx) reshape_request() 5483 j == sh->qd_idx) reshape_request() 5485 s = compute_blocknr(sh, j, 0); reshape_request() 5490 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); reshape_request() 5491 set_bit(R5_Expanded, &sh->dev[j].flags); reshape_request() 5492 set_bit(R5_UPTODATE, &sh->dev[j].flags); reshape_request() 5495 set_bit(STRIPE_EXPAND_READY, &sh->state); reshape_request() 5496 set_bit(STRIPE_HANDLE, &sh->state); reshape_request() 5498 list_add(&sh->lru, &stripes); reshape_request() 5521 sh = get_active_stripe(conf, first_sector, 1, 0, 1); reshape_request() 5522 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); reshape_request() 5523 set_bit(STRIPE_HANDLE, &sh->state); reshape_request() 5524 release_stripe(sh); reshape_request() 5531 sh = list_entry(stripes.next, struct stripe_head, lru); reshape_request() 5532 list_del_init(&sh->lru); reshape_request() 5533 release_stripe(sh); reshape_request() 5570 struct 
stripe_head *sh; sync_request() local 5628 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); sync_request() 5629 if (sh == NULL) { sync_request() 5630 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); sync_request() 5651 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); sync_request() 5652 set_bit(STRIPE_HANDLE, &sh->state); sync_request() 5654 release_stripe(sh); sync_request() 5671 struct stripe_head *sh; retry_aligned_read() local 5693 sh = get_active_stripe(conf, sector, 0, 1, 1); retry_aligned_read() 5695 if (!sh) { retry_aligned_read() 5702 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { retry_aligned_read() 5703 release_stripe(sh); retry_aligned_read() 5709 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); retry_aligned_read() 5710 handle_stripe(sh); retry_aligned_read() 5711 release_stripe(sh); retry_aligned_read() 5729 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; handle_active_stripes() local 5734 (sh = __get_priority_stripe(conf, group)) != NULL) handle_active_stripes() 5735 batch[batch_size++] = sh; handle_active_stripes() 1104 async_copy_data(int frombio, struct bio *bio, struct page **page, sector_t sector, struct dma_async_tx_descriptor *tx, struct stripe_head *sh) async_copy_data() argument 1335 set_syndrome_sources(struct page **srcs, struct stripe_head *sh, int srctype) set_syndrome_sources() argument 2536 raid5_compute_sector(struct r5conf *conf, sector_t r_sector, int previous, int *dd_idx, struct stripe_head *sh) raid5_compute_sector() argument 3056 stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, struct stripe_head *sh) stripe_set_idx() argument 3447 handle_stripe_clean_event(struct r5conf *conf, struct stripe_head *sh, int disks, struct bio **return_bi) handle_stripe_clean_event() argument 3549 handle_stripe_dirtying(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) handle_stripe_dirtying() argument 5028 release_stripe_plug(struct mddev *mddev, struct stripe_head *sh) release_stripe_plug() argument
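The raid5_compute_sector()/compute_blocknr() hits above form a forward/inverse pair: one maps a logical data chunk to a device slot given the stripe's parity disk, the other recovers the logical chunk from the slot. Below is a minimal standalone sketch of that round trip for the default left-symmetric layout; the helper names and the chunk-aligned stripe numbering are assumptions for illustration, not the kernel's API.

#include <assert.h>

/* Sketch only: left-symmetric RAID5 placement, n = raid_disks,
 * d = logical data-chunk index within the stripe (0 .. n-2). */
static int parity_disk(unsigned long stripe, int n)
{
        return (n - 1) - (int)(stripe % n);      /* pd_idx */
}

static int data_to_device(int d, int pd_idx, int n)
{
        return (pd_idx + 1 + d) % n;             /* forward mapping */
}

static int device_to_data(int i, int pd_idx, int n)
{
        if (i < pd_idx)                          /* mirrors the compute_blocknr() hits */
                i += n;
        return i - (pd_idx + 1);
}

int main(void)
{
        for (unsigned long stripe = 0; stripe < 12; stripe++) {
                int n = 5, pd = parity_disk(stripe, n);
                for (int d = 0; d < n - 1; d++) {
                        int dev = data_to_device(d, pd, n);
                        assert(dev != pd);                         /* data never lands on parity */
                        assert(device_to_data(dev, pd, n) == d);   /* round trip holds */
                }
        }
        return 0;
}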
|
/linux-4.1.27/arch/sh/include/mach-x3proto/mach/ |
H A D | hardware.h | 6 /* arch/sh/boards/mach-x3proto/gpio.c */
|
H A D | ilsel.h | 40 /* arch/sh/boards/renesas/x3proto/ilsel.c */
|
/linux-4.1.27/arch/arm/mach-shmobile/include/mach/ |
H A D | irqs.h | 4 /* Stuck here until drivers/pinctl/sh-pfc gets rid of legacy code */
|
/linux-4.1.27/arch/alpha/math-emu/ |
H A D | sfp-util.h | 7 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 8 ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al))) 10 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 11 ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl)))
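The generic add_ssaaaa()/sub_ddmmss() definitions above perform a two-word add or subtract using only single-word operations, detecting the carry with the unsigned wrap-around test (sl) < (al) and the borrow with (al) < (bl). A small self-contained sketch of the same trick, with explicit types and function names of my own rather than the macro forms:

#include <assert.h>
#include <stdint.h>

/* Double-word add: (rh:rl) = (ah:al) + (bh:bl); carry out of the
 * low word is detected by the wrap-around test *rl < al. */
static void dw_add(uint32_t *rh, uint32_t *rl,
                   uint32_t ah, uint32_t al, uint32_t bh, uint32_t bl)
{
        *rl = al + bl;
        *rh = ah + bh + (*rl < al);
}

/* Double-word subtract: borrow out of the low word is al < bl. */
static void dw_sub(uint32_t *rh, uint32_t *rl,
                   uint32_t ah, uint32_t al, uint32_t bh, uint32_t bl)
{
        *rl = al - bl;
        *rh = ah - bh - (al < bl);
}

int main(void)
{
        uint32_t h, l;

        dw_add(&h, &l, 0x0, 0xffffffffu, 0x0, 0x1u);
        assert(h == 1 && l == 0);                  /* carry propagated */

        dw_sub(&h, &l, 0x1, 0x0, 0x0, 0x1u);
        assert(h == 0 && l == 0xffffffffu);        /* borrow propagated */
        return 0;
}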
|
/linux-4.1.27/tools/power/cpupower/bench/ |
H A D | Makefile | 29 install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh 31 install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh
|
/linux-4.1.27/tools/testing/selftests/user/ |
H A D | Makefile | 6 TEST_PROGS := test_user_copy.sh
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | clock.h | 10 /* arch/sh/kernel/cpu/clock-cpg.c */ 13 /* arch/sh/kernel/cpu/clock.c */
|
H A D | kdebug.h | 13 /* arch/sh/kernel/dumpstack.c */
|
H A D | timex.h | 2 * linux/include/asm-sh/timex.h 4 * sh architecture timex specifications
|
H A D | mmzone.h | 28 /* arch/sh/mm/numa.c */ 40 /* arch/sh/kernel/setup.c */ 43 /* arch/sh/mm/init.c */
|
H A D | ftrace.h | 24 /* No extra data needed on sh */ 40 /* arch/sh/kernel/return_address.c */
|
H A D | freq.h | 2 * include/asm-sh/freq.h
|
H A D | reboot.h | 18 /* arch/sh/kernel/machine_kexec.c */
|
H A D | processor.h | 13 * in arch/sh/mm/Kconfig, as well as an entry in arch/sh/kernel/setup.c 114 /* arch/sh/kernel/process.c */ 119 /* arch/sh/mm/alignment.c */ 126 /* arch/sh/mm/init.c */ 129 /* arch/sh/kernel/setup.c */
|
H A D | perf_event.h | 24 /* arch/sh/kernel/perf_event.c */
|
H A D | shmparam.h | 2 * include/asm-sh/shmparam.h
|
H A D | sram.h | 9 /* arch/sh/mm/sram.c */
|
H A D | bugs.h | 22 char *p = &init_utsname()->machine[2]; /* "sh" */ check_bugs()
|
H A D | clkdev.h | 2 * Copyright (C) 2010 Paul Mundt <lethal@linux-sh.org>
|
H A D | machvec.h | 2 * include/asm-sh/machvec.h
|
H A D | dma-register.h | 4 * extracted from arch/sh/include/asm/dma-sh.h:
|
H A D | dma.h | 2 * include/asm-sh/dma.h 111 /* arch/sh/drivers/dma/dma-api.c */ 140 /* arch/sh/drivers/dma/dma-sysfs.c */
|
H A D | cache.h | 3 * include/asm-sh/cache.h
|
H A D | flat.h | 2 * include/asm-sh/flat.h
|
H A D | gpio.h | 2 * include/asm-sh/gpio.h
|
/linux-4.1.27/arch/sh/kernel/cpu/ |
H A D | clock-cpg.c | 59 clk_add_alias("fck", "sh-tmu-sh3.0", "peripheral_clk", NULL); cpg_clk_init() 60 clk_add_alias("fck", "sh-tmu.0", "peripheral_clk", NULL); cpg_clk_init() 61 clk_add_alias("fck", "sh-tmu.1", "peripheral_clk", NULL); cpg_clk_init() 62 clk_add_alias("fck", "sh-tmu.2", "peripheral_clk", NULL); cpg_clk_init() 63 clk_add_alias("fck", "sh-mtu2", "peripheral_clk", NULL); cpg_clk_init() 64 clk_add_alias("fck", "sh-cmt-16.0", "peripheral_clk", NULL); cpg_clk_init() 65 clk_add_alias("fck", "sh-cmt-32.0", "peripheral_clk", NULL); cpg_clk_init()
|
H A D | adc.c | 2 * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
|
H A D | clock.c | 2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
|
/linux-4.1.27/drivers/pinctrl/sh-pfc/ |
H A D | Makefile | 0 sh-pfc-objs = core.o pinctrl.o 3 sh-pfc-objs += gpio.o 5 obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | kdebugfs.c | 10 arch_debugfs_dir = debugfs_create_dir("sh", NULL); arch_kdebugfs_init()
|
H A D | vmlinux.lds.S | 7 OUTPUT_ARCH(sh:sh5) 10 OUTPUT_ARCH(sh)
|
H A D | debugtraps.S | 2 * arch/sh/kernel/debugtraps.S
|
H A D | ioport.c | 2 * arch/sh/kernel/ioport.c
|
H A D | return_address.c | 2 * arch/sh/kernel/return_address.c
|
H A D | sh_ksyms_64.c | 2 * arch/sh/kernel/sh_ksyms_64.c
|
H A D | time.c | 2 * arch/sh/kernel/time.c 7 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
/linux-4.1.27/arch/sh/mm/ |
H A D | extable_32.c | 2 * linux/arch/sh/mm/extable.c
|
H A D | cache-shx3.c | 2 * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
|
H A D | hugetlbpage.c | 2 * arch/sh/mm/hugetlbpage.c
|
H A D | kmap.c | 2 * arch/sh/mm/kmap.c
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | sfp-util.h | 6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \ 16 (sh) = __sh; \ 20 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \ 30 (sh) = __sh; \
|
/linux-4.1.27/tools/testing/selftests/net/ |
H A D | Makefile | 13 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
|
/linux-4.1.27/arch/s390/boot/ |
H A D | Makefile | 25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/phy/ |
H A D | phy_cmn.c | 132 wlapi_bmac_ucode_wake_override_phyreg_set(pi->sh->physhim); wlc_phyreg_enter() 138 wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim); wlc_phyreg_exit() 144 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO); wlc_radioreg_enter() 155 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); wlc_radioreg_exit() 185 if ((D11REV_GE(pi->sh->corerev, 24)) || read_radio_reg() 186 (D11REV_IS(pi->sh->corerev, 22) read_radio_reg() 201 if ((D11REV_GE(pi->sh->corerev, 24)) || write_radio_reg() 202 (D11REV_IS(pi->sh->corerev, 22) write_radio_reg() 223 if (D11REV_GE(pi->sh->corerev, 24)) { read_radio_id() 367 struct shared_phy *sh; wlc_phy_shared_attach() local 369 sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC); wlc_phy_shared_attach() 370 if (sh == NULL) wlc_phy_shared_attach() 373 sh->physhim = shp->physhim; wlc_phy_shared_attach() 374 sh->unit = shp->unit; wlc_phy_shared_attach() 375 sh->corerev = shp->corerev; wlc_phy_shared_attach() 377 sh->vid = shp->vid; wlc_phy_shared_attach() 378 sh->did = shp->did; wlc_phy_shared_attach() 379 sh->chip = shp->chip; wlc_phy_shared_attach() 380 sh->chiprev = shp->chiprev; wlc_phy_shared_attach() 381 sh->chippkg = shp->chippkg; wlc_phy_shared_attach() 382 sh->sromrev = shp->sromrev; wlc_phy_shared_attach() 383 sh->boardtype = shp->boardtype; wlc_phy_shared_attach() 384 sh->boardrev = shp->boardrev; wlc_phy_shared_attach() 385 sh->boardflags = shp->boardflags; wlc_phy_shared_attach() 386 sh->boardflags2 = shp->boardflags2; wlc_phy_shared_attach() 388 sh->fast_timer = PHY_SW_TIMER_FAST; wlc_phy_shared_attach() 389 sh->slow_timer = PHY_SW_TIMER_SLOW; wlc_phy_shared_attach() 390 sh->glacial_timer = PHY_SW_TIMER_GLACIAL; wlc_phy_shared_attach() 392 sh->rssi_mode = RSSI_ANT_MERGE_MAX; wlc_phy_shared_attach() 394 return sh; wlc_phy_shared_attach() 402 if (!pi->sh->up) { wlc_phy_timercb_phycal() 429 wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, wlc_phy_attach() argument 438 if (D11REV_IS(sh->corerev, 4)) wlc_phy_attach() 448 pi = sh->phy_head; wlc_phy_attach() 450 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); wlc_phy_attach() 460 pi->sh = sh; wlc_phy_attach() 473 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); wlc_phy_attach() 531 pi->sh->phyrxchain = 0x3; wlc_phy_attach() 559 pi->phycal_timer = wlapi_init_timer(pi->sh->physhim, wlc_phy_attach() 575 pi->next = pi->sh->phy_head; wlc_phy_attach() 576 sh->phy_head = pi; wlc_phy_attach() 600 if (pi->sh->phy_head == pi) wlc_phy_detach() 601 pi->sh->phy_head = pi->next; wlc_phy_detach() 602 else if (pi->sh->phy_head->next == pi) wlc_phy_detach() 603 pi->sh->phy_head->next = NULL; wlc_phy_detach() 729 if (!pi || !pi->sh) wlc_phy_hw_clk_state_upd() 732 pi->sh->clk = newstate; wlc_phy_hw_clk_state_upd() 739 if (!pi || !pi->sh) wlc_phy_hw_state_upd() 742 pi->sh->up = newstate; wlc_phy_hw_state_upd() 777 wlapi_bmac_bw_set(pi->sh->physhim, wlc_phy_init() 788 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) wlc_phy_init() 794 wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi, pi->sh->rx_antdiv); wlc_phy_init() 841 if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 && wlc_phy_table_addr() 842 pi->sh->chiprev == 1) { wlc_phy_table_addr() 851 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && wlc_phy_table_data_write() 852 (pi->sh->chiprev == 1) && wlc_phy_table_data_write() 885 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && wlc_phy_write_table() 886 (pi->sh->chiprev == 1) && wlc_phy_write_table() 922 if ((pi->sh->chip == 
BCMA_CHIP_ID_BCM43224) && wlc_phy_read_table() 923 (pi->sh->chiprev == 1)) { wlc_phy_read_table() 1009 wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, wlc_phy_do_dummy_tx() 1014 if (D11REV_GE(pi->sh->corerev, 11)) wlc_phy_do_dummy_tx() 1093 pi->nphy_perical_last = pi->sh->now - pi->sh->glacial_timer; wlc_phy_mute_upd() 1104 wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_0, NULL_TSSI_W); wlc_phy_clear_tssi() 1105 wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_1, NULL_TSSI_W); wlc_phy_clear_tssi() 1106 wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_0, NULL_TSSI_W); wlc_phy_clear_tssi() 1107 wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_1, NULL_TSSI_W); wlc_phy_clear_tssi() 1188 wlapi_bmac_write_shm(pi->sh->physhim, M_CURCHANNEL, m_cur_channel); wlc_phy_chanspec_set() 1363 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_target_set() 1369 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_target_set() 1385 if (pi->sh->up) { wlc_phy_txpower_set() 1394 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_set() 1400 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_set() 1824 pi->sh->machwcap = machwcap; wlc_phy_machwcap_set() 1875 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_limit_set() 1879 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_limit_set() 1902 if (!pi->sh->clk) wlc_phy_txpower_update_shm() 1908 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_MAX, 63); wlc_phy_txpower_update_shm() 1909 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_N, wlc_phy_txpower_update_shm() 1912 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_TARGET, wlc_phy_txpower_update_shm() 1915 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_CUR, wlc_phy_txpower_update_shm() 1923 pi->sh->physhim, wlc_phy_txpower_update_shm() 1925 wlapi_bmac_write_shm(pi->sh->physhim, offset + 6, wlc_phy_txpower_update_shm() 1927 wlapi_bmac_write_shm(pi->sh->physhim, offset + 14, wlc_phy_txpower_update_shm() 1931 wlapi_bmac_mhf(pi->sh->physhim, MHF2, MHF2_HWPWRCTL, wlc_phy_txpower_update_shm() 1939 wlapi_bmac_write_shm(pi->sh->physhim, M_OFDM_OFFSET, wlc_phy_txpower_update_shm() 1973 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_hw_ctrl_set() 1983 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_hw_ctrl_set() 2079 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_get_current() 2083 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_get_current() 2108 } else if (pi->hwpwrctrl && pi->sh->up) { wlc_phy_txpower_get_current() 2156 pi->sh->rx_antdiv = val; wlc_phy_ant_rxdiv_set() 2158 if (!(ISNPHY(pi) && D11REV_IS(pi->sh->corerev, 16))) { wlc_phy_ant_rxdiv_set() 2160 wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, wlc_phy_ant_rxdiv_set() 2163 wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, 0, wlc_phy_ant_rxdiv_set() 2170 if (!pi->sh->clk) wlc_phy_ant_rxdiv_set() 2176 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_ant_rxdiv_set() 2191 wlapi_enable_mac(pi->sh->physhim); wlc_phy_ant_rxdiv_set() 2229 pi->sh->phy_noise_window[pi->sh->phy_noise_index] = wlc_phy_noise_cb() 2231 pi->sh->phy_noise_index = wlc_phy_noise_cb() 2232 MODINC(pi->sh->phy_noise_index, MA_WINDOW_SZ); wlc_phy_noise_cb() 2256 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx)); wlc_phy_noise_read_shmem() 2257 hi = wlapi_bmac_read_shm(pi->sh->physhim, wlc_phy_noise_read_shmem() 2297 jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); wlc_phy_noise_sample_intr() 2300 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP0); wlc_phy_noise_sample_intr() 2301 hi = 
wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP1); wlc_phy_noise_sample_intr() 2304 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP2); wlc_phy_noise_sample_intr() 2305 hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP3); wlc_phy_noise_sample_intr() 2310 status_1 = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_0); wlc_phy_noise_sample_intr() 2329 jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); wlc_phy_noise_sample_intr() 2364 pi->phynoise_now = pi->sh->now; wlc_phy_noise_sample_request() 2386 wlapi_bmac_write_shm(pi->sh->physhim, M_JSSI_0, 0); wlc_phy_noise_sample_request() 2387 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); wlc_phy_noise_sample_request() 2388 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); wlc_phy_noise_sample_request() 2389 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlc_phy_noise_sample_request() 2390 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); wlc_phy_noise_sample_request() 2395 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_noise_sample_request() 2399 wlapi_enable_mac(pi->sh->physhim); wlc_phy_noise_sample_request() 2406 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); wlc_phy_noise_sample_request() 2407 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); wlc_phy_noise_sample_request() 2408 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlc_phy_noise_sample_request() 2409 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); wlc_phy_noise_sample_request() 2429 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_noise_sample_request() 2435 wlapi_enable_mac(pi->sh->physhim); wlc_phy_noise_sample_request() 2536 if ((pi->sh->corerev >= 11) wlc_phy_rssi_compute() 2596 pi->sh->now++; wlc_phy_watchdog() 2607 if (pi->phynoise_state && (pi->sh->now - pi->phynoise_now) > 5) wlc_phy_watchdog() 2611 ((pi->sh->now - pi->phycal_txpower) >= pi->sh->fast_timer)) { wlc_phy_watchdog() 2614 pi->phycal_txpower = pi->sh->now; wlc_phy_watchdog() 2625 ((pi->sh->now - pi->nphy_perical_last) >= wlc_phy_watchdog() 2626 pi->sh->glacial_timer)) wlc_phy_watchdog() 2635 ((pi->sh->now - pi->phy_lastcal) >= wlc_phy_watchdog() 2636 pi->sh->glacial_timer)) { wlc_phy_watchdog() 2659 pi->sh->phy_noise_window[i] = (s8) (rssi & 0xff); wlc_phy_BSSinit() 2662 pi->sh->phy_noise_window[i] = wlc_phy_BSSinit() 2665 pi->sh->phy_noise_index = 0; wlc_phy_BSSinit() 2809 pi->sh->hw_phytxchain = txchain; wlc_phy_stf_chain_init() 2810 pi->sh->hw_phyrxchain = rxchain; wlc_phy_stf_chain_init() 2811 pi->sh->phytxchain = txchain; wlc_phy_stf_chain_init() 2812 pi->sh->phyrxchain = rxchain; wlc_phy_stf_chain_init() 2813 pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); wlc_phy_stf_chain_init() 2820 pi->sh->phytxchain = txchain; wlc_phy_stf_chain_set() 2825 pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); wlc_phy_stf_chain_set() 2832 *txchain = pi->sh->phytxchain; wlc_phy_stf_chain_get() 2833 *rxchain = pi->sh->phyrxchain; wlc_phy_stf_chain_get() 2848 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_stf_chain_active_get() 2850 wlapi_enable_mac(pi->sh->physhim); wlc_phy_stf_chain_active_get() 2894 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) && wlc_lcnphy_epa_switch() 2895 (pi->sh->boardflags & BFL_FEM)) { wlc_lcnphy_epa_switch() 2898 txant = wlapi_bmac_get_txant(pi->sh->physhim); wlc_lcnphy_epa_switch()
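The phy_noise_window updates in wlc_phy_noise_cb() above are a fixed-size ring buffer: each noise sample is stored at phy_noise_index, which then advances modulo the window size (MODINC in the driver). A rough sketch of that pattern, assuming an illustrative window size and helper names:

#include <stdio.h>

#define WINDOW_SZ 8                              /* stand-in for MA_WINDOW_SZ */

struct noise_state {
        signed char window[WINDOW_SZ];
        int index;
};

static void noise_push(struct noise_state *s, signed char dbm)
{
        s->window[s->index] = dbm;
        s->index = (s->index + 1) % WINDOW_SZ;   /* wrap, as MODINC() does */
}

static int noise_avg(const struct noise_state *s)
{
        int sum = 0;
        for (int i = 0; i < WINDOW_SZ; i++)
                sum += s->window[i];
        return sum / WINDOW_SZ;
}

int main(void)
{
        struct noise_state s = { { 0 }, 0 };
        for (int i = 0; i < 20; i++)
                noise_push(&s, (signed char)(-90 - (i % 5)));
        printf("moving-average noise %d dBm\n", noise_avg(&s));
        return 0;
}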
|
/linux-4.1.27/arch/sh/ |
H A D | Makefile | 2 # arch/sh/Makefile 19 isa-$(CONFIG_SH_DSP) := sh 103 UTS_MACHINE := sh 128 head-y := arch/sh/kernel/head_$(BITS).o 130 core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/ 131 core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/ 155 core-y += $(addprefix arch/sh/boards/, \ 159 # Common machine type headers. Not part of the arch/sh/boards/ hierarchy. 163 core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/ 183 drivers-y += arch/sh/drivers/ 184 drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/ 186 boot := arch/sh/boot 188 cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \ 189 $(foreach d, $(machdir-y), -Iarch/sh/include/$(d)) 203 libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) 204 libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) 219 $(Q)$(MAKE) $(build)=arch/sh/tools include/generated/machtypes.h 223 $(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall
|
/linux-4.1.27/drivers/xen/events/ |
H A D | events_2l.c | 145 struct shared_info *sh, active_evtchns() 148 return sh->evtchn_pending[idx] & active_evtchns() 150 ~sh->evtchn_mask[idx]; active_evtchns() 266 struct shared_info *sh = HYPERVISOR_shared_info; xen_debug_interrupt() local 292 for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) 294 (int)sizeof(sh->evtchn_pending[0])*2, 295 sh->evtchn_pending[i], 298 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 300 (int)(sizeof(sh->evtchn_mask[0])*2), 301 sh->evtchn_mask[i], 305 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) 307 (int)(sizeof(sh->evtchn_mask[0])*2), 308 sh->evtchn_pending[i] & ~sh->evtchn_mask[i], 318 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { 319 xen_ulong_t pending = sh->evtchn_pending[i] 320 & ~sh->evtchn_mask[i] 323 (int)(sizeof(sh->evtchn_mask[0])*2), 329 if (sync_test_bit(i, BM(sh->evtchn_pending))) { 336 !sync_test_bit(i, BM(sh->evtchn_mask)) 144 active_evtchns(unsigned int cpu, struct shared_info *sh, unsigned int idx) active_evtchns() argument
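active_evtchns() above selects the event channels worth delivering in one word: pending bits, minus masked ones, restricted to channels bound to the querying CPU. A minimal sketch of that selection and of walking the resulting bit set; the per-CPU binding word below is an assumed stand-in for the driver's own per-CPU mask:

#include <stdio.h>

static unsigned long active(unsigned long pending, unsigned long mask,
                            unsigned long bound_to_this_cpu)
{
        return pending & bound_to_this_cpu & ~mask;     /* same shape as active_evtchns() */
}

int main(void)
{
        unsigned long pending = 0x0b;   /* channels 0, 1, 3 pending       */
        unsigned long mask    = 0x02;   /* channel 1 masked               */
        unsigned long thiscpu = 0x0f;   /* channels 0-3 bound to this CPU */

        for (unsigned long act = active(pending, mask, thiscpu); act; act &= act - 1)
                printf("deliver event channel %d\n", __builtin_ctzl(act));
        return 0;                       /* prints channel 0, then channel 3 */
}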
|
/linux-4.1.27/arch/sh/boot/romimage/ |
H A D | Makefile | 2 # linux/arch/sh/boot/romimage/Makefile 29 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
|
/linux-4.1.27/arch/sh/kernel/cpu/sh2a/ |
H A D | clock-sh7269.c | 2 * arch/sh/kernel/cpu/sh2a/clock-sh7269.c 153 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP47]), 154 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP46]), 155 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP45]), 156 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP44]), 157 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP43]), 158 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP42]), 159 CLKDEV_ICK_ID("sci_fck", "sh-sci.6", &mstp_clks[MSTP41]), 160 CLKDEV_ICK_ID("sci_fck", "sh-sci.7", &mstp_clks[MSTP40]), 161 CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]), 163 CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
|
H A D | setup-sh7201.c | 192 .name = "sh-sci", 213 .name = "sh-sci", 234 .name = "sh-sci", 255 .name = "sh-sci", 276 .name = "sh-sci", 297 .name = "sh-sci", 318 .name = "sh-sci", 339 .name = "sh-sci", 362 .name = "sh-rtc", 376 .name = "sh-mtu2",
|
H A D | setup-sh7264.c | 245 .name = "sh-sci", 271 .name = "sh-sci", 297 .name = "sh-sci", 323 .name = "sh-sci", 349 .name = "sh-sci", 375 .name = "sh-sci", 401 .name = "sh-sci", 427 .name = "sh-sci", 447 .name = "sh-cmt-16", 463 .name = "sh-mtu2", 483 .name = "sh-rtc",
|
H A D | setup-sh7203.c | 190 .name = "sh-sci", 213 .name = "sh-sci", 236 .name = "sh-sci", 259 .name = "sh-sci", 279 .name = "sh-cmt-16", 295 .name = "sh-mtu2", 315 .name = "sh-rtc",
|
H A D | clock-sh7264.c | 2 * arch/sh/kernel/cpu/sh2a/clock-sh7264.c 120 CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]), 122 CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
|
H A D | ex.S | 2 * arch/sh/kernel/cpu/sh2a/ex.S
|
H A D | setup-sh7269.c | 267 .name = "sh-sci", 293 .name = "sh-sci", 319 .name = "sh-sci", 345 .name = "sh-sci", 371 .name = "sh-sci", 397 .name = "sh-sci", 423 .name = "sh-sci", 449 .name = "sh-sci", 469 .name = "sh-cmt-16", 485 .name = "sh-mtu2", 505 .name = "sh-rtc",
|
/linux-4.1.27/arch/sh/kernel/cpu/sh4a/ |
H A D | clock-sh7786.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7786.c 142 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]), 143 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]), 144 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]), 145 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]), 146 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]), 147 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]), 158 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), 159 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]), 160 CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP010]), 161 CLKDEV_ICK_ID("fck", "sh-tmu.3", &mstp_clks[MSTP011]),
|
H A D | clock-shx3.c | 2 * arch/sh/kernel/cpu/sh4/clock-shx3.c 117 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]), 118 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]), 119 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]), 120 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]), 127 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), 128 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
|
H A D | clock-sh7785.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7785.c 135 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]), 136 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]), 137 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]), 138 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]), 139 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]), 140 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]), 149 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), 150 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
|
H A D | clock-sh7723.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7723.c 235 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]), 239 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), 240 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), 241 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]), 261 CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]), 267 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]), 268 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]), 270 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]), 271 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]), 272 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]), 273 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]), 274 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]), 275 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
|
H A D | clock-sh7757.c | 2 * arch/sh/kernel/cpu/sh4/clock-sh7757.c 126 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP113]), 127 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP114]), 128 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]), 129 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]), 130 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
|
H A D | clock-sh7724.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7724.c 302 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]), 307 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]), 308 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]), 310 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), 311 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), 312 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]), 314 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]), 315 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), 316 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), 317 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]), 318 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]), 319 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]), 346 CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
|
H A D | clock-sh7734.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7734.c 197 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP026]), 198 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]), 199 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP024]), 200 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP023]), 201 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP022]), 202 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP021]), 204 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), 205 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]), 206 CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP014]),
|
H A D | clock-sh7722.c | 2 * arch/sh/kernel/cpu/sh4a/clock-sh7722.c 206 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]), 208 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), 209 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), 212 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]), 213 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), 214 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), 223 CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
|
H A D | setup-sh7770.c | 30 .name = "sh-sci", 51 .name = "sh-sci", 72 .name = "sh-sci", 93 .name = "sh-sci", 114 .name = "sh-sci", 135 .name = "sh-sci", 156 .name = "sh-sci", 177 .name = "sh-sci", 198 .name = "sh-sci", 219 .name = "sh-sci", 240 .name = "sh-tmu", 261 .name = "sh-tmu", 282 .name = "sh-tmu",
|
H A D | setup-sh7734.c | 2 * arch/sh/kernel/cpu/sh4a/setup-sh7734.c 40 .name = "sh-sci", 62 .name = "sh-sci", 84 .name = "sh-sci", 106 .name = "sh-sci", 128 .name = "sh-sci", 150 .name = "sh-sci", 174 .name = "sh-rtc", 214 .name = "sh-tmu", 235 .name = "sh-tmu", 256 .name = "sh-tmu",
|
/linux-4.1.27/lib/mpi/ |
H A D | longlong.h | 114 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 117 : "=r" ((USItype)(sh)), \ 123 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 126 : "=r" ((USItype)(sh)), \ 176 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 179 : "=r" ((USItype)(sh)), \ 185 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 188 : "=r" ((USItype)(sh)), \ 263 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 266 : "=g" ((USItype)(sh)), \ 272 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 275 : "=g" ((USItype)(sh)), \ 300 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 303 : "=r" ((USItype)(sh)), \ 309 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 312 : "=r" ((USItype)(sh)), \ 397 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 400 : "=r" ((USItype)(sh)), \ 406 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 409 : "=r" ((USItype)(sh)), \ 450 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 454 : "=r" ((USItype)(sh)), \ 460 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 464 : "=r" ((USItype)(sh)), \ 515 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 518 : "=d" ((USItype)(sh)), \ 524 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 527 : "=d" ((USItype)(sh)), \ 594 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 597 : "=r" ((USItype)(sh)), \ 603 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 606 : "=r" ((USItype)(sh)), \ 739 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 743 : "=r" ((USItype)(sh)), \ 750 : "=r" ((USItype)(sh)), \ 757 : "=r" ((USItype)(sh)), \ 764 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 768 : "=r" ((USItype)(sh)), \ 775 : "=r" ((USItype)(sh)), \ 782 : "=r" ((USItype)(sh)), \ 789 : "=r" ((USItype)(sh)), \ 796 : "=r" ((USItype)(sh)), \ 857 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 860 : "=r" ((USItype)(sh)), \ 866 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 869 : "=r" ((USItype)(sh)), \ 892 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 895 : "=r" ((USItype)(sh)), \ 901 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 904 : "=r" ((USItype)(sh)), \ 968 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 971 : "=r" ((USItype)(sh)), \ 978 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 981 : "=r" ((USItype)(sh)), \ 1163 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 1166 : "=g" ((USItype)(sh)), \ 1172 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 1175 : "=g" ((USItype)(sh)), \ 1211 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 1213 : "=r" ((unsigned int)(sh)), \ 1219 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 1221 : "=r" ((unsigned int)(sh)), \ 1268 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 1272 (sh) = (ah) + (bh) + (__x < (al)); \ 1278 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 1282 (sh) = (ah) - (bh) - (__x > (al)); \
|
/linux-4.1.27/arch/xtensa/include/asm/ |
H A D | unistd.h | 13 * Ignore legacy system calls in the checksyscalls.sh script
|
/linux-4.1.27/arch/sh/math-emu/ |
H A D | sfp-util.h | 5 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 9 (sh) = (ah) + (bh) + (__x < (al)); \ 13 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 17 (sh) = (ah) - (bh) - (__x > (al)); \
|
/linux-4.1.27/arch/sparc/math-emu/ |
H A D | sfp-util_64.h | 14 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 20 : "=r" (sh), \ 28 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 34 : "=r" (sh), \
|
H A D | sfp-util_32.h | 6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 9 : "=r" (sh), \ 16 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 19 : "=r" (sh), \
|
/linux-4.1.27/arch/sh/boards/mach-se/ |
H A D | board-se7619.c | 2 * arch/sh/boards/se/7619/setup.c
|
/linux-4.1.27/arch/sh/include/cpu-common/cpu/ |
H A D | mmu_context.h | 2 * include/asm-sh/cpu-sh2/mmu_context.h
|
/linux-4.1.27/arch/sh/include/cpu-sh2/cpu/ |
H A D | freq.h | 2 * include/asm-sh/cpu-sh2/freq.h
|
H A D | cache.h | 2 * include/asm-sh/cpu-sh2/cache.h
|
/linux-4.1.27/arch/sh/include/cpu-sh2a/cpu/ |
H A D | freq.h | 2 * include/asm-sh/cpu-sh2a/freq.h
|
H A D | cache.h | 2 * include/asm-sh/cpu-sh2a/cache.h
|
/linux-4.1.27/arch/sh/include/mach-sh03/mach/ |
H A D | io.h | 2 * include/asm-sh/sh03/io.h
|
H A D | sh03.h | 5 * linux/include/asm-sh/sh03/sh03.h
|
/linux-4.1.27/arch/frv/boot/ |
H A D | Makefile | 64 sh ./install.sh $(KERNELRELEASE) Image System.map "$(INSTALL_PATH)" 67 sh ./install.sh $(KERNELRELEASE) zImage System.map "$(INSTALL_PATH)"
|
/linux-4.1.27/scripts/ |
H A D | headers.sh | 1 #!/bin/sh
|
H A D | makelst | 1 #!/bin/sh
|
/linux-4.1.27/tools/testing/selftests/memfd/ |
H A D | Makefile | 19 @./run_fuse_test.sh || echo "fuse_test: [FAIL]"
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | uasm.h | 207 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh) 209 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) 210 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) 211 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) 212 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh) 223 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh) 225 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) 226 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) 227 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) 228 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
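The UASM_i_* wrappers above pick the doubleword (uasm_i_d*) or word form of each micro-assembler op at compile time, so handler-generation code can be written once for 32- and 64-bit kernels. A rough userspace illustration of the same aliasing pattern; emit_dsll()/emit_sll() are stand-ins rather than kernel functions, and the buf argument of the real wrappers is dropped.

#include <stdio.h>

static void emit_dsll(unsigned int rt, unsigned int rs, unsigned int sh)
{
	printf("dsll $%u, $%u, %u\n", rt, rs, sh);   /* 64-bit shift */
}

static void emit_sll(unsigned int rt, unsigned int rs, unsigned int sh)
{
	printf("sll $%u, $%u, %u\n", rt, rs, sh);    /* 32-bit shift */
}

/* Pick the doubleword op when building for 64-bit, mirroring the
 * CONFIG_64BIT split in uasm.h. */
#ifdef CONFIG_64BIT
# define EMIT_SLL(rt, rs, sh) emit_dsll(rt, rs, sh)
#else
# define EMIT_SLL(rt, rs, sh) emit_sll(rt, rs, sh)
#endif

int main(void)
{
	EMIT_SLL(2, 3, 16);
	return 0;
}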
|
/linux-4.1.27/net/netfilter/ |
H A D | xt_sctp.c | 121 const sctp_sctphdr_t *sh; sctp_mt() local 129 sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); sctp_mt() 130 if (sh == NULL) { sctp_mt() 135 pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); sctp_mt() 137 return SCCHECK(ntohs(sh->source) >= info->spts[0] sctp_mt() 138 && ntohs(sh->source) <= info->spts[1], sctp_mt() 140 && SCCHECK(ntohs(sh->dest) >= info->dpts[0] sctp_mt() 141 && ntohs(sh->dest) <= info->dpts[1], sctp_mt()
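Stripped of the skb plumbing and the SCCHECK() inversion flags, the port test in sctp_mt() above reduces to a pair of inclusive range checks on the SCTP source and destination ports. A condensed userspace sketch; the struct and function names here are illustrative.

#include <stdbool.h>
#include <stdint.h>

struct sctp_port_ranges {
	uint16_t spts[2];	/* source port min/max, host byte order */
	uint16_t dpts[2];	/* destination port min/max */
};

/* Match when both ports fall inside their configured ranges; the real
 * match additionally honours per-range invert flags via SCCHECK(). */
static bool sctp_ports_match(uint16_t sport, uint16_t dport,
			     const struct sctp_port_ranges *info)
{
	return sport >= info->spts[0] && sport <= info->spts[1] &&
	       dport >= info->dpts[0] && dport <= info->dpts[1];
}

int main(void)
{
	struct sctp_port_ranges r = { { 1024, 65535 }, { 80, 80 } };

	return sctp_ports_match(5000, 80, &r) ? 0 : 1;
}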
|
H A D | nf_conntrack_proto_sctp.c | 312 const struct sctphdr *sh; sctp_packet() local 319 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); sctp_packet() 320 if (sh == NULL) sctp_packet() 332 sh->vtag != ct->proto.sctp.vtag[dir]) { sctp_packet() 343 if (sh->vtag != 0) for_each_sctp_chunk() 347 if (sh->vtag != ct->proto.sctp.vtag[dir] && for_each_sctp_chunk() 348 sh->vtag != ct->proto.sctp.vtag[!dir]) for_each_sctp_chunk() 352 if (sh->vtag != ct->proto.sctp.vtag[dir] && for_each_sctp_chunk() 353 sh->vtag != ct->proto.sctp.vtag[!dir] && for_each_sctp_chunk() 358 if (sh->vtag != ct->proto.sctp.vtag[dir]) for_each_sctp_chunk() 416 const struct sctphdr *sh; sctp_new() local 423 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); sctp_new() 424 if (sh == NULL) sctp_new() 452 if (sh->vtag == 0) { for_each_sctp_chunk() 474 sh->vtag); for_each_sctp_chunk() 475 ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; for_each_sctp_chunk()
|
/linux-4.1.27/drivers/sh/superhyway/ |
H A D | superhyway-sysfs.c | 2 * drivers/sh/superhyway/superhyway-sysfs.c 6 * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
|
/linux-4.1.27/arch/sh/boards/mach-dreamcast/ |
H A D | setup.c | 2 * arch/sh/boards/dreamcast/setup.c 7 * Copyright (c) 2002, 2003, 2004 Paul Mundt <lethal@linux-sh.org>
|
/linux-4.1.27/arch/sh/drivers/pci/ |
H A D | fixups-snapgear.c | 2 * arch/sh/drivers/pci/ops-snapgear.c 6 * Ported to new API by Paul Mundt <lethal@linux-sh.org>
|
H A D | fixups-titan.c | 2 * arch/sh/drivers/pci/ops-titan.c 4 * Ported to new API by Paul Mundt <lethal@linux-sh.org>
|
H A D | fixups-r7780rp.c | 2 * arch/sh/drivers/pci/fixups-r7780rp.c
|
H A D | fixups-sdk7780.c | 2 * arch/sh/drivers/pci/fixups-sdk7780.c
|
/linux-4.1.27/arch/sh/include/cpu-sh4/cpu/ |
H A D | sq.h | 2 * include/asm-sh/cpu-sh4/sq.h 30 /* arch/sh/kernel/cpu/sh4/sq.c */
|
H A D | fpu.h | 2 * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h
|
H A D | cache.h | 2 * include/asm-sh/cpu-sh4/cache.h
|
H A D | watchdog.h | 2 * include/asm-sh/cpu-sh4/watchdog.h
|
/linux-4.1.27/arch/sh/include/mach-common/mach/ |
H A D | sh7763rdp.h | 5 * linux/include/asm-sh/sh7763drp.h 48 /* arch/sh/boards/renesas/sh7763rdp/irq.c */
|
H A D | secureedge5410.h | 2 * include/asm-sh/snapgear.h
|
/linux-4.1.27/arch/arm64/boot/ |
H A D | Makefile | 26 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 30 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
/linux-4.1.27/tools/testing/selftests/x86/ |
H A D | Makefile | 17 CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) 18 CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
|
/linux-4.1.27/include/net/sctp/ |
H A D | checksum.h | 63 struct sctphdr *sh = sctp_hdr(skb); sctp_compute_cksum() local 64 __le32 ret, old = sh->checksum; sctp_compute_cksum() 70 sh->checksum = 0; sctp_compute_cksum() 73 sh->checksum = old; sctp_compute_cksum()
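sctp_compute_cksum() above shows a common pattern: the checksum field has to be zero while the digest is computed, so the old value is saved, the field is cleared, the CRC is taken over the whole packet, and the field is restored so the caller's buffer is unchanged. A flat-buffer sketch of the same idea; the bit-at-a-time crc32c() helper, the final inversion and the byte offset of the field are assumptions of this sketch, not the kernel helpers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal bit-at-a-time CRC32c (Castagnoli) so the sketch is self-contained;
 * the kernel uses optimized or hardware-assisted helpers instead. */
static uint32_t crc32c(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
	}
	return crc;
}

/* Zero-then-restore, as in sctp_compute_cksum(); the SCTP common header
 * keeps its checksum after source, dest and vtag, i.e. at byte offset 8. */
static uint32_t compute_sctp_cksum(uint8_t *packet, size_t len)
{
	uint32_t old, zero = 0, ret;

	memcpy(&old, packet + 8, sizeof(old));     /* save caller's value */
	memcpy(packet + 8, &zero, sizeof(zero));   /* field must be 0 while hashing */
	ret = ~crc32c(~0u, packet, len);
	memcpy(packet + 8, &old, sizeof(old));     /* leave the packet untouched */
	return ret;
}

int main(void)
{
	uint8_t pkt[16] = { 0 };

	printf("%08x\n", compute_sctp_cksum(pkt, sizeof(pkt)));
	return 0;
}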
|
/linux-4.1.27/arch/sh/boards/ |
H A D | board-shmin.c | 2 * arch/sh/boards/shmin/setup.c
|
H A D | board-titan.c | 2 * arch/sh/boards/titan/setup.c - Setup for Titan
|
H A D | board-edosk7705.c | 2 * arch/sh/boards/renesas/edosk7705/setup.c
|
/linux-4.1.27/arch/sh/boards/mach-lboxre2/ |
H A D | irq.c | 2 * linux/arch/sh/boards/lboxre2/irq.c
|
/linux-4.1.27/arch/sh/include/cpu-sh3/cpu/ |
H A D | freq.h | 2 * include/asm-sh/cpu-sh3/freq.h
|
H A D | watchdog.h | 2 * include/asm-sh/cpu-sh3/watchdog.h
|
H A D | cache.h | 2 * include/asm-sh/cpu-sh3/cache.h
|
H A D | mmu_context.h | 2 * include/asm-sh/cpu-sh3/mmu_context.h
|
/linux-4.1.27/arch/sh/include/mach-dreamcast/mach/ |
H A D | dma.h | 2 * include/asm-sh/dreamcast/dma.h
|
H A D | pci.h | 2 * include/asm-sh/dreamcast/pci.h
|
H A D | sysasic.h | 1 /* include/asm-sh/dreamcast/sysasic.h 6 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> 42 /* arch/sh/boards/mach-dreamcast/irq.c */
|
/linux-4.1.27/arch/m32r/mm/ |
H A D | init.c | 6 * Some code taken from sh version. 110 * orig : arch/sh/mm/init.c 132 * orig : arch/sh/mm/init.c 142 * orig : arch/sh/mm/init.c
|
/linux-4.1.27/sound/ |
H A D | Makefile | 8 obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
|
/linux-4.1.27/tools/testing/selftests/exec/ |
H A D | Makefile | 9 echo '#!/bin/sh' > $@
|
/linux-4.1.27/arch/x86/syscalls/ |
H A D | Makefile | 11 syshdr := $(srctree)/$(src)/syscallhdr.sh 12 systbl := $(srctree)/$(src)/syscalltbl.sh 53 $(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
|
/linux-4.1.27/arch/sh/kernel/cpu/sh5/ |
H A D | setup-sh5.c | 33 .name = "sh-sci", 66 .name = "sh-rtc", 87 .name = "sh-tmu",
|
/linux-4.1.27/arch/sh/kernel/cpu/sh3/ |
H A D | clock-sh3.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh3.c 8 * FRQCR parsing hacked out of arch/sh/kernel/time.c 13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
H A D | clock-sh7705.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh7705.c 8 * FRQCR parsing hacked out of arch/sh/kernel/time.c 13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
H A D | clock-sh7710.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh7710.c 8 * FRQCR parsing hacked out of arch/sh/kernel/time.c 13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
H A D | setup-sh7705.c | 87 .name = "sh-sci", 110 .name = "sh-sci", 136 .name = "sh-rtc", 157 .name = "sh-tmu-sh3",
|
H A D | clock-sh7706.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh7706.c 8 * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c
|
H A D | clock-sh7709.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh7709.c 8 * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c
|
H A D | clock-sh7712.c | 2 * arch/sh/kernel/cpu/sh3/clock-sh7712.c 8 * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
|
H A D | setup-sh770x.c | 105 .name = "sh-rtc", 126 .name = "sh-sci", 151 .name = "sh-sci", 177 .name = "sh-sci", 199 .name = "sh-tmu-sh3",
|
H A D | setup-sh7710.c | 91 .name = "sh-rtc", 113 .name = "sh-sci", 135 .name = "sh-sci", 156 .name = "sh-tmu-sh3",
|
/linux-4.1.27/arch/sh/kernel/cpu/sh4/ |
H A D | clock-sh4.c | 2 * arch/sh/kernel/cpu/sh4/clock-sh4.c 8 * FRQCR parsing hacked out of arch/sh/kernel/time.c 13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
|
/linux-4.1.27/arch/parisc/ |
H A D | install.sh | 1 #!/bin/sh 3 # arch/parisc/install.sh, derived from arch/i386/boot/install.sh
|
/linux-4.1.27/arch/blackfin/boot/ |
H A D | install.sh | 1 #!/bin/sh 3 # arch/blackfin/boot/install.sh 12 # Adapted from code in arch/i386/boot/install.sh by Mike Frysinger
|
/linux-4.1.27/arch/arm/boot/ |
H A D | install.sh | 1 #!/bin/sh 3 # arch/arm/boot/install.sh 12 # Adapted from code in arch/i386/boot/install.sh by Russell King
|
H A D | Makefile | 99 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ 103 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ 107 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ 111 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \ 115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
/linux-4.1.27/usr/ |
H A D | Makefile | 41 initramfs := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh 51 # The dependency list is generated by gen_initramfs.sh -l 71 # 4) arguments to gen_initramfs.sh changes
|
/linux-4.1.27/drivers/message/fusion/ |
H A D | mptfc.c | 209 ioc->name, ioc->sh->host_no, mptfc_block_error_handler() 223 ioc->name, ioc->sh->host_no, mptfc_block_error_handler() 230 ioc->name, ioc->sh->host_no, mptfc_block_error_handler() 469 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); mptfc_register_dev() 497 ioc->sh->host_no, mptfc_register_dev() 990 struct Scsi_Host *sh; mptfc_init_host_attr() local 998 sh = ioc->sh; mptfc_init_host_attr() 1000 sn = fc_host_symbolic_name(sh); mptfc_init_host_attr() 1006 fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN; mptfc_init_host_attr() 1008 fc_host_maxframe_size(sh) = pp0->MaxFrameSize; mptfc_init_host_attr() 1010 fc_host_node_name(sh) = mptfc_init_host_attr() 1013 fc_host_port_name(sh) = mptfc_init_host_attr() 1016 fc_host_port_id(sh) = pp0->PortIdentifier; mptfc_init_host_attr() 1025 fc_host_supported_classes(sh) = cos; mptfc_init_host_attr() 1037 fc_host_speed(sh) = speed; mptfc_init_host_attr() 1048 fc_host_supported_speeds(sh) = speed; mptfc_init_host_attr() 1055 fc_host_port_state(sh) = port_state; mptfc_init_host_attr() 1066 fc_host_port_type(sh) = port_type; mptfc_init_host_attr() 1068 fc_host_fabric_name(sh) = mptfc_init_host_attr() 1115 ioc->sh->host_no, mptfc_setup_reset() 1170 ioc->sh->host_no, mptfc_rescan_devices() 1179 struct Scsi_Host *sh; mptfc_probe() local 1231 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); mptfc_probe() 1233 if (!sh) { mptfc_probe() 1250 ioc->sh = sh; mptfc_probe() 1252 sh->io_port = 0; mptfc_probe() 1253 sh->n_io_port = 0; mptfc_probe() 1254 sh->irq = 0; mptfc_probe() 1257 sh->max_cmd_len = 16; mptfc_probe() 1259 sh->max_id = ioc->pfacts->MaxDevices; mptfc_probe() 1260 sh->max_lun = max_lun; mptfc_probe() 1264 sh->unique_id = ioc->id; mptfc_probe() 1286 if (numSGE < sh->sg_tablesize) { mptfc_probe() 1290 ioc->name, numSGE, sh->sg_tablesize)); mptfc_probe() 1291 sh->sg_tablesize = numSGE; mptfc_probe() 1296 hd = shost_priv(sh); mptfc_probe() 1314 sh->transportt = mptfc_transport_template; mptfc_probe() 1315 error = scsi_add_host (sh, &ioc->pcidev->dev); mptfc_probe() 1325 "mptfc_wq_%d", sh->host_no); mptfc_probe() 1382 if (ioc->sh == NULL || mptfc_event_process() 1383 ((hd = shost_priv(ioc->sh)) == NULL)) mptfc_event_process() 1512 fc_remove_host(ioc->sh); mptfc_remove()
|
H A D | mptspi.c | 1122 struct Scsi_Host *shost = ioc->sh; mpt_work_wrapper() 1153 scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1); 1163 shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT mpt_dv_raid() 1179 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_event_process() 1276 shost_for_each_device(sdev, ioc->sh) { mptspi_dv_renegotiate_work() 1287 shost_for_each_device(sdev, ioc->sh) mptspi_dv_renegotiate_work() 1319 * if we get an ioc fault on bringup, ioc->sh will be NULL */ mptspi_ioc_reset() 1321 ioc->sh) { mptspi_ioc_reset() 1322 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_ioc_reset() 1338 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_resume() 1360 struct Scsi_Host *sh; mptspi_probe() local 1412 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); mptspi_probe() 1414 if (!sh) { mptspi_probe() 1425 sh->no_write_same = 1; mptspi_probe() 1431 ioc->sh = sh; mptspi_probe() 1433 sh->io_port = 0; mptspi_probe() 1434 sh->n_io_port = 0; mptspi_probe() 1435 sh->irq = 0; mptspi_probe() 1438 sh->max_cmd_len = 16; mptspi_probe() 1450 sh->max_id = ioc->devices_per_bus; mptspi_probe() 1452 sh->max_lun = MPT_LAST_LUN + 1; mptspi_probe() 1457 sh->max_channel = 1; mptspi_probe() 1459 sh->max_channel = 0; mptspi_probe() 1460 sh->this_id = ioc->pfacts[0].PortSCSIID; mptspi_probe() 1464 sh->unique_id = ioc->id; mptspi_probe() 1486 if (numSGE < sh->sg_tablesize) { mptspi_probe() 1490 ioc->name, numSGE, sh->sg_tablesize)); mptspi_probe() 1491 sh->sg_tablesize = numSGE; mptspi_probe() 1496 hd = shost_priv(sh); mptspi_probe() 1525 sh->transportt = mptspi_transport_template; mptspi_probe() 1527 error = scsi_add_host (sh, &ioc->pcidev->dev); mptspi_probe() 1542 scsi_scan_host(sh); mptspi_probe()
|
/linux-4.1.27/lib/ |
H A D | digsig.c | 193 struct signature_hdr *sh = (struct signature_hdr *)sig; digsig_verify() local 199 if (siglen < sizeof(*sh) + 2) digsig_verify() 202 if (sh->algo != PUBKEY_ALGO_RSA) digsig_verify() 205 sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); digsig_verify() 234 crypto_shash_update(desc, sig, sizeof(*sh)); digsig_verify() 240 err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), digsig_verify()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | sfp-machine.h | 209 * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al)) 216 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ 220 : "=r" ((USItype)(sh)), \ 227 : "=r" ((USItype)(sh)), \ 234 : "=r" ((USItype)(sh)), \ 243 * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al)) 251 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ 255 : "=r" ((USItype)(sh)), \ 262 : "=r" ((USItype)(sh)), \ 269 : "=r" ((USItype)(sh)), \ 276 : "=r" ((USItype)(sh)), \ 283 : "=r" ((USItype)(sh)), \
|
/linux-4.1.27/arch/arm/mach-shmobile/ |
H A D | clock-r8a7779.c | 172 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), /* TMU0 */ 181 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */ 182 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */ 183 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */ 184 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */ 185 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */ 186 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF6 */ 187 CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */ 189 CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */ 191 CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */
|
H A D | clock-sh73a0.c | 631 CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ 633 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ 634 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 638 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ 640 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */ 641 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* MP-DMAC */ 642 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 644 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ 646 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ 648 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ 650 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */ 652 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */ 654 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ 656 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */ 692 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 693 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 694 CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), 695 CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]), 696 CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.0", &dsi0phy_clk), 697 CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.1", &dsi1phy_clk), 698 CLKDEV_ICK_ID("fck", "sh-cmt-48.1", &mstp_clks[MSTP329]), /* CMT1 */ 700 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), /* TMU0 */
|
H A D | clock-r8a7778.c | 200 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */ 202 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */ 204 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */ 206 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */ 208 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */ 210 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF6 */ 212 CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */ 214 CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */ 216 CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */ 242 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), 244 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]),
|
H A D | clock-r8a7740.c | 555 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), 557 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), 559 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), 561 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), 563 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), 565 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), 567 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), 569 CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), 570 CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), 571 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), 572 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), 573 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), 579 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), 602 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP111]), 604 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), 606 CLKDEV_ICK_ID("fck", "sh-cmt-48.1", &mstp_clks[MSTP329]), 613 CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
|
H A D | clock.c | 6 * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
|
/linux-4.1.27/drivers/target/ |
H A D | target_core_pscsi.c | 115 struct Scsi_Host *sh = phv->phv_lld_host; pscsi_pmode_enable_hba() local 120 if (!sh) pscsi_pmode_enable_hba() 127 " %s\n", hba->hba_id, (sh->hostt->name) ? pscsi_pmode_enable_hba() 128 (sh->hostt->name) : "Unknown"); pscsi_pmode_enable_hba() 130 scsi_host_put(sh); pscsi_pmode_enable_hba() 137 sh = scsi_host_lookup(phv->phv_host_id); pscsi_pmode_enable_hba() 138 if (!sh) { pscsi_pmode_enable_hba() 144 phv->phv_lld_host = sh; pscsi_pmode_enable_hba() 148 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); pscsi_pmode_enable_hba() 370 __releases(sh->host_lock) 374 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host 380 sh->host_no, sd->channel, sd->id, sd->lun); 381 spin_unlock_irq(sh->host_lock); 384 spin_unlock_irq(sh->host_lock); 406 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); 414 __releases(sh->host_lock) 417 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host 422 sh->host_no, sd->channel, sd->id, sd->lun); 423 spin_unlock_irq(sh->host_lock); 426 spin_unlock_irq(sh->host_lock); 434 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 445 __releases(sh->host_lock) 448 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host 451 spin_unlock_irq(sh->host_lock); 457 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, 468 struct Scsi_Host *sh = phv->phv_lld_host; pscsi_configure_device() local 484 if (!sh) { pscsi_configure_device() 516 sh = phv->phv_lld_host; pscsi_configure_device() 518 sh = scsi_host_lookup(pdv->pdv_host_id); pscsi_configure_device() 519 if (!sh) { pscsi_configure_device() 524 pdv->pdv_lld_host = sh; pscsi_configure_device() 534 spin_lock_irq(sh->host_lock); pscsi_configure_device() 535 list_for_each_entry(sd, &sh->__devices, siblings) { pscsi_configure_device() 559 scsi_host_put(sh); pscsi_configure_device() 569 spin_unlock_irq(sh->host_lock); pscsi_configure_device() 571 pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, pscsi_configure_device() 575 scsi_host_put(sh); pscsi_configure_device()
|
/linux-4.1.27/include/linux/ |
H A D | superhyway.h | 6 * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> 97 /* drivers/sh/superhyway/superhyway.c */ 103 /* drivers/sh/superhyway/superhyway-sysfs.c */
|
/linux-4.1.27/drivers/lguest/ |
H A D | Makefile | 18 @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
|
/linux-4.1.27/drivers/spi/ |
H A D | Makefile | 75 obj-$(CONFIG_SPI_SH) += spi-sh.o 76 obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o 77 obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o 78 obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
|
/linux-4.1.27/arch/sh/lib64/ |
H A D | strlen.S | 4 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
|
H A D | udelay.c | 2 * arch/sh/lib64/udelay.c
|
/linux-4.1.27/arch/sh/boards/mach-se/7721/ |
H A D | irq.c | 2 * linux/arch/sh/boards/se/7721/irq.c
|
/linux-4.1.27/arch/sh/boards/mach-se/7751/ |
H A D | irq.c | 2 * linux/arch/sh/boards/se/7751/irq.c
|
H A D | setup.c | 2 * linux/arch/sh/boards/se/7751/setup.c
|
/linux-4.1.27/arch/sh/boards/mach-sh7763rdp/ |
H A D | irq.c | 2 * linux/arch/sh/boards/renesas/sh7763rdp/irq.c
|
/linux-4.1.27/arch/sh/kernel/cpu/sh2/ |
H A D | ex.S | 2 * arch/sh/kernel/cpu/sh2/ex.S
|
H A D | probe.c | 2 * arch/sh/kernel/cpu/sh2/probe.c
|
H A D | setup-sh7619.c | 75 .name = "sh-sci", 96 .name = "sh-sci", 117 .name = "sh-sci", 166 .name = "sh-cmt-16",
|
/linux-4.1.27/tools/build/tests/ |
H A D | run.sh | 1 #!/bin/sh
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | Makefile | 62 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@ 67 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
|
H A D | mkcapflags.sh | 1 #!/bin/sh
|
/linux-4.1.27/arch/sparc/boot/ |
H A D | Makefile | 74 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \
|
H A D | install.sh | 1 #!/bin/sh
|
/linux-4.1.27/arch/sh/boards/mach-sh03/ |
H A D | setup.c | 2 * linux/arch/sh/boards/sh03/setup.c 24 /* arch/sh/boards/sh03/rtc.c */
|
/linux-4.1.27/arch/sh/drivers/dma/ |
H A D | dma-pvr2.c | 2 * arch/sh/drivers/dma/dma-pvr2.c 106 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
|
/linux-4.1.27/arch/sh/include/mach-se/mach/ |
H A D | se7724.h | 5 * linux/include/asm-sh/se7724.h 67 /* arch/sh/boards/se/7724/irq.c */
|
/linux-4.1.27/arch/nios2/boot/ |
H A D | Makefile | 59 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
|
H A D | install.sh | 1 #!/bin/sh
|
/linux-4.1.27/arch/avr32/boot/images/ |
H A D | Makefile | 54 sh $(srctree)/install-kernel.sh $<
|
/linux-4.1.27/tools/perf/config/ |
H A D | utilities.mak | 141 # (It's necessary to use `sh -c' because GNU make messes up by 144 lookup = $(call unescape-nl,$(shell sh -c $(_l-sh))) 145 _l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,)) 151 # (It's necessary to use `sh -c' because GNU make messes up by 155 _is-executable-helper = $(shell sh -c $(_is-executable-sh)) 156 _is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y)
|
/linux-4.1.27/fs/logfs/ |
H A D | super.c | 143 static void set_segment_header(struct logfs_segment_header *sh, u8 type, set_segment_header() argument 146 sh->pad = 0; set_segment_header() 147 sh->type = type; set_segment_header() 148 sh->level = level; set_segment_header() 149 sh->segno = cpu_to_be32(segno); set_segment_header() 150 sh->ec = cpu_to_be32(ec); set_segment_header() 151 sh->gec = cpu_to_be64(segno); set_segment_header() 152 sh->crc = logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4); set_segment_header() 159 struct logfs_segment_header *sh = &ds->ds_sh; logfs_write_ds() local 163 set_segment_header(sh, SEG_SUPER, 0, segno, ec); logfs_write_ds() 347 struct logfs_segment_header *sh = &ds->ds_sh; logfs_check_ds() local 351 if (sh->crc != logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4)) logfs_check_ds()
|
/linux-4.1.27/arch/x86/vdso/ |
H A D | vdso2c.h | 66 ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) + go() 68 if (GET_LE(&sh->sh_type) == SHT_SYMTAB) go() 69 symtab_hdr = sh; go() 71 if (!strcmp(secstrings + GET_LE(&sh->sh_name), go() 73 alt_sec = sh; go()
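The go() fragment above walks the ELF section-header table once, remembering the symbol table (SHT_SYMTAB) and one section looked up by name through the section-string table. A native-endian userspace sketch of the same scan; the GET_LE()/ELF() wrappers of the original handle endianness and 32/64-bit layouts and are omitted here, and the ".wanted" section name is purely illustrative.

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Scan the section headers of a 64-bit ELF image already mapped at 'image'. */
void scan_sections(const uint8_t *image)
{
	const Elf64_Ehdr *hdr = (const Elf64_Ehdr *)image;
	const Elf64_Shdr *shdrs = (const Elf64_Shdr *)(image + hdr->e_shoff);
	const char *secstrings =
		(const char *)(image + shdrs[hdr->e_shstrndx].sh_offset);
	const Elf64_Shdr *symtab_hdr = NULL, *wanted_sec = NULL;

	for (unsigned int i = 0; i < hdr->e_shnum; i++) {
		const Elf64_Shdr *sh = &shdrs[i];

		if (sh->sh_type == SHT_SYMTAB)
			symtab_hdr = sh;               /* remember the symbol table */
		if (!strcmp(secstrings + sh->sh_name, ".wanted"))
			wanted_sec = sh;               /* section found by name */
	}

	printf("symtab %s, .wanted %s\n",
	       symtab_hdr ? "found" : "missing",
	       wanted_sec ? "found" : "missing");
}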
|
/linux-4.1.27/net/netfilter/ipset/ |
H A D | ip_set_getport.c | 42 const sctp_sctphdr_t *sh; get_port() local 44 sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); get_port() 45 if (sh == NULL) get_port() 49 *port = src ? sh->source : sh->dest; get_port()
|
/linux-4.1.27/tools/testing/selftests/rcutorture/bin/ |
H A D | kvm.sh | 10 # Usage: kvm.sh [ options ] 33 T=/tmp/kvm.sh.$$ 51 . functions.sh 183 cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF` 322 print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &" 352 print "cat " rd cfr[j] "/kvm-test-1-run.sh.out"; 353 print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log"; 387 kvm-recheck.sh $resdir/$ds 403 sh $T/script
|
/linux-4.1.27/arch/powerpc/lib/ |
H A D | sstep.c | 648 unsigned int mb, me, sh; analyse_instr() local 897 sh = rb | ((instr & 2) << 4); analyse_instr() 898 val = ROTATE(val, sh); analyse_instr() 907 regs->gpr[ra] = val & MASK64(mb, 63 - sh); analyse_instr() 910 imm = MASK64(mb, 63 - sh); analyse_instr() 916 sh = regs->gpr[rb] & 0x3f; analyse_instr() 917 val = ROTATE(val, sh); analyse_instr() 977 for (sh = 0; sh < 8; ++sh) { analyse_instr() 978 if (instr & (0x80000 >> sh)) analyse_instr() 1221 sh = regs->gpr[rb] & 0x3f; analyse_instr() 1222 if (sh < 32) analyse_instr() 1223 regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL; analyse_instr() 1229 sh = regs->gpr[rb] & 0x3f; analyse_instr() 1230 if (sh < 32) analyse_instr() 1231 regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh; analyse_instr() 1237 sh = regs->gpr[rb] & 0x3f; analyse_instr() 1239 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); analyse_instr() 1240 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) analyse_instr() 1247 sh = rb; analyse_instr() 1249 regs->gpr[ra] = ival >> sh; analyse_instr() 1250 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) analyse_instr() 1258 sh = regs->gpr[rb] & 0x7f; analyse_instr() 1259 if (sh < 64) analyse_instr() 1260 regs->gpr[ra] = regs->gpr[rd] << sh; analyse_instr() 1266 sh = regs->gpr[rb] & 0x7f; analyse_instr() 1267 if (sh < 64) analyse_instr() 1268 regs->gpr[ra] = regs->gpr[rd] >> sh; analyse_instr() 1274 sh = regs->gpr[rb] & 0x7f; analyse_instr() 1276 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); analyse_instr() 1277 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) analyse_instr() 1285 sh = rb | ((instr & 2) << 4); analyse_instr() 1287 regs->gpr[ra] = ival >> sh; analyse_instr() 1288 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) analyse_instr()
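The analyse_instr() fragments above emulate the PowerPC rotate-and-mask and shift instructions: the shift amount sh is pulled from the instruction (or a register), the value is rotated with ROTATE() and masked with MASK64(), and for the algebraic right shifts the carry bit is set when the operand is negative and any one-bits were shifted out. A small userspace sketch of those two building blocks; the function names are illustrative, and right-shifting a negative signed value is implementation-defined in C (an arithmetic shift is assumed here, as the emulation code also assumes).

#include <stdint.h>
#include <stdio.h>

/* 64-bit rotate left, in the spirit of the ROTATE() used by the rld* cases. */
static uint64_t rotate64(uint64_t val, unsigned int sh)
{
	sh &= 63;
	return sh ? (val << sh) | (val >> (64 - sh)) : val;
}

/* srad-style shift: result plus the carry-out rule from the excerpt
 * (carry when the value is negative and the shifted-out bits are non-zero). */
static int64_t srad_emulated(int64_t ival, unsigned int sh, int *carry)
{
	*carry = ival < 0 &&
		 (sh >= 64 || (ival & ((1ull << sh) - 1)) != 0);
	return ival >> (sh < 64 ? sh : 63);
}

int main(void)
{
	int ca;

	printf("%016llx\n",
	       (unsigned long long)rotate64(0x00000000deadbeefULL, 16));
	printf("%lld ca=%d\n",
	       (long long)srad_emulated(-5, 1, &ca), ca);   /* -3, ca=1 */
	return 0;
}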
|
/linux-4.1.27/drivers/target/loopback/ |
H A D | tcm_loop.c | 191 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) tcm_loop_queuecommand() argument 389 struct Scsi_Host *sh; tcm_loop_driver_probe() local 394 sh = scsi_host_alloc(&tcm_loop_driver_template, tcm_loop_driver_probe() 396 if (!sh) { tcm_loop_driver_probe() 400 tl_hba->sh = sh; tcm_loop_driver_probe() 405 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; tcm_loop_driver_probe() 409 sh->max_id = 2; tcm_loop_driver_probe() 410 sh->max_lun = 0; tcm_loop_driver_probe() 411 sh->max_channel = 0; tcm_loop_driver_probe() 412 sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN; tcm_loop_driver_probe() 418 scsi_host_set_prot(sh, host_prot); tcm_loop_driver_probe() 419 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); tcm_loop_driver_probe() 421 error = scsi_add_host(sh, &tl_hba->dev); tcm_loop_driver_probe() 424 scsi_host_put(sh); tcm_loop_driver_probe() 433 struct Scsi_Host *sh; tcm_loop_driver_remove() local 436 sh = tl_hba->sh; tcm_loop_driver_remove() 438 scsi_remove_host(sh); tcm_loop_driver_remove() 439 scsi_host_put(sh); tcm_loop_driver_remove() 884 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); tcm_loop_port_link() 901 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, tcm_loop_port_unlink() 1287 struct Scsi_Host *sh; tcm_loop_make_scsi_hba() local 1332 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after tcm_loop_make_scsi_hba() 1339 sh = tl_hba->sh; tcm_loop_make_scsi_hba() 1343 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); tcm_loop_make_scsi_hba() 1359 tl_hba->tl_wwn_address, tl_hba->sh->host_no); tcm_loop_drop_scsi_hba()
|
H A D | tcm_loop.h | 60 struct Scsi_Host *sh; member in struct:tcm_loop_hba
|
/linux-4.1.27/net/netfilter/ipvs/ |
H A D | ip_vs_sh.c | 16 * The sh algorithm is to select server by the hash key of source IP 28 * array. If the sh scheduler is used in cache cluster, it is good to 286 sctp_sctphdr_t _sctph, *sh; ip_vs_sh_get_port() local 302 sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); ip_vs_sh_get_port() 303 if (unlikely(sh == NULL)) ip_vs_sh_get_port() 305 port = sh->source; ip_vs_sh_get_port() 357 .name = "sh",
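The comment block quoted above describes the source-hashing (sh) scheduler: the client's source address is hashed into a fixed lookup table of real servers, so the same client is consistently mapped to the same backend. A minimal userspace sketch of that idea; the table size, multiplicative hash and server names are assumptions of this sketch, and the real scheduler can also fold in the source port, honours weights and falls back when a server is unavailable.

#include <stdint.h>
#include <stdio.h>

#define SH_TAB_SIZE 256		/* illustrative table size */

/* Hash the IPv4 source address into a table slot. */
static unsigned int sh_hashkey(uint32_t saddr)
{
	return (saddr * 2654435761u) % SH_TAB_SIZE;
}

int main(void)
{
	const char *servers[SH_TAB_SIZE];

	/* populate the lookup table with two hypothetical backends */
	for (unsigned int i = 0; i < SH_TAB_SIZE; i++)
		servers[i] = (i & 1) ? "backend-a" : "backend-b";

	uint32_t client = 0xC0A80001u;		/* 192.168.0.1 */
	printf("client -> %s\n", servers[sh_hashkey(client)]);
	return 0;
}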
|
/linux-4.1.27/security/ |
H A D | lsm_audit.c | 90 struct sctphdr *sh = sctp_hdr(skb); ipv4_skb_to_auditdata() local 91 if (sh == NULL) ipv4_skb_to_auditdata() 93 ad->u.net->sport = sh->source; ipv4_skb_to_auditdata() 94 ad->u.net->dport = sh->dest; ipv4_skb_to_auditdata() 170 struct sctphdr _sctph, *sh; ipv6_skb_to_auditdata() local 172 sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); ipv6_skb_to_auditdata() 173 if (sh == NULL) ipv6_skb_to_auditdata() 175 ad->u.net->sport = sh->source; ipv6_skb_to_auditdata() 176 ad->u.net->dport = sh->dest; ipv6_skb_to_auditdata()
|
/linux-4.1.27/arch/sh/oprofile/ |
H A D | common.c | 2 * arch/sh/oprofile/init.c
|
/linux-4.1.27/arch/metag/oprofile/ |
H A D | common.c | 6 * Based on arch/sh/oprofile/common.c:
|
/linux-4.1.27/arch/sh/boards/mach-sdk7780/ |
H A D | irq.c | 2 * linux/arch/sh/boards/renesas/sdk7780/irq.c
|
/linux-4.1.27/arch/sh/boards/mach-se/7206/ |
H A D | setup.c | 3 * linux/arch/sh/boards/se/7206/setup.c
|
/linux-4.1.27/arch/sh/include/uapi/asm/ |
H A D | cpu-features.h | 8 * arch/sh/kernel/setup.c in sync so symbolic name
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | addrspace.h | 3 * include/asm-sh/addrspace.h
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | unistd.h | 17 * The following defines stop scripts/checksyscalls.sh from complaining about
|
/linux-4.1.27/arch/m68k/ |
H A D | install.sh | 1 #!/bin/sh
|
/linux-4.1.27/sound/soc/ |
H A D | Makefile | 34 obj-$(CONFIG_SND_SOC) += sh/
|
/linux-4.1.27/tools/perf/arch/ |
H A D | common.c | 32 "sh-unknown-linux-gnu-", 125 if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) normalize_arch() 126 return "sh"; normalize_arch() 173 else if (!strcmp(arch, "sh")) perf_session_env__lookup_binutils_path()
|
/linux-4.1.27/tools/testing/fault-injection/ |
H A D | failcmd.sh | 4 # failcmd.sh - run a command with injecting slab/page allocation failures 7 # failcmd.sh --help 8 # failcmd.sh [<options>] command [arguments] 96 TEMP=`getopt -o p:i:t:s:v:h --long $LONGOPTS -n 'failcmd.sh' -- "$@"`
|
/linux-4.1.27/tools/testing/selftests/ |
H A D | gen_kselftest_tar.sh | 47 ./kselftest_install.sh
|
/linux-4.1.27/arch/x86/um/vdso/ |
H A D | Makefile | 75 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
|
/linux-4.1.27/arch/sh/boot/compressed/ |
H A D | misc.c | 2 * arch/sh/boot/compressed/misc.c 74 /* This should be updated to use the sh-sci routines */ puts()
|