Searched refs:sh (Results 1 - 200 of 625) sorted by relevance


/linux-4.4.14/tools/testing/selftests/zram/
Makefile 3 TEST_PROGS := zram.sh
4 TEST_FILES := zram01.sh zram02.sh zram_lib.sh
zram.sh 2 TCID="zram.sh"
4 . ./zram_lib.sh
10 ./zram01.sh
12 ./zram02.sh
/linux-4.4.14/tools/testing/selftests/memory-hotplug/
Makefile 5 TEST_PROGS := mem-on-off-test.sh
6 override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]"
10 @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
/linux-4.4.14/tools/testing/selftests/cpu-hotplug/
Makefile 3 TEST_PROGS := cpu-on-off-test.sh
8 @/bin/bash ./cpu-on-off-test.sh -a || echo "cpu-hotplug selftests: [FAIL]"
/linux-4.4.14/arch/sh/include/mach-sdk7786/mach/
irq.h 4 /* arch/sh/boards/mach-sdk7786/irq.c */
/linux-4.4.14/tools/testing/selftests/firmware/
Makefile 6 TEST_PROGS := fw_filesystem.sh fw_userhelper.sh
/linux-4.4.14/arch/sh/include/cpu-sh4a/cpu/
serial.h 4 /* arch/sh/kernel/cpu/sh4a/serial-sh7722.c */
/linux-4.4.14/arch/sh/kernel/vsyscall/
vsyscall-syscall.S 7 .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
vsyscall.lds.S 9 OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
13 OUTPUT_ARCH(sh)
/linux-4.4.14/tools/testing/selftests/efivarfs/
Makefile 7 TEST_PROGS := efivarfs.sh
/linux-4.4.14/drivers/scsi/
aha1542.c 185 static int aha1542_test_port(struct Scsi_Host *sh) aha1542_test_port() argument
191 if (inb(STATUS(sh->io_port)) == 0xff) aha1542_test_port()
197 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ aha1542_test_port()
199 outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); aha1542_test_port()
204 if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) aha1542_test_port()
208 if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) aha1542_test_port()
214 aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_test_port()
217 if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) aha1542_test_port()
219 inquiry_result[i] = inb(DATA(sh->io_port)); aha1542_test_port()
223 if (inb(STATUS(sh->io_port)) & DF) aha1542_test_port()
227 if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) aha1542_test_port()
231 outb(IRST, CONTROL(sh->io_port)); aha1542_test_port()
238 struct Scsi_Host *sh = dev_id; aha1542_interrupt() local
239 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_interrupt()
251 flag = inb(INTRFLAGS(sh->io_port)); aha1542_interrupt()
252 shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); aha1542_interrupt()
263 printk("status %02x\n", inb(STATUS(sh->io_port))); aha1542_interrupt()
268 spin_lock_irqsave(sh->host_lock, flags); aha1542_interrupt()
270 flag = inb(INTRFLAGS(sh->io_port)); aha1542_interrupt()
284 aha1542_intr_reset(sh->io_port); aha1542_interrupt()
299 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_interrupt()
302 shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); aha1542_interrupt()
313 shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", aha1542_interrupt()
321 shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); aha1542_interrupt()
327 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_interrupt()
328 shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); aha1542_interrupt()
329 shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, aha1542_interrupt()
355 shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, aha1542_interrupt()
370 static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) aha1542_queuecommand() argument
372 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_queuecommand()
396 shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", aha1542_queuecommand()
411 spin_lock_irqsave(sh->host_lock, flags); aha1542_queuecommand()
433 shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); aha1542_queuecommand()
464 shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
484 spin_unlock_irqrestore(sh->host_lock, flags);
490 static void setup_mailboxes(struct Scsi_Host *sh) setup_mailboxes() argument
492 struct aha1542_hostdata *aha1542 = shost_priv(sh); setup_mailboxes()
503 aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ setup_mailboxes()
505 if (aha1542_out(sh->io_port, mb_cmd, 5)) setup_mailboxes()
506 shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); setup_mailboxes()
507 aha1542_intr_reset(sh->io_port); setup_mailboxes()
510 static int aha1542_getconfig(struct Scsi_Host *sh) aha1542_getconfig() argument
514 i = inb(STATUS(sh->io_port)); aha1542_getconfig()
516 i = inb(DATA(sh->io_port)); aha1542_getconfig()
518 aha1542_outb(sh->io_port, CMD_RETCONF); aha1542_getconfig()
519 aha1542_in(sh->io_port, inquiry_result, 3, 0); aha1542_getconfig()
520 if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) aha1542_getconfig()
521 shost_printk(KERN_ERR, sh, "error querying board settings\n"); aha1542_getconfig()
522 aha1542_intr_reset(sh->io_port); aha1542_getconfig()
525 sh->dma_channel = 7; aha1542_getconfig()
528 sh->dma_channel = 6; aha1542_getconfig()
531 sh->dma_channel = 5; aha1542_getconfig()
534 sh->dma_channel = 0; aha1542_getconfig()
539 sh->dma_channel = 0xFF; aha1542_getconfig()
542 shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); aha1542_getconfig()
547 sh->irq = 15; aha1542_getconfig()
550 sh->irq = 14; aha1542_getconfig()
553 sh->irq = 12; aha1542_getconfig()
556 sh->irq = 11; aha1542_getconfig()
559 sh->irq = 10; aha1542_getconfig()
562 sh->irq = 9; aha1542_getconfig()
565 shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); aha1542_getconfig()
568 sh->this_id = inquiry_result[2] & 7; aha1542_getconfig()
575 static int aha1542_mbenable(struct Scsi_Host *sh) aha1542_mbenable() argument
583 aha1542_outb(sh->io_port, CMD_EXTBIOS); aha1542_mbenable()
584 if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) aha1542_mbenable()
586 if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) aha1542_mbenable()
588 aha1542_intr_reset(sh->io_port); aha1542_mbenable()
598 if (aha1542_out(sh->io_port, mbenable_cmd, 3)) aha1542_mbenable()
603 shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); aha1542_mbenable()
605 aha1542_intr_reset(sh->io_port); aha1542_mbenable()
610 static int aha1542_query(struct Scsi_Host *sh) aha1542_query() argument
612 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_query()
615 i = inb(STATUS(sh->io_port)); aha1542_query()
617 i = inb(DATA(sh->io_port)); aha1542_query()
619 aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_query()
620 aha1542_in(sh->io_port, inquiry_result, 4, 0); aha1542_query()
621 if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) aha1542_query()
622 shost_printk(KERN_ERR, sh, "error querying card type\n"); aha1542_query()
623 aha1542_intr_reset(sh->io_port); aha1542_query()
634 shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); aha1542_query()
641 aha1542->bios_translation = aha1542_mbenable(sh); aha1542_query()
665 static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) aha1542_set_bus_times() argument
670 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times()
671 if (aha1542_out(sh->io_port, oncmd, 2)) aha1542_set_bus_times()
678 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times()
679 if (aha1542_out(sh->io_port, offcmd, 2)) aha1542_set_bus_times()
686 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times()
687 if (aha1542_out(sh->io_port, dmacmd, 2)) aha1542_set_bus_times()
690 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times()
693 shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); aha1542_set_bus_times()
694 aha1542_intr_reset(sh->io_port); aha1542_set_bus_times()
701 struct Scsi_Host *sh; aha1542_hw_init() local
711 sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); aha1542_hw_init()
712 if (!sh) aha1542_hw_init()
714 aha1542 = shost_priv(sh); aha1542_hw_init()
716 sh->unique_id = base_io; aha1542_hw_init()
717 sh->io_port = base_io; aha1542_hw_init()
718 sh->n_io_port = AHA1542_REGION_SIZE; aha1542_hw_init()
722 if (!aha1542_test_port(sh)) aha1542_hw_init()
725 aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); aha1542_hw_init()
726 if (aha1542_query(sh)) aha1542_hw_init()
728 if (aha1542_getconfig(sh) == -1) aha1542_hw_init()
731 if (sh->dma_channel != 0xFF) aha1542_hw_init()
732 snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); aha1542_hw_init()
733 shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", aha1542_hw_init()
734 sh->this_id, base_io, sh->irq, dma_info); aha1542_hw_init()
736 shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); aha1542_hw_init()
738 setup_mailboxes(sh); aha1542_hw_init()
740 if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { aha1542_hw_init()
741 shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); aha1542_hw_init()
744 if (sh->dma_channel != 0xFF) { aha1542_hw_init()
745 if (request_dma(sh->dma_channel, "aha1542")) { aha1542_hw_init()
746 shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); aha1542_hw_init()
749 if (sh->dma_channel == 0 || sh->dma_channel >= 5) { aha1542_hw_init()
750 set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); aha1542_hw_init()
751 enable_dma(sh->dma_channel); aha1542_hw_init()
755 if (scsi_add_host(sh, pdev)) aha1542_hw_init()
758 scsi_scan_host(sh); aha1542_hw_init()
760 return sh; aha1542_hw_init()
762 if (sh->dma_channel != 0xff) aha1542_hw_init()
763 free_dma(sh->dma_channel); aha1542_hw_init()
765 free_irq(sh->irq, sh); aha1542_hw_init()
767 scsi_host_put(sh); aha1542_hw_init()
774 static int aha1542_release(struct Scsi_Host *sh) aha1542_release() argument
776 scsi_remove_host(sh); aha1542_release()
777 if (sh->dma_channel != 0xff) aha1542_release()
778 free_dma(sh->dma_channel); aha1542_release()
779 if (sh->irq) aha1542_release()
780 free_irq(sh->irq, sh); aha1542_release()
781 if (sh->io_port && sh->n_io_port) aha1542_release()
782 release_region(sh->io_port, sh->n_io_port); aha1542_release()
783 scsi_host_put(sh); aha1542_release()
794 struct Scsi_Host *sh = cmd->device->host; aha1542_dev_reset() local
795 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_dev_reset()
803 spin_lock_irqsave(sh->host_lock, flags); aha1542_dev_reset()
840 aha1542_outb(sh->io_port, CMD_START_SCSI); aha1542_dev_reset()
841 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_dev_reset()
851 struct Scsi_Host *sh = cmd->device->host; aha1542_reset() local
852 struct aha1542_hostdata *aha1542 = shost_priv(sh); aha1542_reset()
856 spin_lock_irqsave(sh->host_lock, flags); aha1542_reset()
867 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_reset()
907 spin_unlock_irqrestore(sh->host_lock, flags); aha1542_reset()
959 struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); aha1542_isa_match() local
961 if (!sh) aha1542_isa_match()
964 dev_set_drvdata(pdev, sh); aha1542_isa_match()
995 struct Scsi_Host *sh; aha1542_pnp_probe() local
1012 sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); aha1542_pnp_probe()
1013 if (!sh) aha1542_pnp_probe()
1016 pnp_set_drvdata(pdev, sh); aha1542_pnp_probe()
eata_pio.c 113 static int eata_pio_release(struct Scsi_Host *sh) eata_pio_release() argument
115 hostdata *hd = SD(sh); eata_pio_release()
116 if (sh->irq && reg_IRQ[sh->irq] == 1) eata_pio_release()
117 free_irq(sh->irq, NULL); eata_pio_release()
119 reg_IRQ[sh->irq]--; eata_pio_release()
120 if (SD(sh)->channel == 0) { eata_pio_release()
121 if (sh->io_port && sh->n_io_port) eata_pio_release()
122 release_region(sh->io_port, sh->n_io_port); eata_pio_release()
166 struct Scsi_Host *sh; eata_pio_int_handler() local
171 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) eata_pio_int_handler()
173 if (sh->irq != irq) eata_pio_int_handler()
175 if (inb(sh->base + HA_RSTATUS) & HA_SBUSY) eata_pio_int_handler()
181 hd = SD(sh); eata_pio_int_handler()
309 struct Scsi_Host *sh; eata_pio_queue_lck() local
315 sh = cmd->device->host; eata_pio_queue_lck()
316 base = sh->base; eata_pio_queue_lck()
324 DBG(DBG_QUEUE, printk(KERN_EMERG "can_queue %d, x %d, y %d\n", sh->can_queue, x, y)); eata_pio_queue_lck()
399 "slot %d irq %d\n", sh->base, cmd, y, sh->irq)); eata_pio_queue_lck()
662 struct Scsi_Host *sh; register_pio_HBA() local
685 sh = scsi_register(&driver_template, size); register_pio_HBA()
686 if (sh == NULL) register_pio_HBA()
690 if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) { register_pio_HBA()
706 hd = SD(sh); register_pio_HBA()
711 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); register_pio_HBA()
712 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); register_pio_HBA()
713 SD(sh)->revision[0] = buff[32]; register_pio_HBA()
714 SD(sh)->revision[1] = buff[33]; register_pio_HBA()
715 SD(sh)->revision[2] = buff[34]; register_pio_HBA()
716 SD(sh)->revision[3] = '.'; register_pio_HBA()
717 SD(sh)->revision[4] = buff[35]; register_pio_HBA()
718 SD(sh)->revision[5] = 0; register_pio_HBA()
722 SD(sh)->EATA_revision = 'a'; register_pio_HBA()
725 SD(sh)->EATA_revision = 'b'; register_pio_HBA()
728 SD(sh)->EATA_revision = 'c'; register_pio_HBA()
731 SD(sh)->EATA_revision = 'z'; register_pio_HBA()
733 SD(sh)->EATA_revision = '?'; register_pio_HBA()
752 SD(sh)->cplen = cplen; register_pio_HBA()
753 SD(sh)->cppadlen = cppadlen; register_pio_HBA()
754 SD(sh)->hostid = gc->scsi_id[3]; register_pio_HBA()
755 SD(sh)->devflags = 1 << gc->scsi_id[3]; register_pio_HBA()
756 SD(sh)->moresupport = gc->MORE_support; register_pio_HBA()
757 sh->unique_id = base; register_pio_HBA()
758 sh->base = base; register_pio_HBA()
759 sh->io_port = base; register_pio_HBA()
760 sh->n_io_port = 9; register_pio_HBA()
761 sh->irq = gc->IRQ; register_pio_HBA()
762 sh->dma_channel = PIO; register_pio_HBA()
763 sh->this_id = gc->scsi_id[3]; register_pio_HBA()
764 sh->can_queue = 1; register_pio_HBA()
765 sh->cmd_per_lun = 1; register_pio_HBA()
766 sh->sg_tablesize = SG_ALL; register_pio_HBA()
772 sh->max_id = 8; register_pio_HBA()
773 sh->max_lun = 8; register_pio_HBA()
783 SD(hd->prev)->next = sh; register_pio_HBA()
784 last_HBA = sh; register_pio_HBA()
786 first_HBA = sh; register_pio_HBA()
u14-34f.c 609 static struct Scsi_Host *sh[MAX_BOARDS + 1]; variable in typeref:struct:Scsi_Host
630 #define HD(board) ((struct hostdata *) &sh[board]->hostdata)
763 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { board_inquiry()
771 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); board_inquiry()
774 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); board_inquiry()
777 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); board_inquiry()
891 sh[j] = scsi_register(tpnt, sizeof(struct hostdata)); port_detect()
894 if (sh[j] == NULL) { port_detect()
899 sh[j]->io_port = port_base; port_detect()
900 sh[j]->unique_id = port_base; port_detect()
901 sh[j]->n_io_port = REGION_SIZE; port_detect()
902 sh[j]->base = bios_segment_table[config_1.bios_segment]; port_detect()
903 sh[j]->irq = irq; port_detect()
904 sh[j]->sg_tablesize = MAX_SGLIST; port_detect()
905 sh[j]->this_id = config_2.ha_scsi_id; port_detect()
906 sh[j]->can_queue = MAX_MAILBOXES; port_detect()
907 sh[j]->cmd_per_lun = MAX_CMD_PER_LUN; port_detect()
913 sys_mask = inb(sh[j]->io_port + REG_SYS_MASK); port_detect()
914 lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK); port_detect()
920 if (sh[j]->this_id == 0) sh[j]->this_id = -1; port_detect()
923 if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK); port_detect()
932 if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST; port_detect()
935 sh[j]->unchecked_isa_dma = FALSE; port_detect()
936 sh[j]->dma_channel = NO_DMA; port_detect()
942 sh[j]->unchecked_isa_dma = TRUE; port_detect()
951 sh[j]->dma_channel = dma_channel; port_detect()
956 sh[j]->max_channel = MAX_CHANNEL - 1; port_detect()
957 sh[j]->max_id = MAX_TARGET; port_detect()
958 sh[j]->max_lun = MAX_LUN; port_detect()
967 sh[j]->hostt->use_clustering = DISABLE_CLUSTERING; port_detect()
968 sh[j]->sg_tablesize = MAX_SAFE_SGLIST; port_detect()
977 for (i = 0; i < sh[j]->can_queue; i++) port_detect()
981 for (i = 0; i < sh[j]->can_queue; i++) port_detect()
983 sh[j]->sg_tablesize * sizeof(struct sg_list), port_detect()
984 (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) { port_detect()
1005 BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base, port_detect()
1006 sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); port_detect()
1008 if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) port_detect()
1010 BN(j), sh[j]->max_id, sh[j]->max_lun); port_detect()
1012 for (i = 0; i <= sh[j]->max_channel; i++) port_detect()
1014 BN(j), i, sh[j]->this_id); port_detect()
1029 u14_34f_release(sh[j]); port_detect()
1099 for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL; u14_34f_detect()
1265 for (k = 0; k < sh[j]->can_queue; k++, i++) { u14_34f_queuecommand_lck()
1267 if (i >= sh[j]->can_queue) i = 0; u14_34f_queuecommand_lck()
1275 if (k == sh[j]->can_queue) { u14_34f_queuecommand_lck()
1313 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_queuecommand_lck()
1322 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); u14_34f_queuecommand_lck()
1325 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); u14_34f_queuecommand_lck()
1346 if (i >= sh[j]->can_queue) u14_34f_eh_abort()
1349 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_abort()
1366 if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED) u14_34f_eh_abort()
1404 spin_lock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1411 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1415 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_host_reset()
1417 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1423 for (c = 0; c <= sh[j]->max_channel; c++) u14_34f_eh_host_reset()
1424 for (k = 0; k < sh[j]->max_id; k++) { u14_34f_eh_host_reset()
1429 for (i = 0; i < sh[j]->can_queue; i++) { u14_34f_eh_host_reset()
1464 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { u14_34f_eh_host_reset()
1466 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1470 outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR); u14_34f_eh_host_reset()
1479 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1482 spin_lock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1486 for (i = 0; i < sh[j]->can_queue; i++) { u14_34f_eh_host_reset()
1526 spin_unlock_irq(sh[j]->host_lock); u14_34f_eh_host_reset()
1684 for (k = 0; k < sh[j]->can_queue; k++) { flush_dev()
1702 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { flush_dev()
1711 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); flush_dev()
1712 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); flush_dev()
1723 int irq = sh[j]->irq; ihdlr()
1726 if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none; ihdlr()
1734 if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) { ihdlr()
1735 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); ihdlr()
1741 ret = inl(sh[j]->io_port + REG_ICM); ihdlr()
1744 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); ihdlr()
1747 for (i = 0; i < sh[j]->can_queue; i++) ihdlr()
1750 if (i >= sh[j]->can_queue) ihdlr()
1862 for (c = 0; c <= sh[j]->max_channel; c++) ihdlr()
1863 for (k = 0; k < sh[j]->max_id; k++) ihdlr()
1936 spin_lock_irqsave(sh[j]->host_lock, spin_flags); do_interrupt_handler()
1938 spin_unlock_irqrestore(sh[j]->host_lock, spin_flags); do_interrupt_handler()
1945 for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++); u14_34f_release()
1947 if (sh[j] == NULL) u14_34f_release()
1950 for (i = 0; i < sh[j]->can_queue; i++) u14_34f_release()
1953 for (i = 0; i < sh[j]->can_queue; i++) u14_34f_release()
1957 free_irq(sh[j]->irq, &sha[j]); u14_34f_release()
1959 if (sh[j]->dma_channel != NO_DMA) u14_34f_release()
1960 free_dma(sh[j]->dma_channel); u14_34f_release()
1962 release_region(sh[j]->io_port, sh[j]->n_io_port); u14_34f_release()
1963 scsi_unregister(sh[j]); u14_34f_release()
wd719x.c 201 static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) wd719x_queuecommand() argument
206 struct wd719x *wd = shost_priv(sh); wd719x_queuecommand()
212 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_queuecommand()
218 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand()
221 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_queuecommand()
225 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand()
263 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_queuecommand()
288 spin_unlock_irqrestore(wd->sh->host_lock, flags);
297 spin_unlock_irqrestore(wd->sh->host_lock, flags);
474 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_abort()
477 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_abort()
493 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_reset()
496 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_reset()
521 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_host_reset()
533 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_host_reset()
658 spin_lock_irqsave(wd->sh->host_lock, flags); wd719x_interrupt()
666 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_interrupt()
703 spin_unlock_irqrestore(wd->sh->host_lock, flags); wd719x_interrupt()
803 static int wd719x_board_found(struct Scsi_Host *sh) wd719x_board_found() argument
805 struct wd719x *wd = shost_priv(sh); wd719x_board_found()
812 sh->base = pci_resource_start(wd->pdev, 0); wd719x_board_found()
816 wd->sh = sh; wd719x_board_found()
817 sh->irq = wd->pdev->irq; wd719x_board_found()
853 sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK; wd719x_board_found()
856 card_types[wd->type], sh->base, sh->irq, sh->this_id); wd719x_board_found()
891 struct Scsi_Host *sh; wd719x_pci_probe() local
913 sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); wd719x_pci_probe()
914 if (!sh) wd719x_pci_probe()
917 wd = shost_priv(sh); wd719x_pci_probe()
923 err = wd719x_board_found(sh); wd719x_pci_probe()
927 err = scsi_add_host(sh, &wd->pdev->dev); wd719x_pci_probe()
931 scsi_scan_host(sh); wd719x_pci_probe()
933 pci_set_drvdata(pdev, sh); wd719x_pci_probe()
941 scsi_host_put(sh); wd719x_pci_probe()
953 struct Scsi_Host *sh = pci_get_drvdata(pdev); wd719x_pci_remove() local
954 struct wd719x *wd = shost_priv(sh); wd719x_pci_remove()
956 scsi_remove_host(sh); wd719x_pci_remove()
962 scsi_host_put(sh); wd719x_pci_remove()
/linux-4.4.14/drivers/md/
raid5.c 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
188 static inline int raid6_d0(struct stripe_head *sh) raid6_d0() argument
190 if (sh->ddf_layout) raid6_d0()
194 if (sh->qd_idx == sh->disks - 1) raid6_d0()
197 return sh->qd_idx + 1; raid6_d0()
210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, raid6_idx_to_slot() argument
215 if (sh->ddf_layout) raid6_idx_to_slot()
217 if (idx == sh->pd_idx) raid6_idx_to_slot()
219 if (idx == sh->qd_idx) raid6_idx_to_slot()
221 if (!sh->ddf_layout) raid6_idx_to_slot()
239 static int stripe_operations_active(struct stripe_head *sh) stripe_operations_active() argument
241 return sh->check_state || sh->reconstruct_state || stripe_operations_active()
242 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || stripe_operations_active()
243 test_bit(STRIPE_COMPUTE_RUN, &sh->state); stripe_operations_active()
246 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) raid5_wakeup_stripe_thread() argument
248 struct r5conf *conf = sh->raid_conf; raid5_wakeup_stripe_thread()
251 int i, cpu = sh->cpu; raid5_wakeup_stripe_thread()
255 sh->cpu = cpu; raid5_wakeup_stripe_thread()
258 if (list_empty(&sh->lru)) { raid5_wakeup_stripe_thread()
261 list_add_tail(&sh->lru, &group->handle_list); raid5_wakeup_stripe_thread()
263 sh->group = group; raid5_wakeup_stripe_thread()
271 group = conf->worker_groups + cpu_to_group(sh->cpu); raid5_wakeup_stripe_thread()
275 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); raid5_wakeup_stripe_thread()
282 queue_work_on(sh->cpu, raid5_wq, raid5_wakeup_stripe_thread()
289 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, do_release_stripe() argument
292 BUG_ON(!list_empty(&sh->lru)); do_release_stripe()
294 if (test_bit(STRIPE_HANDLE, &sh->state)) { do_release_stripe()
295 if (test_bit(STRIPE_DELAYED, &sh->state) && do_release_stripe()
296 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) do_release_stripe()
297 list_add_tail(&sh->lru, &conf->delayed_list); do_release_stripe()
298 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && do_release_stripe()
299 sh->bm_seq - conf->seq_write > 0) do_release_stripe()
300 list_add_tail(&sh->lru, &conf->bitmap_list); do_release_stripe()
302 clear_bit(STRIPE_DELAYED, &sh->state); do_release_stripe()
303 clear_bit(STRIPE_BIT_DELAY, &sh->state); do_release_stripe()
305 list_add_tail(&sh->lru, &conf->handle_list); do_release_stripe()
307 raid5_wakeup_stripe_thread(sh); do_release_stripe()
313 BUG_ON(stripe_operations_active(sh)); do_release_stripe()
314 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) do_release_stripe()
319 if (!test_bit(STRIPE_EXPANDING, &sh->state)) do_release_stripe()
320 list_add_tail(&sh->lru, temp_inactive_list); do_release_stripe()
324 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, __release_stripe() argument
327 if (atomic_dec_and_test(&sh->count)) __release_stripe()
328 do_release_stripe(conf, sh, temp_inactive_list); __release_stripe()
384 struct stripe_head *sh; release_stripe_list() local
393 sh = llist_entry(head, struct stripe_head, release_list); release_stripe_list()
395 /* sh could be readded after STRIPE_ON_RELEASE_LIST is cleard */ release_stripe_list()
397 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); release_stripe_list()
403 hash = sh->hash_lock_index; release_stripe_list()
404 __release_stripe(conf, sh, &temp_inactive_list[hash]); release_stripe_list()
411 void raid5_release_stripe(struct stripe_head *sh) raid5_release_stripe() argument
413 struct r5conf *conf = sh->raid_conf; raid5_release_stripe()
421 if (atomic_add_unless(&sh->count, -1, 1)) raid5_release_stripe()
425 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) raid5_release_stripe()
427 wakeup = llist_add(&sh->release_list, &conf->released_stripes); raid5_release_stripe()
434 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { raid5_release_stripe()
436 hash = sh->hash_lock_index; raid5_release_stripe()
437 do_release_stripe(conf, sh, &list); raid5_release_stripe()
444 static inline void remove_hash(struct stripe_head *sh) remove_hash() argument
447 (unsigned long long)sh->sector); remove_hash()
449 hlist_del_init(&sh->hash); remove_hash()
452 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) insert_hash() argument
454 struct hlist_head *hp = stripe_hash(conf, sh->sector); insert_hash()
457 (unsigned long long)sh->sector); insert_hash()
459 hlist_add_head(&sh->hash, hp); insert_hash()
465 struct stripe_head *sh = NULL; get_free_stripe() local
471 sh = list_entry(first, struct stripe_head, lru); get_free_stripe()
473 remove_hash(sh); get_free_stripe()
475 BUG_ON(hash != sh->hash_lock_index); get_free_stripe()
479 return sh; get_free_stripe()
482 static void shrink_buffers(struct stripe_head *sh) shrink_buffers() argument
486 int num = sh->raid_conf->pool_size; shrink_buffers()
489 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); shrink_buffers()
490 p = sh->dev[i].page; shrink_buffers()
493 sh->dev[i].page = NULL; shrink_buffers()
498 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) grow_buffers() argument
501 int num = sh->raid_conf->pool_size; grow_buffers()
509 sh->dev[i].page = page; grow_buffers()
510 sh->dev[i].orig_page = page; grow_buffers()
515 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
517 struct stripe_head *sh);
519 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) init_stripe() argument
521 struct r5conf *conf = sh->raid_conf; init_stripe()
524 BUG_ON(atomic_read(&sh->count) != 0); init_stripe()
525 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); init_stripe()
526 BUG_ON(stripe_operations_active(sh)); init_stripe()
527 BUG_ON(sh->batch_head); init_stripe()
533 sh->generation = conf->generation - previous; init_stripe()
534 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; init_stripe()
535 sh->sector = sector; init_stripe()
536 stripe_set_idx(sector, conf, previous, sh); init_stripe()
537 sh->state = 0; init_stripe()
539 for (i = sh->disks; i--; ) { init_stripe()
540 struct r5dev *dev = &sh->dev[i]; init_stripe()
545 (unsigned long long)sh->sector, i, dev->toread, init_stripe()
551 raid5_build_block(sh, i, previous); init_stripe()
555 sh->overwrite_disks = 0; init_stripe()
556 insert_hash(conf, sh); init_stripe()
557 sh->cpu = smp_processor_id(); init_stripe()
558 set_bit(STRIPE_BATCH_READY, &sh->state); init_stripe()
564 struct stripe_head *sh; __find_stripe() local
567 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) __find_stripe()
568 if (sh->sector == sector && sh->generation == generation) __find_stripe()
569 return sh; __find_stripe()
660 struct stripe_head *sh; raid5_get_active_stripe() local
671 sh = __find_stripe(conf, sector, conf->generation - previous); raid5_get_active_stripe()
672 if (!sh) { raid5_get_active_stripe()
674 sh = get_free_stripe(conf, hash); raid5_get_active_stripe()
675 if (!sh && !test_bit(R5_DID_ALLOC, raid5_get_active_stripe()
680 if (noblock && sh == NULL) raid5_get_active_stripe()
682 if (!sh) { raid5_get_active_stripe()
696 init_stripe(sh, sector, previous); raid5_get_active_stripe()
697 atomic_inc(&sh->count); raid5_get_active_stripe()
699 } else if (!atomic_inc_not_zero(&sh->count)) { raid5_get_active_stripe()
701 if (!atomic_read(&sh->count)) { raid5_get_active_stripe()
702 if (!test_bit(STRIPE_HANDLE, &sh->state)) raid5_get_active_stripe()
704 BUG_ON(list_empty(&sh->lru) && raid5_get_active_stripe()
705 !test_bit(STRIPE_EXPANDING, &sh->state)); raid5_get_active_stripe()
706 list_del_init(&sh->lru); raid5_get_active_stripe()
707 if (sh->group) { raid5_get_active_stripe()
708 sh->group->stripes_cnt--; raid5_get_active_stripe()
709 sh->group = NULL; raid5_get_active_stripe()
712 atomic_inc(&sh->count); raid5_get_active_stripe()
715 } while (sh == NULL); raid5_get_active_stripe()
718 return sh; raid5_get_active_stripe()
721 static bool is_full_stripe_write(struct stripe_head *sh) is_full_stripe_write() argument
723 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); is_full_stripe_write()
724 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); is_full_stripe_write()
747 static bool stripe_can_batch(struct stripe_head *sh) stripe_can_batch() argument
749 struct r5conf *conf = sh->raid_conf; stripe_can_batch()
753 return test_bit(STRIPE_BATCH_READY, &sh->state) && stripe_can_batch()
754 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && stripe_can_batch()
755 is_full_stripe_write(sh); stripe_can_batch()
759 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) stripe_add_to_batch_list() argument
766 if (!stripe_can_batch(sh)) stripe_add_to_batch_list()
769 tmp_sec = sh->sector; stripe_add_to_batch_list()
772 head_sector = sh->sector - STRIPE_SECTORS; stripe_add_to_batch_list()
800 lock_two_stripes(head, sh); stripe_add_to_batch_list()
802 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) stripe_add_to_batch_list()
805 if (sh->batch_head) stripe_add_to_batch_list()
809 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) stripe_add_to_batch_list()
811 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) stripe_add_to_batch_list()
826 list_add(&sh->batch_list, &head->batch_list); stripe_add_to_batch_list()
829 sh->batch_head = head->batch_head; stripe_add_to_batch_list()
832 sh->batch_head = head->batch_head; stripe_add_to_batch_list()
834 list_add_tail(&sh->batch_list, &head->batch_list); stripe_add_to_batch_list()
838 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) stripe_add_to_batch_list()
843 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { stripe_add_to_batch_list()
844 int seq = sh->bm_seq; stripe_add_to_batch_list()
845 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && stripe_add_to_batch_list()
846 sh->batch_head->bm_seq > seq) stripe_add_to_batch_list()
847 seq = sh->batch_head->bm_seq; stripe_add_to_batch_list()
848 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); stripe_add_to_batch_list()
849 sh->batch_head->bm_seq = seq; stripe_add_to_batch_list()
852 atomic_inc(&sh->count); stripe_add_to_batch_list()
854 unlock_two_stripes(head, sh); stripe_add_to_batch_list()
862 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) use_new_offset() argument
872 if (sh->generation == conf->generation - 1) use_new_offset()
885 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) ops_run_io() argument
887 struct r5conf *conf = sh->raid_conf; ops_run_io()
888 int i, disks = sh->disks; ops_run_io()
889 struct stripe_head *head_sh = sh; ops_run_io()
893 if (r5l_write_stripe(conf->log, sh) == 0) ops_run_io()
901 sh = head_sh; ops_run_io()
902 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { ops_run_io()
903 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) ops_run_io()
907 if (test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_io()
909 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) ops_run_io()
912 &sh->dev[i].flags)) { ops_run_io()
917 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) ops_run_io()
921 bi = &sh->dev[i].req; ops_run_io()
922 rbi = &sh->dev[i].rreq; /* For writing to replacement */ ops_run_io()
962 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, ops_run_io()
996 set_bit(STRIPE_IO_STARTED, &sh->state); ops_run_io()
1004 bi->bi_private = sh; ops_run_io()
1007 __func__, (unsigned long long)sh->sector, ops_run_io()
1009 atomic_inc(&sh->count); ops_run_io()
1010 if (sh != head_sh) ops_run_io()
1012 if (use_new_offset(conf, sh)) ops_run_io()
1013 bi->bi_iter.bi_sector = (sh->sector ops_run_io()
1016 bi->bi_iter.bi_sector = (sh->sector ops_run_io()
1021 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) ops_run_io()
1022 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); ops_run_io()
1023 sh->dev[i].vec.bv_page = sh->dev[i].page; ops_run_io()
1035 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); ops_run_io()
1040 sh->dev[i].sector); ops_run_io()
1048 set_bit(STRIPE_IO_STARTED, &sh->state); ops_run_io()
1055 rbi->bi_private = sh; ops_run_io()
1059 __func__, (unsigned long long)sh->sector, ops_run_io()
1061 atomic_inc(&sh->count); ops_run_io()
1062 if (sh != head_sh) ops_run_io()
1064 if (use_new_offset(conf, sh)) ops_run_io()
1065 rbi->bi_iter.bi_sector = (sh->sector ops_run_io()
1068 rbi->bi_iter.bi_sector = (sh->sector ops_run_io()
1070 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) ops_run_io()
1071 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); ops_run_io()
1072 sh->dev[i].rvec.bv_page = sh->dev[i].page; ops_run_io()
1086 sh->dev[i].sector); ops_run_io()
1091 set_bit(STRIPE_DEGRADED, &sh->state); ops_run_io()
1093 bi->bi_rw, i, (unsigned long long)sh->sector); ops_run_io()
1094 clear_bit(R5_LOCKED, &sh->dev[i].flags); ops_run_io()
1095 set_bit(STRIPE_HANDLE, &sh->state); ops_run_io()
1100 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_io()
1102 if (sh != head_sh) ops_run_io()
1110 struct stripe_head *sh) async_copy_data()
1148 if (sh->raid_conf->skip_copy && bio_for_each_segment()
1172 struct stripe_head *sh = stripe_head_ref; ops_complete_biofill() local
1177 (unsigned long long)sh->sector); ops_complete_biofill()
1180 for (i = sh->disks; i--; ) { ops_complete_biofill()
1181 struct r5dev *dev = &sh->dev[i]; ops_complete_biofill()
1203 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); ops_complete_biofill()
1207 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_biofill()
1208 raid5_release_stripe(sh); ops_complete_biofill()
1211 static void ops_run_biofill(struct stripe_head *sh) ops_run_biofill() argument
1217 BUG_ON(sh->batch_head); ops_run_biofill()
1219 (unsigned long long)sh->sector); ops_run_biofill()
1221 for (i = sh->disks; i--; ) { ops_run_biofill()
1222 struct r5dev *dev = &sh->dev[i]; ops_run_biofill()
1225 spin_lock_irq(&sh->stripe_lock); ops_run_biofill()
1228 spin_unlock_irq(&sh->stripe_lock); ops_run_biofill()
1232 dev->sector, tx, sh); ops_run_biofill()
1238 atomic_inc(&sh->count); ops_run_biofill()
1239 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); ops_run_biofill()
1243 static void mark_target_uptodate(struct stripe_head *sh, int target) mark_target_uptodate() argument
1250 tgt = &sh->dev[target]; mark_target_uptodate()
1258 struct stripe_head *sh = stripe_head_ref; ops_complete_compute() local
1261 (unsigned long long)sh->sector); ops_complete_compute()
1264 mark_target_uptodate(sh, sh->ops.target); ops_complete_compute()
1265 mark_target_uptodate(sh, sh->ops.target2); ops_complete_compute()
1267 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); ops_complete_compute()
1268 if (sh->check_state == check_state_compute_run) ops_complete_compute()
1269 sh->check_state = check_state_compute_result; ops_complete_compute()
1270 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_compute()
1271 raid5_release_stripe(sh); ops_complete_compute()
1275 static addr_conv_t *to_addr_conv(struct stripe_head *sh, to_addr_conv() argument
1281 return addr + sizeof(struct page *) * (sh->disks + 2); to_addr_conv()
1294 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute5() argument
1296 int disks = sh->disks; ops_run_compute5()
1298 int target = sh->ops.target; ops_run_compute5()
1299 struct r5dev *tgt = &sh->dev[target]; ops_run_compute5()
1306 BUG_ON(sh->batch_head); ops_run_compute5()
1309 __func__, (unsigned long long)sh->sector, target); ops_run_compute5()
1314 xor_srcs[count++] = sh->dev[i].page; ops_run_compute5()
1316 atomic_inc(&sh->count); ops_run_compute5()
1319 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); ops_run_compute5()
1329 * @srcs - (struct page *) array of size sh->disks
1330 * @sh - stripe_head to parse
1338 struct stripe_head *sh, set_syndrome_sources()
1341 int disks = sh->disks; set_syndrome_sources()
1342 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); set_syndrome_sources()
1343 int d0_idx = raid6_d0(sh); set_syndrome_sources()
1353 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); set_syndrome_sources()
1354 struct r5dev *dev = &sh->dev[i]; set_syndrome_sources()
1356 if (i == sh->qd_idx || i == sh->pd_idx || set_syndrome_sources()
1362 srcs[slot] = sh->dev[i].page; set_syndrome_sources()
1370 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute6_1() argument
1372 int disks = sh->disks; ops_run_compute6_1()
1375 int qd_idx = sh->qd_idx; ops_run_compute6_1()
1383 BUG_ON(sh->batch_head); ops_run_compute6_1()
1384 if (sh->ops.target < 0) ops_run_compute6_1()
1385 target = sh->ops.target2; ops_run_compute6_1()
1386 else if (sh->ops.target2 < 0) ops_run_compute6_1()
1387 target = sh->ops.target; ops_run_compute6_1()
1393 __func__, (unsigned long long)sh->sector, target); ops_run_compute6_1()
1395 tgt = &sh->dev[target]; ops_run_compute6_1()
1399 atomic_inc(&sh->count); ops_run_compute6_1()
1402 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); ops_run_compute6_1()
1406 ops_complete_compute, sh, ops_run_compute6_1()
1407 to_addr_conv(sh, percpu, 0)); ops_run_compute6_1()
1415 blocks[count++] = sh->dev[i].page; ops_run_compute6_1()
1419 NULL, ops_complete_compute, sh, ops_run_compute6_1()
1420 to_addr_conv(sh, percpu, 0)); ops_run_compute6_1()
1428 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_compute6_2() argument
1430 int i, count, disks = sh->disks; ops_run_compute6_2()
1431 int syndrome_disks = sh->ddf_layout ? disks : disks-2; ops_run_compute6_2()
1432 int d0_idx = raid6_d0(sh); ops_run_compute6_2()
1434 int target = sh->ops.target; ops_run_compute6_2()
1435 int target2 = sh->ops.target2; ops_run_compute6_2()
1436 struct r5dev *tgt = &sh->dev[target]; ops_run_compute6_2()
1437 struct r5dev *tgt2 = &sh->dev[target2]; ops_run_compute6_2()
1442 BUG_ON(sh->batch_head); ops_run_compute6_2()
1444 __func__, (unsigned long long)sh->sector, target, target2); ops_run_compute6_2()
1457 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); ops_run_compute6_2()
1459 blocks[slot] = sh->dev[i].page; ops_run_compute6_2()
1472 __func__, (unsigned long long)sh->sector, faila, failb); ops_run_compute6_2()
1474 atomic_inc(&sh->count); ops_run_compute6_2()
1481 ops_complete_compute, sh, ops_run_compute6_2()
1482 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2()
1488 int qd_idx = sh->qd_idx; ops_run_compute6_2()
1500 blocks[count++] = sh->dev[i].page; ops_run_compute6_2()
1502 dest = sh->dev[data_target].page; ops_run_compute6_2()
1506 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2()
1510 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); ops_run_compute6_2()
1512 ops_complete_compute, sh, ops_run_compute6_2()
1513 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2()
1519 ops_complete_compute, sh, ops_run_compute6_2()
1520 to_addr_conv(sh, percpu, 0)); ops_run_compute6_2()
1537 struct stripe_head *sh = stripe_head_ref; ops_complete_prexor() local
1540 (unsigned long long)sh->sector); ops_complete_prexor()
1544 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_prexor5() argument
1547 int disks = sh->disks; ops_run_prexor5()
1549 int count = 0, pd_idx = sh->pd_idx, i; ops_run_prexor5()
1553 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; ops_run_prexor5()
1555 BUG_ON(sh->batch_head); ops_run_prexor5()
1557 (unsigned long long)sh->sector); ops_run_prexor5()
1560 struct r5dev *dev = &sh->dev[i]; ops_run_prexor5()
1567 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); ops_run_prexor5()
1574 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_prexor6() argument
1582 (unsigned long long)sh->sector); ops_run_prexor6()
1584 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); ops_run_prexor6()
1587 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); ops_run_prexor6()
1594 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) ops_run_biodrain() argument
1596 int disks = sh->disks; ops_run_biodrain()
1598 struct stripe_head *head_sh = sh; ops_run_biodrain()
1601 (unsigned long long)sh->sector); ops_run_biodrain()
1607 sh = head_sh; ops_run_biodrain()
1612 dev = &sh->dev[i]; ops_run_biodrain()
1613 spin_lock_irq(&sh->stripe_lock); ops_run_biodrain()
1616 sh->overwrite_disks = 0; ops_run_biodrain()
1619 spin_unlock_irq(&sh->stripe_lock); ops_run_biodrain()
1632 dev->sector, tx, sh); ops_run_biodrain()
1643 sh = list_first_entry(&sh->batch_list, ops_run_biodrain()
1646 if (sh == head_sh) ops_run_biodrain()
1658 struct stripe_head *sh = stripe_head_ref; ops_complete_reconstruct() local
1659 int disks = sh->disks; ops_complete_reconstruct()
1660 int pd_idx = sh->pd_idx; ops_complete_reconstruct()
1661 int qd_idx = sh->qd_idx; ops_complete_reconstruct()
1666 (unsigned long long)sh->sector); ops_complete_reconstruct()
1669 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); ops_complete_reconstruct()
1670 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); ops_complete_reconstruct()
1671 discard |= test_bit(R5_Discard, &sh->dev[i].flags); ops_complete_reconstruct()
1675 struct r5dev *dev = &sh->dev[i]; ops_complete_reconstruct()
1687 if (sh->reconstruct_state == reconstruct_state_drain_run) ops_complete_reconstruct()
1688 sh->reconstruct_state = reconstruct_state_drain_result; ops_complete_reconstruct()
1689 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) ops_complete_reconstruct()
1690 sh->reconstruct_state = reconstruct_state_prexor_drain_result; ops_complete_reconstruct()
1692 BUG_ON(sh->reconstruct_state != reconstruct_state_run); ops_complete_reconstruct()
1693 sh->reconstruct_state = reconstruct_state_result; ops_complete_reconstruct()
1696 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_reconstruct()
1697 raid5_release_stripe(sh); ops_complete_reconstruct()
1701 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_reconstruct5() argument
1704 int disks = sh->disks; ops_run_reconstruct5()
1707 int count, pd_idx = sh->pd_idx, i; ops_run_reconstruct5()
1712 struct stripe_head *head_sh = sh; ops_run_reconstruct5()
1716 (unsigned long long)sh->sector); ops_run_reconstruct5()
1718 for (i = 0; i < sh->disks; i++) { ops_run_reconstruct5()
1721 if (!test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_reconstruct5()
1724 if (i >= sh->disks) { ops_run_reconstruct5()
1725 atomic_inc(&sh->count); ops_run_reconstruct5()
1726 set_bit(R5_Discard, &sh->dev[pd_idx].flags); ops_run_reconstruct5()
1727 ops_complete_reconstruct(sh); ops_run_reconstruct5()
1738 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; ops_run_reconstruct5()
1740 struct r5dev *dev = &sh->dev[i]; ops_run_reconstruct5()
1745 xor_dest = sh->dev[pd_idx].page; ops_run_reconstruct5()
1747 struct r5dev *dev = &sh->dev[i]; ops_run_reconstruct5()
1759 list_first_entry(&sh->batch_list, ops_run_reconstruct5()
1767 to_addr_conv(sh, percpu, j)); ops_run_reconstruct5()
1771 to_addr_conv(sh, percpu, j)); ops_run_reconstruct5()
1780 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_reconstruct5()
1787 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, ops_run_reconstruct6() argument
1793 struct stripe_head *head_sh = sh; ops_run_reconstruct6()
1798 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); ops_run_reconstruct6()
1800 for (i = 0; i < sh->disks; i++) { ops_run_reconstruct6()
1801 if (sh->pd_idx == i || sh->qd_idx == i) ops_run_reconstruct6()
1803 if (!test_bit(R5_Discard, &sh->dev[i].flags)) ops_run_reconstruct6()
1806 if (i >= sh->disks) { ops_run_reconstruct6()
1807 atomic_inc(&sh->count); ops_run_reconstruct6()
1808 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); ops_run_reconstruct6()
1809 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); ops_run_reconstruct6()
1810 ops_complete_reconstruct(sh); ops_run_reconstruct6()
1817 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { ops_run_reconstruct6()
1825 count = set_syndrome_sources(blocks, sh, synflags); ops_run_reconstruct6()
1827 list_first_entry(&sh->batch_list, ops_run_reconstruct6()
1833 head_sh, to_addr_conv(sh, percpu, j)); ops_run_reconstruct6()
1836 to_addr_conv(sh, percpu, j)); ops_run_reconstruct6()
1840 sh = list_first_entry(&sh->batch_list, struct stripe_head, ops_run_reconstruct6()
1848 struct stripe_head *sh = stripe_head_ref; ops_complete_check() local
1851 (unsigned long long)sh->sector); ops_complete_check()
1853 sh->check_state = check_state_check_result; ops_complete_check()
1854 set_bit(STRIPE_HANDLE, &sh->state); ops_complete_check()
1855 raid5_release_stripe(sh); ops_complete_check()
1858 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) ops_run_check_p() argument
1860 int disks = sh->disks; ops_run_check_p()
1861 int pd_idx = sh->pd_idx; ops_run_check_p()
1862 int qd_idx = sh->qd_idx; ops_run_check_p()
1871 (unsigned long long)sh->sector); ops_run_check_p()
1873 BUG_ON(sh->batch_head); ops_run_check_p()
1875 xor_dest = sh->dev[pd_idx].page; ops_run_check_p()
1880 xor_srcs[count++] = sh->dev[i].page; ops_run_check_p()
1884 to_addr_conv(sh, percpu, 0)); ops_run_check_p()
1886 &sh->ops.zero_sum_result, &submit); ops_run_check_p()
1888 atomic_inc(&sh->count); ops_run_check_p()
1889 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); ops_run_check_p()
1893 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) ops_run_check_pq() argument
1900 (unsigned long long)sh->sector, checkp); ops_run_check_pq()
1902 BUG_ON(sh->batch_head); ops_run_check_pq()
1903 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); ops_run_check_pq()
1907 atomic_inc(&sh->count); ops_run_check_pq()
1909 sh, to_addr_conv(sh, percpu, 0)); ops_run_check_pq()
1911 &sh->ops.zero_sum_result, percpu->spare_page, &submit); ops_run_check_pq()
1914 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) raid_run_ops() argument
1916 int overlap_clear = 0, i, disks = sh->disks; raid_run_ops()
1918 struct r5conf *conf = sh->raid_conf; raid_run_ops()
1926 ops_run_biofill(sh); raid_run_ops()
1932 tx = ops_run_compute5(sh, percpu); raid_run_ops()
1934 if (sh->ops.target2 < 0 || sh->ops.target < 0) raid_run_ops()
1935 tx = ops_run_compute6_1(sh, percpu); raid_run_ops()
1937 tx = ops_run_compute6_2(sh, percpu); raid_run_ops()
1946 tx = ops_run_prexor5(sh, percpu, tx); raid_run_ops()
1948 tx = ops_run_prexor6(sh, percpu, tx); raid_run_ops()
1952 tx = ops_run_biodrain(sh, tx); raid_run_ops()
1958 ops_run_reconstruct5(sh, percpu, tx); raid_run_ops()
1960 ops_run_reconstruct6(sh, percpu, tx); raid_run_ops()
1964 if (sh->check_state == check_state_run) raid_run_ops()
1965 ops_run_check_p(sh, percpu); raid_run_ops()
1966 else if (sh->check_state == check_state_run_q) raid_run_ops()
1967 ops_run_check_pq(sh, percpu, 0); raid_run_ops()
1968 else if (sh->check_state == check_state_run_pq) raid_run_ops()
1969 ops_run_check_pq(sh, percpu, 1); raid_run_ops()
1974 if (overlap_clear && !sh->batch_head) raid_run_ops()
1976 struct r5dev *dev = &sh->dev[i]; raid_run_ops()
1978 wake_up(&sh->raid_conf->wait_for_overlap); raid_run_ops()
1985 struct stripe_head *sh; alloc_stripe() local
1987 sh = kmem_cache_zalloc(sc, gfp); alloc_stripe()
1988 if (sh) { alloc_stripe()
1989 spin_lock_init(&sh->stripe_lock); alloc_stripe()
1990 spin_lock_init(&sh->batch_lock); alloc_stripe()
1991 INIT_LIST_HEAD(&sh->batch_list); alloc_stripe()
1992 INIT_LIST_HEAD(&sh->lru); alloc_stripe()
1993 atomic_set(&sh->count, 1); alloc_stripe()
1995 return sh; alloc_stripe()
1999 struct stripe_head *sh; grow_one_stripe() local
2001 sh = alloc_stripe(conf->slab_cache, gfp); grow_one_stripe()
2002 if (!sh) grow_one_stripe()
2005 sh->raid_conf = conf; grow_one_stripe()
2007 if (grow_buffers(sh, gfp)) { grow_one_stripe()
2008 shrink_buffers(sh); grow_one_stripe()
2009 kmem_cache_free(conf->slab_cache, sh); grow_one_stripe()
2012 sh->hash_lock_index = grow_one_stripe()
2017 raid5_release_stripe(sh); grow_one_stripe()
2261 struct stripe_head *sh; drop_one_stripe() local
2265 sh = get_free_stripe(conf, hash); drop_one_stripe()
2267 if (!sh) drop_one_stripe()
2269 BUG_ON(atomic_read(&sh->count)); drop_one_stripe()
2270 shrink_buffers(sh); drop_one_stripe()
2271 kmem_cache_free(conf->slab_cache, sh); drop_one_stripe()
2289 struct stripe_head *sh = bi->bi_private; raid5_end_read_request() local
2290 struct r5conf *conf = sh->raid_conf; raid5_end_read_request()
2291 int disks = sh->disks, i; raid5_end_read_request()
2297 if (bi == &sh->dev[i].req) raid5_end_read_request()
2301 (unsigned long long)sh->sector, i, atomic_read(&sh->count), raid5_end_read_request()
2307 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) raid5_end_read_request()
2317 if (use_new_offset(conf, sh)) raid5_end_read_request()
2318 s = sh->sector + rdev->new_data_offset; raid5_end_read_request()
2320 s = sh->sector + rdev->data_offset; raid5_end_read_request()
2322 set_bit(R5_UPTODATE, &sh->dev[i].flags); raid5_end_read_request()
2323 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { raid5_end_read_request()
2336 clear_bit(R5_ReadError, &sh->dev[i].flags); raid5_end_read_request()
2337 clear_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_read_request()
2338 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) raid5_end_read_request()
2339 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request()
2348 clear_bit(R5_UPTODATE, &sh->dev[i].flags); raid5_end_read_request()
2350 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) raid5_end_read_request()
2367 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { raid5_end_read_request()
2385 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) raid5_end_read_request()
2388 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { raid5_end_read_request()
2389 set_bit(R5_ReadError, &sh->dev[i].flags); raid5_end_read_request()
2390 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request()
2392 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); raid5_end_read_request()
2394 clear_bit(R5_ReadError, &sh->dev[i].flags); raid5_end_read_request()
2395 clear_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_read_request()
2399 rdev, sh->sector, STRIPE_SECTORS, 0))) raid5_end_read_request()
2404 clear_bit(R5_LOCKED, &sh->dev[i].flags); raid5_end_read_request()
2405 set_bit(STRIPE_HANDLE, &sh->state); raid5_end_read_request()
2406 raid5_release_stripe(sh); raid5_end_read_request()
2411 struct stripe_head *sh = bi->bi_private; raid5_end_write_request() local
2412 struct r5conf *conf = sh->raid_conf; raid5_end_write_request()
2413 int disks = sh->disks, i; raid5_end_write_request()
2420 if (bi == &sh->dev[i].req) { raid5_end_write_request()
2424 if (bi == &sh->dev[i].rreq) { raid5_end_write_request()
2438 (unsigned long long)sh->sector, i, atomic_read(&sh->count), raid5_end_write_request()
2448 else if (is_badblock(rdev, sh->sector, raid5_end_write_request()
2451 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); raid5_end_write_request()
2454 set_bit(STRIPE_DEGRADED, &sh->state); raid5_end_write_request()
2456 set_bit(R5_WriteError, &sh->dev[i].flags); raid5_end_write_request()
2460 } else if (is_badblock(rdev, sh->sector, raid5_end_write_request()
2463 set_bit(R5_MadeGood, &sh->dev[i].flags); raid5_end_write_request()
2464 if (test_bit(R5_ReadError, &sh->dev[i].flags)) raid5_end_write_request()
2469 set_bit(R5_ReWrite, &sh->dev[i].flags); raid5_end_write_request()
2474 if (sh->batch_head && bi->bi_error && !replacement) raid5_end_write_request()
2475 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); raid5_end_write_request()
2477 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) raid5_end_write_request()
2478 clear_bit(R5_LOCKED, &sh->dev[i].flags); raid5_end_write_request()
2479 set_bit(STRIPE_HANDLE, &sh->state); raid5_end_write_request()
2480 raid5_release_stripe(sh); raid5_end_write_request()
2482 if (sh->batch_head && sh != sh->batch_head) raid5_end_write_request()
2483 raid5_release_stripe(sh->batch_head); raid5_end_write_request()
2486 static void raid5_build_block(struct stripe_head *sh, int i, int previous) raid5_build_block() argument
2488 struct r5dev *dev = &sh->dev[i]; raid5_build_block()
2493 dev->req.bi_private = sh; raid5_build_block()
2498 dev->rreq.bi_private = sh; raid5_build_block()
2501 dev->sector = raid5_compute_blocknr(sh, i, previous); raid5_build_block()
2536 struct stripe_head *sh) raid5_compute_sector()
2724 if (sh) { raid5_compute_sector()
2725 sh->pd_idx = pd_idx; raid5_compute_sector()
2726 sh->qd_idx = qd_idx; raid5_compute_sector()
2727 sh->ddf_layout = ddf_layout; raid5_compute_sector()
2736 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) raid5_compute_blocknr() argument
2738 struct r5conf *conf = sh->raid_conf; raid5_compute_blocknr()
2739 int raid_disks = sh->disks; raid5_compute_blocknr()
2741 sector_t new_sector = sh->sector, check; raid5_compute_blocknr()
2756 if (i == sh->pd_idx) raid5_compute_blocknr()
2764 if (i > sh->pd_idx) raid5_compute_blocknr()
2769 if (i < sh->pd_idx) raid5_compute_blocknr()
2771 i -= (sh->pd_idx + 1); raid5_compute_blocknr()
2783 if (i == sh->qd_idx) raid5_compute_blocknr()
2790 if (sh->pd_idx == raid_disks-1) raid5_compute_blocknr()
2792 else if (i > sh->pd_idx) raid5_compute_blocknr()
2797 if (sh->pd_idx == raid_disks-1) raid5_compute_blocknr()
2801 if (i < sh->pd_idx) raid5_compute_blocknr()
2803 i -= (sh->pd_idx + 2); raid5_compute_blocknr()
2813 if (sh->pd_idx == 0) raid5_compute_blocknr()
2817 if (i < sh->pd_idx) raid5_compute_blocknr()
2819 i -= (sh->pd_idx + 1); raid5_compute_blocknr()
2824 if (i > sh->pd_idx) raid5_compute_blocknr()
2829 if (i < sh->pd_idx) raid5_compute_blocknr()
2831 i -= (sh->pd_idx + 1); raid5_compute_blocknr()
2847 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx raid5_compute_blocknr()
2848 || sh2.qd_idx != sh->qd_idx) { raid5_compute_blocknr()
2857 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, schedule_reconstruction() argument
2860 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; schedule_reconstruction()
2861 struct r5conf *conf = sh->raid_conf; schedule_reconstruction()
2867 struct r5dev *dev = &sh->dev[i]; schedule_reconstruction()
2885 sh->reconstruct_state = reconstruct_state_drain_run; schedule_reconstruction()
2888 sh->reconstruct_state = reconstruct_state_run; schedule_reconstruction()
2893 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) schedule_reconstruction()
2896 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || schedule_reconstruction()
2897 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); schedule_reconstruction()
2899 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || schedule_reconstruction()
2900 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); schedule_reconstruction()
2903 struct r5dev *dev = &sh->dev[i]; schedule_reconstruction()
2919 sh->reconstruct_state = reconstruct_state_prexor_drain_run; schedule_reconstruction()
2928 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); schedule_reconstruction()
2929 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); schedule_reconstruction()
2933 int qd_idx = sh->qd_idx; schedule_reconstruction()
2934 struct r5dev *dev = &sh->dev[qd_idx]; schedule_reconstruction()
2942 __func__, (unsigned long long)sh->sector, schedule_reconstruction()
2951 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, add_stripe_bio() argument
2955 struct r5conf *conf = sh->raid_conf; add_stripe_bio()
2960 (unsigned long long)sh->sector); add_stripe_bio()
2970 spin_lock_irq(&sh->stripe_lock); add_stripe_bio()
2972 if (sh->batch_head) add_stripe_bio()
2975 bip = &sh->dev[dd_idx].towrite; add_stripe_bio()
2979 bip = &sh->dev[dd_idx].toread; add_stripe_bio()
2989 clear_bit(STRIPE_BATCH_READY, &sh->state); add_stripe_bio()
2999 sector_t sector = sh->dev[dd_idx].sector; add_stripe_bio()
3000 for (bi=sh->dev[dd_idx].towrite; add_stripe_bio()
3001 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && add_stripe_bio()
3003 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { add_stripe_bio()
3007 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) add_stripe_bio()
3008 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) add_stripe_bio()
3009 sh->overwrite_disks++; add_stripe_bio()
3014 (unsigned long long)sh->sector, dd_idx); add_stripe_bio()
3029 set_bit(STRIPE_BITMAP_PENDING, &sh->state); add_stripe_bio()
3030 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio()
3031 bitmap_startwrite(conf->mddev->bitmap, sh->sector, add_stripe_bio()
3033 spin_lock_irq(&sh->stripe_lock); add_stripe_bio()
3034 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); add_stripe_bio()
3035 if (!sh->batch_head) { add_stripe_bio()
3036 sh->bm_seq = conf->seq_flush+1; add_stripe_bio()
3037 set_bit(STRIPE_BIT_DELAY, &sh->state); add_stripe_bio()
3040 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio()
3042 if (stripe_can_batch(sh)) add_stripe_bio()
3043 stripe_add_to_batch_list(conf, sh); add_stripe_bio()
3047 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); add_stripe_bio()
3048 spin_unlock_irq(&sh->stripe_lock); add_stripe_bio()
3055 struct stripe_head *sh) stripe_set_idx()
3067 &dd_idx, sh); stripe_set_idx()
3071 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, handle_failed_stripe() argument
3076 BUG_ON(sh->batch_head); handle_failed_stripe()
3081 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { handle_failed_stripe()
3093 sh->sector, handle_failed_stripe()
3099 spin_lock_irq(&sh->stripe_lock); handle_failed_stripe()
3101 bi = sh->dev[i].towrite; handle_failed_stripe()
3102 sh->dev[i].towrite = NULL; handle_failed_stripe()
3103 sh->overwrite_disks = 0; handle_failed_stripe()
3104 spin_unlock_irq(&sh->stripe_lock); handle_failed_stripe()
3108 r5l_stripe_write_finished(sh); handle_failed_stripe()
3110 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) handle_failed_stripe()
3114 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe()
3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_failed_stripe()
3129 bi = sh->dev[i].written; handle_failed_stripe()
3130 sh->dev[i].written = NULL; handle_failed_stripe()
3131 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { handle_failed_stripe()
3132 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); handle_failed_stripe()
3133 sh->dev[i].page = sh->dev[i].orig_page; handle_failed_stripe()
3138 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe()
3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe()
3152 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && handle_failed_stripe()
3154 (!test_bit(R5_Insync, &sh->dev[i].flags) || handle_failed_stripe()
3155 test_bit(R5_ReadError, &sh->dev[i].flags))) { handle_failed_stripe()
3156 spin_lock_irq(&sh->stripe_lock); handle_failed_stripe()
3157 bi = sh->dev[i].toread; handle_failed_stripe()
3158 sh->dev[i].toread = NULL; handle_failed_stripe()
3159 spin_unlock_irq(&sh->stripe_lock); handle_failed_stripe()
3160 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) handle_failed_stripe()
3165 sh->dev[i].sector + STRIPE_SECTORS) { handle_failed_stripe()
3167 r5_next_bio(bi, sh->dev[i].sector); handle_failed_stripe()
3176 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_failed_stripe()
3181 clear_bit(R5_LOCKED, &sh->dev[i].flags); handle_failed_stripe()
3186 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) handle_failed_stripe()
3192 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, handle_failed_sync() argument
3198 BUG_ON(sh->batch_head); handle_failed_sync()
3199 clear_bit(STRIPE_SYNCING, &sh->state); handle_failed_sync()
3200 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) handle_failed_sync()
3220 && !rdev_set_badblocks(rdev, sh->sector, handle_failed_sync()
3227 && !rdev_set_badblocks(rdev, sh->sector, handle_failed_sync()
3238 static int want_replace(struct stripe_head *sh, int disk_idx) want_replace() argument
3243 rdev = sh->raid_conf->disks[disk_idx].replacement; want_replace()
3247 && (rdev->recovery_offset <= sh->sector want_replace()
3248 || rdev->mddev->recovery_cp <= sh->sector)) want_replace()
3261 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, need_this_block() argument
3264 struct r5dev *dev = &sh->dev[disk_idx]; need_this_block()
3265 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], need_this_block()
3266 &sh->dev[s->failed_num[1]] }; need_this_block()
3283 (s->replacing && want_replace(sh, disk_idx))) need_this_block()
3308 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) need_this_block()
3336 if (sh->raid_conf->level != 6 && need_this_block()
3337 sh->sector < sh->raid_conf->mddev->recovery_cp) need_this_block()
3341 if (s->failed_num[i] != sh->pd_idx && need_this_block()
3342 s->failed_num[i] != sh->qd_idx && need_this_block()
3351 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, fetch_block() argument
3354 struct r5dev *dev = &sh->dev[disk_idx]; fetch_block()
3357 if (need_this_block(sh, s, disk_idx, disks)) { fetch_block()
3363 BUG_ON(sh->batch_head); fetch_block()
3371 (unsigned long long)sh->sector, disk_idx); fetch_block()
3372 set_bit(STRIPE_COMPUTE_RUN, &sh->state); fetch_block()
3375 sh->ops.target = disk_idx; fetch_block()
3376 sh->ops.target2 = -1; /* no 2nd target */ fetch_block()
3395 &sh->dev[other].flags)) fetch_block()
3400 (unsigned long long)sh->sector, fetch_block()
3402 set_bit(STRIPE_COMPUTE_RUN, &sh->state); fetch_block()
3404 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); fetch_block()
3405 set_bit(R5_Wantcompute, &sh->dev[other].flags); fetch_block()
3406 sh->ops.target = disk_idx; fetch_block()
3407 sh->ops.target2 = other; fetch_block()
3426 static void handle_stripe_fill(struct stripe_head *sh, handle_stripe_fill() argument
3436 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && handle_stripe_fill()
3437 !sh->reconstruct_state) handle_stripe_fill()
3439 if (fetch_block(sh, s, i, disks)) handle_stripe_fill()
3441 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_fill()
3452 struct stripe_head *sh, int disks, struct bio_list *return_bi) handle_stripe_clean_event()
3457 struct stripe_head *head_sh = sh; handle_stripe_clean_event()
3461 if (sh->dev[i].written) { handle_stripe_clean_event()
3462 dev = &sh->dev[i]; handle_stripe_clean_event()
3490 bitmap_endwrite(conf->mddev->bitmap, sh->sector, handle_stripe_clean_event()
3492 !test_bit(STRIPE_DEGRADED, &sh->state), handle_stripe_clean_event()
3495 sh = list_first_entry(&sh->batch_list, handle_stripe_clean_event()
3498 if (sh != head_sh) { handle_stripe_clean_event()
3499 dev = &sh->dev[i]; handle_stripe_clean_event()
3503 sh = head_sh; handle_stripe_clean_event()
3504 dev = &sh->dev[i]; handle_stripe_clean_event()
3511 r5l_stripe_write_finished(sh); handle_stripe_clean_event()
3514 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { handle_stripe_clean_event()
3516 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); handle_stripe_clean_event()
3517 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); handle_stripe_clean_event()
3518 if (sh->qd_idx >= 0) { handle_stripe_clean_event()
3519 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); handle_stripe_clean_event()
3520 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); handle_stripe_clean_event()
3523 clear_bit(STRIPE_DISCARD, &sh->state); handle_stripe_clean_event()
3530 hash = sh->hash_lock_index; handle_stripe_clean_event()
3532 remove_hash(sh); handle_stripe_clean_event()
3535 sh = list_first_entry(&sh->batch_list, handle_stripe_clean_event()
3537 if (sh != head_sh) handle_stripe_clean_event()
3540 sh = head_sh; handle_stripe_clean_event()
3542 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) handle_stripe_clean_event()
3543 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_clean_event()
3547 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) handle_stripe_clean_event()
3556 struct stripe_head *sh, handle_stripe_dirtying()
3571 (recovery_cp < MaxSector && sh->sector >= recovery_cp && handle_stripe_dirtying()
3577 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", handle_stripe_dirtying()
3579 (unsigned long long)sh->sector); handle_stripe_dirtying()
3582 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying()
3583 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && handle_stripe_dirtying()
3594 i != sh->pd_idx && i != sh->qd_idx && handle_stripe_dirtying()
3605 (unsigned long long)sh->sector, rmw, rcw); handle_stripe_dirtying()
3606 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying()
3612 (unsigned long long)sh->sector, rmw); handle_stripe_dirtying()
3614 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying()
3615 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && handle_stripe_dirtying()
3621 &sh->state)) { handle_stripe_dirtying()
3628 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying()
3629 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying()
3639 struct r5dev *dev = &sh->dev[i]; handle_stripe_dirtying()
3641 i != sh->pd_idx && i != sh->qd_idx && handle_stripe_dirtying()
3648 &sh->state)) { handle_stripe_dirtying()
3656 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying()
3657 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe_dirtying()
3663 (unsigned long long)sh->sector, handle_stripe_dirtying()
3664 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); handle_stripe_dirtying()
3668 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) handle_stripe_dirtying()
3669 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe_dirtying()
3681 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && handle_stripe_dirtying()
3683 !test_bit(STRIPE_BIT_DELAY, &sh->state))) handle_stripe_dirtying()
3684 schedule_reconstruction(sh, s, rcw == 0, 0); handle_stripe_dirtying()
3687 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, handle_parity_checks5() argument
3692 BUG_ON(sh->batch_head); handle_parity_checks5()
3693 set_bit(STRIPE_HANDLE, &sh->state); handle_parity_checks5()
3695 switch (sh->check_state) { handle_parity_checks5()
3700 sh->check_state = check_state_run; handle_parity_checks5()
3702 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); handle_parity_checks5()
3706 dev = &sh->dev[s->failed_num[0]]; handle_parity_checks5()
3709 sh->check_state = check_state_idle; handle_parity_checks5()
3711 dev = &sh->dev[sh->pd_idx]; handle_parity_checks5()
3714 if (test_bit(STRIPE_INSYNC, &sh->state)) handle_parity_checks5()
3725 clear_bit(STRIPE_DEGRADED, &sh->state); handle_parity_checks5()
3726 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5()
3731 sh->check_state = check_state_idle; handle_parity_checks5()
3743 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) handle_parity_checks5()
3747 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5()
3752 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks5()
3754 sh->check_state = check_state_compute_run; handle_parity_checks5()
3755 set_bit(STRIPE_COMPUTE_RUN, &sh->state); handle_parity_checks5()
3758 &sh->dev[sh->pd_idx].flags); handle_parity_checks5()
3759 sh->ops.target = sh->pd_idx; handle_parity_checks5()
3760 sh->ops.target2 = -1; handle_parity_checks5()
3769 __func__, sh->check_state, handle_parity_checks5()
3770 (unsigned long long) sh->sector); handle_parity_checks5()
3775 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, handle_parity_checks6() argument
3779 int pd_idx = sh->pd_idx; handle_parity_checks6()
3780 int qd_idx = sh->qd_idx; handle_parity_checks6()
3783 BUG_ON(sh->batch_head); handle_parity_checks6()
3784 set_bit(STRIPE_HANDLE, &sh->state); handle_parity_checks6()
3794 switch (sh->check_state) { handle_parity_checks6()
3802 sh->check_state = check_state_run; handle_parity_checks6()
3808 if (sh->check_state == check_state_run) handle_parity_checks6()
3809 sh->check_state = check_state_run_pq; handle_parity_checks6()
3811 sh->check_state = check_state_run_q; handle_parity_checks6()
3815 sh->ops.zero_sum_result = 0; handle_parity_checks6()
3817 if (sh->check_state == check_state_run) { handle_parity_checks6()
3819 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); handle_parity_checks6()
3822 if (sh->check_state >= check_state_run && handle_parity_checks6()
3823 sh->check_state <= check_state_run_pq) { handle_parity_checks6()
3835 sh->check_state = check_state_idle; handle_parity_checks6()
3838 if (test_bit(STRIPE_INSYNC, &sh->state)) handle_parity_checks6()
3846 dev = &sh->dev[s->failed_num[1]]; handle_parity_checks6()
3852 dev = &sh->dev[s->failed_num[0]]; handle_parity_checks6()
3857 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { handle_parity_checks6()
3858 dev = &sh->dev[pd_idx]; handle_parity_checks6()
3863 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { handle_parity_checks6()
3864 dev = &sh->dev[qd_idx]; handle_parity_checks6()
3869 clear_bit(STRIPE_DEGRADED, &sh->state); handle_parity_checks6()
3871 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6()
3878 sh->check_state = check_state_idle; handle_parity_checks6()
3884 if (sh->ops.zero_sum_result == 0) { handle_parity_checks6()
3887 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6()
3893 sh->check_state = check_state_compute_result; handle_parity_checks6()
3904 set_bit(STRIPE_INSYNC, &sh->state); handle_parity_checks6()
3906 int *target = &sh->ops.target; handle_parity_checks6()
3908 sh->ops.target = -1; handle_parity_checks6()
3909 sh->ops.target2 = -1; handle_parity_checks6()
3910 sh->check_state = check_state_compute_run; handle_parity_checks6()
3911 set_bit(STRIPE_COMPUTE_RUN, &sh->state); handle_parity_checks6()
3913 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { handle_parity_checks6()
3915 &sh->dev[pd_idx].flags); handle_parity_checks6()
3917 target = &sh->ops.target2; handle_parity_checks6()
3920 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { handle_parity_checks6()
3922 &sh->dev[qd_idx].flags); handle_parity_checks6()
3933 __func__, sh->check_state, handle_parity_checks6()
3934 (unsigned long long) sh->sector); handle_parity_checks6()
3939 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) handle_stripe_expansion() argument
3947 BUG_ON(sh->batch_head); handle_stripe_expansion()
3948 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); handle_stripe_expansion()
3949 for (i = 0; i < sh->disks; i++) handle_stripe_expansion()
3950 if (i != sh->pd_idx && i != sh->qd_idx) { handle_stripe_expansion()
3955 sector_t bn = raid5_compute_blocknr(sh, i, 1); handle_stripe_expansion()
3975 sh->dev[i].page, 0, 0, STRIPE_SIZE, handle_stripe_expansion()
4010 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) analyse_stripe() argument
4012 struct r5conf *conf = sh->raid_conf; analyse_stripe()
4013 int disks = sh->disks; analyse_stripe()
4020 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; analyse_stripe()
4021 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; analyse_stripe()
4034 dev = &sh->dev[i]; analyse_stripe()
4045 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) analyse_stripe()
4074 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && analyse_stripe()
4075 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, analyse_stripe()
4089 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, analyse_stripe()
4116 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) analyse_stripe()
4175 if (test_bit(STRIPE_SYNCING, &sh->state)) { analyse_stripe()
4185 sh->sector >= conf->mddev->recovery_cp || analyse_stripe()
4194 static int clear_batch_ready(struct stripe_head *sh) clear_batch_ready() argument
4201 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) clear_batch_ready()
4202 return (sh->batch_head && sh->batch_head != sh); clear_batch_ready()
4203 spin_lock(&sh->stripe_lock); clear_batch_ready()
4204 if (!sh->batch_head) { clear_batch_ready()
4205 spin_unlock(&sh->stripe_lock); clear_batch_ready()
4213 if (sh->batch_head != sh) { clear_batch_ready()
4214 spin_unlock(&sh->stripe_lock); clear_batch_ready()
4217 spin_lock(&sh->batch_lock); clear_batch_ready()
4218 list_for_each_entry(tmp, &sh->batch_list, batch_list) clear_batch_ready()
4220 spin_unlock(&sh->batch_lock); clear_batch_ready()
4221 spin_unlock(&sh->stripe_lock); clear_batch_ready()
4233 struct stripe_head *sh, *next; break_stripe_batch_list() local
4237 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { break_stripe_batch_list()
4239 list_del_init(&sh->batch_list); break_stripe_batch_list()
4241 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | break_stripe_batch_list()
4257 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | break_stripe_batch_list()
4262 sh->check_state = head_sh->check_state; break_stripe_batch_list()
4263 sh->reconstruct_state = head_sh->reconstruct_state; break_stripe_batch_list()
4264 for (i = 0; i < sh->disks; i++) { break_stripe_batch_list()
4265 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) break_stripe_batch_list()
4267 sh->dev[i].flags = head_sh->dev[i].flags & break_stripe_batch_list()
4270 spin_lock_irq(&sh->stripe_lock); break_stripe_batch_list()
4271 sh->batch_head = NULL; break_stripe_batch_list()
4272 spin_unlock_irq(&sh->stripe_lock); break_stripe_batch_list()
4274 sh->state & handle_flags) break_stripe_batch_list()
4275 set_bit(STRIPE_HANDLE, &sh->state); break_stripe_batch_list()
4276 raid5_release_stripe(sh); break_stripe_batch_list()
4291 static void handle_stripe(struct stripe_head *sh) handle_stripe() argument
4294 struct r5conf *conf = sh->raid_conf; handle_stripe()
4297 int disks = sh->disks; handle_stripe()
4300 clear_bit(STRIPE_HANDLE, &sh->state); handle_stripe()
4301 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { handle_stripe()
4304 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe()
4308 if (clear_batch_ready(sh) ) { handle_stripe()
4309 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); handle_stripe()
4313 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) handle_stripe()
4314 break_stripe_batch_list(sh, 0); handle_stripe()
4316 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { handle_stripe()
4317 spin_lock(&sh->stripe_lock); handle_stripe()
4319 if (!test_bit(STRIPE_DISCARD, &sh->state) && handle_stripe()
4320 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { handle_stripe()
4321 set_bit(STRIPE_SYNCING, &sh->state); handle_stripe()
4322 clear_bit(STRIPE_INSYNC, &sh->state); handle_stripe()
4323 clear_bit(STRIPE_REPLACED, &sh->state); handle_stripe()
4325 spin_unlock(&sh->stripe_lock); handle_stripe()
4327 clear_bit(STRIPE_DELAYED, &sh->state); handle_stripe()
4331 (unsigned long long)sh->sector, sh->state, handle_stripe()
4332 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, handle_stripe()
4333 sh->check_state, sh->reconstruct_state); handle_stripe()
4335 analyse_stripe(sh, &s); handle_stripe()
4337 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) handle_stripe()
4341 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe()
4348 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe()
4356 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { handle_stripe()
4358 set_bit(STRIPE_BIOFILL_RUN, &sh->state); handle_stripe()
4369 sh->check_state = 0; handle_stripe()
4370 sh->reconstruct_state = 0; handle_stripe()
4371 break_stripe_batch_list(sh, 0); handle_stripe()
4373 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); handle_stripe()
4375 handle_failed_sync(conf, sh, &s); handle_stripe()
4382 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) handle_stripe()
4384 if (sh->reconstruct_state == reconstruct_state_drain_result || handle_stripe()
4385 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { handle_stripe()
4386 sh->reconstruct_state = reconstruct_state_idle; handle_stripe()
4391 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && handle_stripe()
4392 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); handle_stripe()
4393 BUG_ON(sh->qd_idx >= 0 && handle_stripe()
4394 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && handle_stripe()
4395 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); handle_stripe()
4397 struct r5dev *dev = &sh->dev[i]; handle_stripe()
4399 (i == sh->pd_idx || i == sh->qd_idx || handle_stripe()
4408 ((i == sh->pd_idx || i == sh->qd_idx) && handle_stripe()
4410 set_bit(STRIPE_INSYNC, &sh->state); handle_stripe()
4413 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) handle_stripe()
4421 pdev = &sh->dev[sh->pd_idx]; handle_stripe()
4422 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) handle_stripe()
4423 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); handle_stripe()
4424 qdev = &sh->dev[sh->qd_idx]; handle_stripe()
4425 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) handle_stripe()
4426 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) handle_stripe()
4438 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); handle_stripe()
4449 handle_stripe_fill(sh, &s, disks); handle_stripe()
4457 if (s.to_write && !sh->reconstruct_state && !sh->check_state) handle_stripe()
4458 handle_stripe_dirtying(conf, sh, &s, disks); handle_stripe()
4465 if (sh->check_state || handle_stripe()
4467 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && handle_stripe()
4468 !test_bit(STRIPE_INSYNC, &sh->state))) { handle_stripe()
4470 handle_parity_checks6(conf, sh, &s, disks); handle_stripe()
4472 handle_parity_checks5(conf, sh, &s, disks); handle_stripe()
4476 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) handle_stripe()
4477 && !test_bit(STRIPE_REPLACED, &sh->state)) { handle_stripe()
4480 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { handle_stripe()
4481 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); handle_stripe()
4482 set_bit(R5_WantReplace, &sh->dev[i].flags); handle_stripe()
4483 set_bit(R5_LOCKED, &sh->dev[i].flags); handle_stripe()
4487 set_bit(STRIPE_INSYNC, &sh->state); handle_stripe()
4488 set_bit(STRIPE_REPLACED, &sh->state); handle_stripe()
4491 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && handle_stripe()
4492 test_bit(STRIPE_INSYNC, &sh->state)) { handle_stripe()
4494 clear_bit(STRIPE_SYNCING, &sh->state); handle_stripe()
4495 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) handle_stripe()
4504 struct r5dev *dev = &sh->dev[s.failed_num[i]]; handle_stripe()
4524 if (sh->reconstruct_state == reconstruct_state_result) { handle_stripe()
4526 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); handle_stripe()
4528 /* sh cannot be written until sh_src has been read. handle_stripe()
4529 * so arrange for sh to be delayed a little handle_stripe()
4531 set_bit(STRIPE_DELAYED, &sh->state); handle_stripe()
4532 set_bit(STRIPE_HANDLE, &sh->state); handle_stripe()
4542 sh->reconstruct_state = reconstruct_state_idle; handle_stripe()
4543 clear_bit(STRIPE_EXPANDING, &sh->state); handle_stripe()
4545 set_bit(R5_Wantwrite, &sh->dev[i].flags); handle_stripe()
4546 set_bit(R5_LOCKED, &sh->dev[i].flags); handle_stripe()
4551 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && handle_stripe()
4552 !sh->reconstruct_state) { handle_stripe()
4554 sh->disks = conf->raid_disks; handle_stripe()
4555 stripe_set_idx(sh->sector, conf, 0, sh); handle_stripe()
4556 schedule_reconstruction(sh, &s, 1, 1); handle_stripe()
4557 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { handle_stripe()
4558 clear_bit(STRIPE_EXPAND_READY, &sh->state); handle_stripe()
4565 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) handle_stripe()
4566 handle_stripe_expansion(conf, sh); handle_stripe()
4586 struct r5dev *dev = &sh->dev[i]; handle_stripe()
4590 if (!rdev_set_badblocks(rdev, sh->sector, handle_stripe()
4597 rdev_clear_badblocks(rdev, sh->sector, handle_stripe()
4606 rdev_clear_badblocks(rdev, sh->sector, handle_stripe()
4613 raid_run_ops(sh, s.ops_request); handle_stripe()
4615 ops_run_io(sh, &s); handle_stripe()
4638 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); handle_stripe()
4646 struct stripe_head *sh; raid5_activate_delayed() local
4647 sh = list_entry(l, struct stripe_head, lru); raid5_activate_delayed()
4649 clear_bit(STRIPE_DELAYED, &sh->state); raid5_activate_delayed()
4650 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) raid5_activate_delayed()
4652 list_add_tail(&sh->lru, &conf->hold_list); raid5_activate_delayed()
4653 raid5_wakeup_stripe_thread(sh); raid5_activate_delayed()
4666 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); activate_bit_delay() local
4668 list_del_init(&sh->lru); activate_bit_delay()
4669 atomic_inc(&sh->count); activate_bit_delay()
4670 hash = sh->hash_lock_index; activate_bit_delay()
4671 __release_stripe(conf, sh, &temp_inactive_list[hash]); activate_bit_delay()
4903 struct stripe_head *sh = NULL, *tmp; __get_priority_stripe() local
4929 sh = list_entry(handle_list->next, typeof(*sh), lru); __get_priority_stripe()
4933 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { __get_priority_stripe()
4953 sh = tmp; __get_priority_stripe()
4958 if (sh) { __get_priority_stripe()
4966 if (!sh) __get_priority_stripe()
4971 sh->group = NULL; __get_priority_stripe()
4973 list_del_init(&sh->lru); __get_priority_stripe()
4974 BUG_ON(atomic_inc_return(&sh->count) != 1); __get_priority_stripe()
4975 return sh; __get_priority_stripe()
4988 struct stripe_head *sh; raid5_unplug() local
4997 sh = list_first_entry(&cb->list, struct stripe_head, lru); raid5_unplug()
4998 list_del_init(&sh->lru); raid5_unplug()
5005 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); raid5_unplug()
5010 hash = sh->hash_lock_index; raid5_unplug()
5011 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); raid5_unplug()
5024 struct stripe_head *sh) release_stripe_plug()
5032 raid5_release_stripe(sh); release_stripe_plug()
5045 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) release_stripe_plug()
5046 list_add_tail(&sh->lru, &cb->list); release_stripe_plug()
5048 raid5_release_stripe(sh); release_stripe_plug()
5055 struct stripe_head *sh; make_discard_request() local
5083 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); make_discard_request()
5086 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); make_discard_request()
5087 if (test_bit(STRIPE_SYNCING, &sh->state)) { make_discard_request()
5088 raid5_release_stripe(sh); make_discard_request()
5092 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); make_discard_request()
5093 spin_lock_irq(&sh->stripe_lock); make_discard_request()
5095 if (d == sh->pd_idx || d == sh->qd_idx) make_discard_request()
5097 if (sh->dev[d].towrite || sh->dev[d].toread) { make_discard_request()
5098 set_bit(R5_Overlap, &sh->dev[d].flags); make_discard_request()
5099 spin_unlock_irq(&sh->stripe_lock); make_discard_request()
5100 raid5_release_stripe(sh); make_discard_request()
5105 set_bit(STRIPE_DISCARD, &sh->state); make_discard_request()
5107 sh->overwrite_disks = 0; make_discard_request()
5109 if (d == sh->pd_idx || d == sh->qd_idx) make_discard_request()
5111 sh->dev[d].towrite = bi; make_discard_request()
5112 set_bit(R5_OVERWRITE, &sh->dev[d].flags); make_discard_request()
5114 sh->overwrite_disks++; make_discard_request()
5116 spin_unlock_irq(&sh->stripe_lock); make_discard_request()
5122 sh->sector, make_discard_request()
5125 sh->bm_seq = conf->seq_flush + 1; make_discard_request()
5126 set_bit(STRIPE_BIT_DELAY, &sh->state); make_discard_request()
5129 set_bit(STRIPE_HANDLE, &sh->state); make_discard_request()
5130 clear_bit(STRIPE_DELAYED, &sh->state); make_discard_request()
5131 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) make_discard_request()
5133 release_stripe_plug(mddev, sh); make_discard_request()
5149 struct stripe_head *sh; make_request() local
5237 sh = raid5_get_active_stripe(conf, new_sector, previous, make_request()
5239 if (sh) { make_request()
5245 * 'sh', we know that if that happens, make_request()
5258 raid5_release_stripe(sh); make_request()
5268 raid5_release_stripe(sh); make_request()
5275 raid5_release_stripe(sh); make_request()
5291 if (test_bit(STRIPE_EXPANDING, &sh->state) || make_request()
5292 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { make_request()
5298 raid5_release_stripe(sh); make_request()
5303 set_bit(STRIPE_HANDLE, &sh->state); make_request()
5304 clear_bit(STRIPE_DELAYED, &sh->state); make_request()
5305 if ((!sh->batch_head || sh == sh->batch_head) && make_request()
5307 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) make_request()
5309 release_stripe_plug(mddev, sh); make_request()
5344 struct stripe_head *sh; reshape_request() local
5485 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); reshape_request()
5486 set_bit(STRIPE_EXPANDING, &sh->state); reshape_request()
5491 for (j=sh->disks; j--;) { reshape_request()
5493 if (j == sh->pd_idx) reshape_request()
5496 j == sh->qd_idx) reshape_request()
5498 s = raid5_compute_blocknr(sh, j, 0); reshape_request()
5503 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); reshape_request()
5504 set_bit(R5_Expanded, &sh->dev[j].flags); reshape_request()
5505 set_bit(R5_UPTODATE, &sh->dev[j].flags); reshape_request()
5508 set_bit(STRIPE_EXPAND_READY, &sh->state); reshape_request()
5509 set_bit(STRIPE_HANDLE, &sh->state); reshape_request()
5511 list_add(&sh->lru, &stripes); reshape_request()
5534 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); reshape_request()
5535 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); reshape_request()
5536 set_bit(STRIPE_HANDLE, &sh->state); reshape_request()
5537 raid5_release_stripe(sh); reshape_request()
5544 sh = list_entry(stripes.next, struct stripe_head, lru); reshape_request()
5545 list_del_init(&sh->lru); reshape_request()
5546 raid5_release_stripe(sh); reshape_request()
5586 struct stripe_head *sh; sync_request() local
5644 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); sync_request()
5645 if (sh == NULL) { sync_request()
5646 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); sync_request()
5667 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); sync_request()
5668 set_bit(STRIPE_HANDLE, &sh->state); sync_request()
5670 raid5_release_stripe(sh); sync_request()
5687 struct stripe_head *sh; retry_aligned_read() local
5709 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); retry_aligned_read()
5711 if (!sh) { retry_aligned_read()
5718 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { retry_aligned_read()
5719 raid5_release_stripe(sh); retry_aligned_read()
5725 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); retry_aligned_read()
5726 handle_stripe(sh); retry_aligned_read()
5727 raid5_release_stripe(sh); retry_aligned_read()
5745 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; handle_active_stripes() local
5750 (sh = __get_priority_stripe(conf, group)) != NULL) handle_active_stripes()
5751 batch[batch_size++] = sh; handle_active_stripes()
1108 async_copy_data(int frombio, struct bio *bio, struct page **page, sector_t sector, struct dma_async_tx_descriptor *tx, struct stripe_head *sh) async_copy_data() argument
1337 set_syndrome_sources(struct page **srcs, struct stripe_head *sh, int srctype) set_syndrome_sources() argument
2534 raid5_compute_sector(struct r5conf *conf, sector_t r_sector, int previous, int *dd_idx, struct stripe_head *sh) raid5_compute_sector() argument
3054 stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, struct stripe_head *sh) stripe_set_idx() argument
3451 handle_stripe_clean_event(struct r5conf *conf, struct stripe_head *sh, int disks, struct bio_list *return_bi) handle_stripe_clean_event() argument
3555 handle_stripe_dirtying(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks) handle_stripe_dirtying() argument
5023 release_stripe_plug(struct mddev *mddev, struct stripe_head *sh) release_stripe_plug() argument
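The raid5.c hits above (fetch_block, schedule_reconstruction, handle_parity_checks5 and friends) all revolve around the stripe invariant that the P parity block is the byte-wise XOR of the data blocks, which is what lets a single missing block be recomputed from the survivors. The following is only an illustrative userspace sketch of that invariant, not the kernel code; the stripe geometry (NDATA, CHUNK) is made up, and RAID-6's Q syndrome, which needs Galois-field math, is not shown.

/*
 * Sketch: the XOR relationship that the parity-check and reconstruction
 * paths listed above maintain for each stripe. Geometry is arbitrary.
 */
#include <stdint.h>
#include <string.h>
#include <assert.h>

#define NDATA 3
#define CHUNK 16

static void compute_parity(uint8_t parity[CHUNK], uint8_t data[NDATA][CHUNK])
{
	memset(parity, 0, CHUNK);
	for (int d = 0; d < NDATA; d++)
		for (int i = 0; i < CHUNK; i++)
			parity[i] ^= data[d][i];	/* P = D0 ^ D1 ^ D2 */
}

int main(void)
{
	uint8_t data[NDATA][CHUNK] = { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } };
	uint8_t parity[CHUNK], rebuilt[CHUNK];

	compute_parity(parity, data);

	/* "Fail" data block 1 and rebuild it from P and the survivors. */
	memcpy(rebuilt, parity, CHUNK);
	for (int i = 0; i < CHUNK; i++)
		rebuilt[i] ^= data[0][i] ^ data[2][i];
	assert(memcmp(rebuilt, data[1], CHUNK) == 0);
	return 0;
}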
H A Draid5-cache.c184 struct stripe_head *sh, *next; r5l_io_run_stripes() local
186 list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { r5l_io_run_stripes()
187 list_del_init(&sh->log_list); r5l_io_run_stripes()
188 set_bit(STRIPE_HANDLE, &sh->state); r5l_io_run_stripes()
189 raid5_release_stripe(sh); r5l_io_run_stripes()
373 static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, r5l_log_stripe() argument
389 for (i = 0; i < sh->disks; i++) { r5l_log_stripe()
390 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) r5l_log_stripe()
392 if (i == sh->pd_idx || i == sh->qd_idx) r5l_log_stripe()
395 raid5_compute_blocknr(sh, i, 0), r5l_log_stripe()
396 sh->dev[i].log_checksum, 0, false); r5l_log_stripe()
397 r5l_append_payload_page(log, sh->dev[i].page); r5l_log_stripe()
400 if (sh->qd_idx >= 0) { r5l_log_stripe()
402 sh->sector, sh->dev[sh->pd_idx].log_checksum, r5l_log_stripe()
403 sh->dev[sh->qd_idx].log_checksum, true); r5l_log_stripe()
404 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); r5l_log_stripe()
405 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); r5l_log_stripe()
408 sh->sector, sh->dev[sh->pd_idx].log_checksum, r5l_log_stripe()
410 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); r5l_log_stripe()
413 list_add_tail(&sh->log_list, &io->stripe_list); r5l_log_stripe()
415 sh->log_io = io; r5l_log_stripe()
423 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) r5l_write_stripe() argument
434 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || r5l_write_stripe()
435 test_bit(STRIPE_SYNCING, &sh->state)) { r5l_write_stripe()
437 clear_bit(STRIPE_LOG_TRAPPED, &sh->state); r5l_write_stripe()
441 for (i = 0; i < sh->disks; i++) { r5l_write_stripe()
444 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) r5l_write_stripe()
448 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) r5l_write_stripe()
450 addr = kmap_atomic(sh->dev[i].page); r5l_write_stripe()
451 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, r5l_write_stripe()
455 parity_pages = 1 + !!(sh->qd_idx >= 0); r5l_write_stripe()
467 set_bit(STRIPE_LOG_TRAPPED, &sh->state); r5l_write_stripe()
472 clear_bit(STRIPE_DELAYED, &sh->state); r5l_write_stripe()
473 atomic_inc(&sh->count); r5l_write_stripe()
479 r5l_log_stripe(log, sh, data_pages, parity_pages); r5l_write_stripe()
482 list_add_tail(&sh->log_list, &log->no_space_stripes); r5l_write_stripe()
522 struct stripe_head *sh; r5l_run_no_space_stripes() local
526 sh = list_first_entry(&log->no_space_stripes, r5l_run_no_space_stripes()
528 list_del_init(&sh->log_list); r5l_run_no_space_stripes()
529 set_bit(STRIPE_HANDLE, &sh->state); r5l_run_no_space_stripes()
530 raid5_release_stripe(sh); r5l_run_no_space_stripes()
585 void r5l_stripe_write_finished(struct stripe_head *sh) r5l_stripe_write_finished() argument
589 io = sh->log_io; r5l_stripe_write_finished()
590 sh->log_io = NULL; r5l_stripe_write_finished()
860 struct stripe_head *sh; r5l_recovery_flush_one_stripe() local
864 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0); r5l_recovery_flush_one_stripe()
871 &disk_index, sh); r5l_recovery_flush_one_stripe()
874 sh->dev[disk_index].page, READ, false); r5l_recovery_flush_one_stripe()
875 sh->dev[disk_index].log_checksum = r5l_recovery_flush_one_stripe()
877 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); r5l_recovery_flush_one_stripe()
880 disk_index = sh->pd_idx; r5l_recovery_flush_one_stripe()
882 sh->dev[disk_index].page, READ, false); r5l_recovery_flush_one_stripe()
883 sh->dev[disk_index].log_checksum = r5l_recovery_flush_one_stripe()
885 set_bit(R5_Wantwrite, &sh->dev[disk_index].flags); r5l_recovery_flush_one_stripe()
887 if (sh->qd_idx >= 0) { r5l_recovery_flush_one_stripe()
888 disk_index = sh->qd_idx; r5l_recovery_flush_one_stripe()
891 PAGE_SIZE, sh->dev[disk_index].page, r5l_recovery_flush_one_stripe()
893 sh->dev[disk_index].log_checksum = r5l_recovery_flush_one_stripe()
896 &sh->dev[disk_index].flags); r5l_recovery_flush_one_stripe()
910 for (disk_index = 0; disk_index < sh->disks; disk_index++) { r5l_recovery_flush_one_stripe()
914 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) r5l_recovery_flush_one_stripe()
916 addr = kmap_atomic(sh->dev[disk_index].page); r5l_recovery_flush_one_stripe()
919 if (checksum != sh->dev[disk_index].log_checksum) r5l_recovery_flush_one_stripe()
923 for (disk_index = 0; disk_index < sh->disks; disk_index++) { r5l_recovery_flush_one_stripe()
927 &sh->dev[disk_index].flags)) r5l_recovery_flush_one_stripe()
934 sh->dev[disk_index].page, WRITE, false); r5l_recovery_flush_one_stripe()
938 sh->dev[disk_index].page, WRITE, false); r5l_recovery_flush_one_stripe()
940 raid5_release_stripe(sh); r5l_recovery_flush_one_stripe()
944 for (disk_index = 0; disk_index < sh->disks; disk_index++) r5l_recovery_flush_one_stripe()
945 sh->dev[disk_index].flags = 0; r5l_recovery_flush_one_stripe()
946 raid5_release_stripe(sh); r5l_recovery_flush_one_stripe()
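r5l_write_stripe() above stores a per-page checksum in sh->dev[i].log_checksum before the page is appended to the write-back log, and r5l_recovery_flush_one_stripe() recomputes it during recovery to validate what was read back. The helper named in the listing is the kernel's crc32c_le(); the sketch below is only a generic bit-by-bit CRC-32C (Castagnoli) showing what such a per-page checksum involves. The seed, PAGE_SIZE and the init/final inversion convention here are assumptions and may not match the kernel helper exactly.

/*
 * Generic CRC-32C over a page-sized buffer, as an illustration of the
 * log_checksum idea in the raid5-cache hits above. Reflected Castagnoli
 * polynomial, ~0 init and final inversion; the kernel's crc32c_le() is
 * table/instruction accelerated and its seeding convention may differ.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096		/* assumption for this sketch */

static uint32_t crc32c(uint32_t seed, const uint8_t *data, size_t len)
{
	uint32_t crc = ~seed;

	while (len--) {
		crc ^= *data++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	static uint8_t page[PAGE_SIZE];	/* stand-in for sh->dev[i].page */
	uint32_t log_checksum = crc32c(0, page, PAGE_SIZE);

	printf("checksum=%08x\n", log_checksum);
	return 0;
}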
/linux-4.4.14/tools/testing/selftests/futex/
H A DMakefile3 TEST_PROGS := run.sh
12 ./run.sh
25 echo "./run.sh"
/linux-4.4.14/arch/alpha/math-emu/
H A Dsfp-util.h7 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
8 ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al)))
10 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
11 ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl)))
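add_ssaaaa() and sub_ddmmss() above are the classic soft-fp/longlong.h fallbacks: they add or subtract a two-word (high:low) quantity and derive the carry or borrow from an unsigned comparison on the low halves. A small standalone sketch of the same idiom follows; the 64-bit word type is just a choice for the example, the real macros operate on the machine word.

/*
 * Two-word add/sub with carry and borrow, mirroring the generic macros
 * matched above: the low halves are combined first, and the wrap-around
 * detected by an unsigned compare feeds into the high halves.
 */
#include <stdint.h>
#include <stdio.h>

static void add_ssaaaa(uint64_t *sh, uint64_t *sl,
		       uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl)
{
	*sl = al + bl;
	*sh = ah + bh + (*sl < al);	/* carry out of the low word */
}

static void sub_ddmmss(uint64_t *sh, uint64_t *sl,
		       uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl)
{
	*sl = al - bl;
	*sh = ah - bh - (al < bl);	/* borrow into the high word */
}

int main(void)
{
	uint64_t hi, lo;

	add_ssaaaa(&hi, &lo, 1, UINT64_MAX, 0, 1);	/* expect 2:0 */
	printf("add -> %llu:%llu\n", (unsigned long long)hi, (unsigned long long)lo);

	sub_ddmmss(&hi, &lo, 2, 0, 0, 1);		/* expect 1:0xffff... */
	printf("sub -> %llu:%#llx\n", (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}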
/linux-4.4.14/arch/sh/include/mach-x3proto/mach/
H A Dhardware.h6 /* arch/sh/boards/mach-x3proto/gpio.c */
H A Dilsel.h40 /* arch/sh/boards/renesas/x3proto/ilsel.c */
/linux-4.4.14/arch/arm/mach-shmobile/include/mach/
H A Dirqs.h4 /* Stuck here until drivers/pinctl/sh-pfc gets rid of legacy code */
/linux-4.4.14/tools/power/cpupower/bench/
H A DMakefile29 install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh
31 install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh
/linux-4.4.14/tools/testing/selftests/lib/
H A DMakefile6 TEST_PROGS := printf.sh
/linux-4.4.14/tools/testing/selftests/static_keys/
H A DMakefile6 TEST_PROGS := test_static_keys.sh
/linux-4.4.14/tools/testing/selftests/user/
H A DMakefile6 TEST_PROGS := test_user_copy.sh
/linux-4.4.14/arch/sh/include/asm/
H A Dclock.h10 /* arch/sh/kernel/cpu/clock-cpg.c */
13 /* arch/sh/kernel/cpu/clock.c */
H A Dkdebug.h13 /* arch/sh/kernel/dumpstack.c */
H A Dtimex.h2 * linux/include/asm-sh/timex.h
4 * sh architecture timex specifications
H A Dmmzone.h28 /* arch/sh/mm/numa.c */
40 /* arch/sh/kernel/setup.c */
43 /* arch/sh/mm/init.c */
H A Dftrace.h24 /* No extra data needed on sh */
40 /* arch/sh/kernel/return_address.c */
H A Dfreq.h2 * include/asm-sh/freq.h
H A Dreboot.h18 /* arch/sh/kernel/machine_kexec.c */
H A Dprocessor.h13 * in arch/sh/mm/Kconfig, as well as an entry in arch/sh/kernel/setup.c
114 /* arch/sh/kernel/process.c */
119 /* arch/sh/mm/alignment.c */
126 /* arch/sh/mm/init.c */
129 /* arch/sh/kernel/setup.c */
H A Ddma-mapping.h19 /* arch/sh/mm/consistent.c */
H A Dperf_event.h24 /* arch/sh/kernel/perf_event.c */
H A Dshmparam.h2 * include/asm-sh/shmparam.h
H A Dsram.h9 /* arch/sh/mm/sram.c */
H A Dbugs.h22 char *p = &init_utsname()->machine[2]; /* "sh" */ check_bugs()
H A Dclkdev.h2 * Copyright (C) 2010 Paul Mundt <lethal@linux-sh.org>
H A Dmachvec.h2 * include/asm-sh/machvec.h
H A Ddma-register.h4 * extracted from arch/sh/include/asm/dma-sh.h:
H A Ddma.h2 * include/asm-sh/dma.h
111 /* arch/sh/drivers/dma/dma-api.c */
140 /* arch/sh/drivers/dma/dma-sysfs.c */
H A Dcache.h3 * include/asm-sh/cache.h
H A Dflat.h2 * include/asm-sh/flat.h
H A Dgpio.h2 * include/asm-sh/gpio.h
/linux-4.4.14/arch/sh/kernel/cpu/
H A Dclock-cpg.c59 clk_add_alias("fck", "sh-tmu-sh3.0", "peripheral_clk", NULL); cpg_clk_init()
60 clk_add_alias("fck", "sh-tmu.0", "peripheral_clk", NULL); cpg_clk_init()
61 clk_add_alias("fck", "sh-tmu.1", "peripheral_clk", NULL); cpg_clk_init()
62 clk_add_alias("fck", "sh-tmu.2", "peripheral_clk", NULL); cpg_clk_init()
63 clk_add_alias("fck", "sh-mtu2", "peripheral_clk", NULL); cpg_clk_init()
64 clk_add_alias("fck", "sh-cmt-16.0", "peripheral_clk", NULL); cpg_clk_init()
65 clk_add_alias("fck", "sh-cmt-32.0", "peripheral_clk", NULL); cpg_clk_init()
H A Dadc.c2 * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
H A Dclock.c2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
/linux-4.4.14/drivers/pinctrl/sh-pfc/
H A DMakefile0 sh-pfc-objs = core.o pinctrl.o
3 sh-pfc-objs += gpio.o
5 obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
/linux-4.4.14/arch/s390/include/asm/
H A Dsfp-util.h6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
16 (sh) = __sh; \
20 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
30 (sh) = __sh; \
/linux-4.4.14/arch/sh/kernel/
H A Dkdebugfs.c10 arch_debugfs_dir = debugfs_create_dir("sh", NULL); arch_kdebugfs_init()
H A Dvmlinux.lds.S7 OUTPUT_ARCH(sh:sh5)
10 OUTPUT_ARCH(sh)
H A Ddebugtraps.S2 * arch/sh/kernel/debugtraps.S
H A Dioport.c2 * arch/sh/kernel/ioport.c
H A Dreturn_address.c2 * arch/sh/kernel/return_address.c
H A Dsh_ksyms_64.c2 * arch/sh/kernel/sh_ksyms_64.c
/linux-4.4.14/arch/sh/mm/
H A Dextable_32.c2 * linux/arch/sh/mm/extable.c
H A Dcache-shx3.c2 * arch/sh/mm/cache-shx3.c - SH-X3 optimized cache ops
H A Dhugetlbpage.c2 * arch/sh/mm/hugetlbpage.c
/linux-4.4.14/tools/testing/selftests/net/
H A DMakefile13 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
/linux-4.4.14/tools/testing/selftests/pstore/
H A DMakefile12 @sh pstore_crash_test || { echo "pstore_crash_test: [FAIL]"; exit 1; }
/linux-4.4.14/arch/s390/boot/
H A DMakefile25 sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmsmac/phy/
H A Dphy_cmn.c132 wlapi_bmac_ucode_wake_override_phyreg_set(pi->sh->physhim); wlc_phyreg_enter()
138 wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim); wlc_phyreg_exit()
144 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO); wlc_radioreg_enter()
155 wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); wlc_radioreg_exit()
185 if ((D11REV_GE(pi->sh->corerev, 24)) || read_radio_reg()
186 (D11REV_IS(pi->sh->corerev, 22) read_radio_reg()
201 if ((D11REV_GE(pi->sh->corerev, 24)) || write_radio_reg()
202 (D11REV_IS(pi->sh->corerev, 22) write_radio_reg()
223 if (D11REV_GE(pi->sh->corerev, 24)) { read_radio_id()
367 struct shared_phy *sh; wlc_phy_shared_attach() local
369 sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC); wlc_phy_shared_attach()
370 if (sh == NULL) wlc_phy_shared_attach()
373 sh->physhim = shp->physhim; wlc_phy_shared_attach()
374 sh->unit = shp->unit; wlc_phy_shared_attach()
375 sh->corerev = shp->corerev; wlc_phy_shared_attach()
377 sh->vid = shp->vid; wlc_phy_shared_attach()
378 sh->did = shp->did; wlc_phy_shared_attach()
379 sh->chip = shp->chip; wlc_phy_shared_attach()
380 sh->chiprev = shp->chiprev; wlc_phy_shared_attach()
381 sh->chippkg = shp->chippkg; wlc_phy_shared_attach()
382 sh->sromrev = shp->sromrev; wlc_phy_shared_attach()
383 sh->boardtype = shp->boardtype; wlc_phy_shared_attach()
384 sh->boardrev = shp->boardrev; wlc_phy_shared_attach()
385 sh->boardflags = shp->boardflags; wlc_phy_shared_attach()
386 sh->boardflags2 = shp->boardflags2; wlc_phy_shared_attach()
388 sh->fast_timer = PHY_SW_TIMER_FAST; wlc_phy_shared_attach()
389 sh->slow_timer = PHY_SW_TIMER_SLOW; wlc_phy_shared_attach()
390 sh->glacial_timer = PHY_SW_TIMER_GLACIAL; wlc_phy_shared_attach()
392 sh->rssi_mode = RSSI_ANT_MERGE_MAX; wlc_phy_shared_attach()
394 return sh; wlc_phy_shared_attach()
402 if (!pi->sh->up) { wlc_phy_timercb_phycal()
429 wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, wlc_phy_attach() argument
438 if (D11REV_IS(sh->corerev, 4)) wlc_phy_attach()
448 pi = sh->phy_head; wlc_phy_attach()
450 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); wlc_phy_attach()
460 pi->sh = sh; wlc_phy_attach()
473 wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); wlc_phy_attach()
531 pi->sh->phyrxchain = 0x3; wlc_phy_attach()
559 pi->phycal_timer = wlapi_init_timer(pi->sh->physhim, wlc_phy_attach()
575 pi->next = pi->sh->phy_head; wlc_phy_attach()
576 sh->phy_head = pi; wlc_phy_attach()
600 if (pi->sh->phy_head == pi) wlc_phy_detach()
601 pi->sh->phy_head = pi->next; wlc_phy_detach()
602 else if (pi->sh->phy_head->next == pi) wlc_phy_detach()
603 pi->sh->phy_head->next = NULL; wlc_phy_detach()
729 if (!pi || !pi->sh) wlc_phy_hw_clk_state_upd()
732 pi->sh->clk = newstate; wlc_phy_hw_clk_state_upd()
739 if (!pi || !pi->sh) wlc_phy_hw_state_upd()
742 pi->sh->up = newstate; wlc_phy_hw_state_upd()
777 wlapi_bmac_bw_set(pi->sh->physhim, wlc_phy_init()
788 if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) wlc_phy_init()
794 wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi, pi->sh->rx_antdiv); wlc_phy_init()
841 if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 && wlc_phy_table_addr()
842 pi->sh->chiprev == 1) { wlc_phy_table_addr()
851 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && wlc_phy_table_data_write()
852 (pi->sh->chiprev == 1) && wlc_phy_table_data_write()
885 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && wlc_phy_write_table()
886 (pi->sh->chiprev == 1) && wlc_phy_write_table()
922 if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && wlc_phy_read_table()
923 (pi->sh->chiprev == 1)) { wlc_phy_read_table()
1009 wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, wlc_phy_do_dummy_tx()
1014 if (D11REV_GE(pi->sh->corerev, 11)) wlc_phy_do_dummy_tx()
1093 pi->nphy_perical_last = pi->sh->now - pi->sh->glacial_timer; wlc_phy_mute_upd()
1104 wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_0, NULL_TSSI_W); wlc_phy_clear_tssi()
1105 wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_1, NULL_TSSI_W); wlc_phy_clear_tssi()
1106 wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_0, NULL_TSSI_W); wlc_phy_clear_tssi()
1107 wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_1, NULL_TSSI_W); wlc_phy_clear_tssi()
1188 wlapi_bmac_write_shm(pi->sh->physhim, M_CURCHANNEL, m_cur_channel); wlc_phy_chanspec_set()
1363 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_target_set()
1369 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_target_set()
1385 if (pi->sh->up) { wlc_phy_txpower_set()
1394 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_set()
1400 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_set()
1824 pi->sh->machwcap = machwcap; wlc_phy_machwcap_set()
1875 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_limit_set()
1879 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_limit_set()
1902 if (!pi->sh->clk) wlc_phy_txpower_update_shm()
1908 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_MAX, 63); wlc_phy_txpower_update_shm()
1909 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_N, wlc_phy_txpower_update_shm()
1912 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_TARGET, wlc_phy_txpower_update_shm()
1915 wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_CUR, wlc_phy_txpower_update_shm()
1923 pi->sh->physhim, wlc_phy_txpower_update_shm()
1925 wlapi_bmac_write_shm(pi->sh->physhim, offset + 6, wlc_phy_txpower_update_shm()
1927 wlapi_bmac_write_shm(pi->sh->physhim, offset + 14, wlc_phy_txpower_update_shm()
1931 wlapi_bmac_mhf(pi->sh->physhim, MHF2, MHF2_HWPWRCTL, wlc_phy_txpower_update_shm()
1939 wlapi_bmac_write_shm(pi->sh->physhim, M_OFDM_OFFSET, wlc_phy_txpower_update_shm()
1973 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_hw_ctrl_set()
1983 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_hw_ctrl_set()
2079 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_txpower_get_current()
2083 wlapi_enable_mac(pi->sh->physhim); wlc_phy_txpower_get_current()
2108 } else if (pi->hwpwrctrl && pi->sh->up) { wlc_phy_txpower_get_current()
2156 pi->sh->rx_antdiv = val; wlc_phy_ant_rxdiv_set()
2158 if (!(ISNPHY(pi) && D11REV_IS(pi->sh->corerev, 16))) { wlc_phy_ant_rxdiv_set()
2160 wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, wlc_phy_ant_rxdiv_set()
2163 wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, 0, wlc_phy_ant_rxdiv_set()
2170 if (!pi->sh->clk) wlc_phy_ant_rxdiv_set()
2176 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_ant_rxdiv_set()
2191 wlapi_enable_mac(pi->sh->physhim); wlc_phy_ant_rxdiv_set()
2229 pi->sh->phy_noise_window[pi->sh->phy_noise_index] = wlc_phy_noise_cb()
2231 pi->sh->phy_noise_index = wlc_phy_noise_cb()
2232 MODINC(pi->sh->phy_noise_index, MA_WINDOW_SZ); wlc_phy_noise_cb()
2256 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx)); wlc_phy_noise_read_shmem()
2257 hi = wlapi_bmac_read_shm(pi->sh->physhim, wlc_phy_noise_read_shmem()
2297 jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); wlc_phy_noise_sample_intr()
2300 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP0); wlc_phy_noise_sample_intr()
2301 hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP1); wlc_phy_noise_sample_intr()
2304 lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP2); wlc_phy_noise_sample_intr()
2305 hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP3); wlc_phy_noise_sample_intr()
2310 status_1 = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_0); wlc_phy_noise_sample_intr()
2329 jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); wlc_phy_noise_sample_intr()
2364 pi->phynoise_now = pi->sh->now; wlc_phy_noise_sample_request()
2386 wlapi_bmac_write_shm(pi->sh->physhim, M_JSSI_0, 0); wlc_phy_noise_sample_request()
2387 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); wlc_phy_noise_sample_request()
2388 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); wlc_phy_noise_sample_request()
2389 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlc_phy_noise_sample_request()
2390 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); wlc_phy_noise_sample_request()
2395 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_noise_sample_request()
2399 wlapi_enable_mac(pi->sh->physhim); wlc_phy_noise_sample_request()
2406 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); wlc_phy_noise_sample_request()
2407 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); wlc_phy_noise_sample_request()
2408 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); wlc_phy_noise_sample_request()
2409 wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); wlc_phy_noise_sample_request()
2429 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_noise_sample_request()
2435 wlapi_enable_mac(pi->sh->physhim); wlc_phy_noise_sample_request()
2536 if ((pi->sh->corerev >= 11) wlc_phy_rssi_compute()
2596 pi->sh->now++; wlc_phy_watchdog()
2607 if (pi->phynoise_state && (pi->sh->now - pi->phynoise_now) > 5) wlc_phy_watchdog()
2611 ((pi->sh->now - pi->phycal_txpower) >= pi->sh->fast_timer)) { wlc_phy_watchdog()
2614 pi->phycal_txpower = pi->sh->now; wlc_phy_watchdog()
2625 ((pi->sh->now - pi->nphy_perical_last) >= wlc_phy_watchdog()
2626 pi->sh->glacial_timer)) wlc_phy_watchdog()
2635 ((pi->sh->now - pi->phy_lastcal) >= wlc_phy_watchdog()
2636 pi->sh->glacial_timer)) { wlc_phy_watchdog()
2659 pi->sh->phy_noise_window[i] = (s8) (rssi & 0xff); wlc_phy_BSSinit()
2662 pi->sh->phy_noise_window[i] = wlc_phy_BSSinit()
2665 pi->sh->phy_noise_index = 0; wlc_phy_BSSinit()
2809 pi->sh->hw_phytxchain = txchain; wlc_phy_stf_chain_init()
2810 pi->sh->hw_phyrxchain = rxchain; wlc_phy_stf_chain_init()
2811 pi->sh->phytxchain = txchain; wlc_phy_stf_chain_init()
2812 pi->sh->phyrxchain = rxchain; wlc_phy_stf_chain_init()
2813 pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); wlc_phy_stf_chain_init()
2820 pi->sh->phytxchain = txchain; wlc_phy_stf_chain_set()
2825 pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); wlc_phy_stf_chain_set()
2832 *txchain = pi->sh->phytxchain; wlc_phy_stf_chain_get()
2833 *rxchain = pi->sh->phyrxchain; wlc_phy_stf_chain_get()
2848 wlapi_suspend_mac_and_wait(pi->sh->physhim); wlc_phy_stf_chain_active_get()
2850 wlapi_enable_mac(pi->sh->physhim); wlc_phy_stf_chain_active_get()
2894 if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) && wlc_lcnphy_epa_switch()
2895 (pi->sh->boardflags & BFL_FEM)) { wlc_lcnphy_epa_switch()
2898 txant = wlapi_bmac_get_txant(pi->sh->physhim); wlc_lcnphy_epa_switch()
/linux-4.4.14/arch/sh/
H A DMakefile2 # arch/sh/Makefile
19 isa-$(CONFIG_SH_DSP) := sh
103 UTS_MACHINE := sh
128 head-y := arch/sh/kernel/head_$(BITS).o
130 core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
131 core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
155 core-y += $(addprefix arch/sh/boards/, \
159 # Common machine type headers. Not part of the arch/sh/boards/ hierarchy.
163 core-$(CONFIG_HD6446X_SERIES) += arch/sh/cchips/hd6446x/
183 drivers-y += arch/sh/drivers/
184 drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
186 boot := arch/sh/boot
188 cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \
189 $(foreach d, $(machdir-y), -Iarch/sh/include/$(d))
203 libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
204 libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
219 $(Q)$(MAKE) $(build)=arch/sh/tools include/generated/machtypes.h
223 $(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall
/linux-4.4.14/drivers/xen/events/
H A Devents_2l.c145 struct shared_info *sh, active_evtchns()
148 return sh->evtchn_pending[idx] & active_evtchns()
150 ~sh->evtchn_mask[idx]; active_evtchns()
266 struct shared_info *sh = HYPERVISOR_shared_info; xen_debug_interrupt() local
292 for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
294 (int)sizeof(sh->evtchn_pending[0])*2,
295 sh->evtchn_pending[i],
298 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
300 (int)(sizeof(sh->evtchn_mask[0])*2),
301 sh->evtchn_mask[i],
305 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
307 (int)(sizeof(sh->evtchn_mask[0])*2),
308 sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
318 for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
319 xen_ulong_t pending = sh->evtchn_pending[i]
320 & ~sh->evtchn_mask[i]
323 (int)(sizeof(sh->evtchn_mask[0])*2),
329 if (sync_test_bit(i, BM(sh->evtchn_pending))) {
336 !sync_test_bit(i, BM(sh->evtchn_mask))
144 active_evtchns(unsigned int cpu, struct shared_info *sh, unsigned int idx) active_evtchns() argument
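active_evtchns() above selects, for one word of the 2-level event-channel bitmap, the channels that are pending and not masked; the search output only shows the lines containing "sh", so the per-CPU binding mask that the full function presumably also ANDs in is not visible here. The selection itself is plain bit arithmetic, sketched below with made-up values.

/*
 * Sketch of the pending & ~mask selection in active_evtchns() above.
 * Values are arbitrary; in the kernel the words come from the Xen
 * shared_info page and (presumably) a per-CPU binding mask as well.
 */
#include <stdio.h>

typedef unsigned long xen_ulong_t;

static xen_ulong_t active_evtchns(xen_ulong_t pending, xen_ulong_t mask)
{
	return pending & ~mask;
}

int main(void)
{
	xen_ulong_t pending = 0xB;	/* channels 0, 1, 3 pending */
	xen_ulong_t mask    = 0x2;	/* channel 1 masked         */

	printf("deliverable = %#lx\n", active_evtchns(pending, mask)); /* 0x9 */
	return 0;
}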
/linux-4.4.14/arch/sh/boot/romimage/
H A DMakefile2 # linux/arch/sh/boot/romimage/Makefile
29 $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/zeropage.bin arch/sh/boot/zImage FORCE
/linux-4.4.14/arch/sh/kernel/cpu/sh2a/
H A Dclock-sh7269.c2 * arch/sh/kernel/cpu/sh2a/clock-sh7269.c
153 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP47]),
154 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP46]),
155 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP45]),
156 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP44]),
157 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP43]),
158 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP42]),
159 CLKDEV_ICK_ID("sci_fck", "sh-sci.6", &mstp_clks[MSTP41]),
160 CLKDEV_ICK_ID("sci_fck", "sh-sci.7", &mstp_clks[MSTP40]),
161 CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
163 CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
H A Dsetup-sh7201.c192 .name = "sh-sci",
213 .name = "sh-sci",
234 .name = "sh-sci",
255 .name = "sh-sci",
276 .name = "sh-sci",
297 .name = "sh-sci",
318 .name = "sh-sci",
339 .name = "sh-sci",
362 .name = "sh-rtc",
376 .name = "sh-mtu2",
H A Dsetup-sh7264.c245 .name = "sh-sci",
271 .name = "sh-sci",
297 .name = "sh-sci",
323 .name = "sh-sci",
349 .name = "sh-sci",
375 .name = "sh-sci",
401 .name = "sh-sci",
427 .name = "sh-sci",
447 .name = "sh-cmt-16",
463 .name = "sh-mtu2",
483 .name = "sh-rtc",
H A Dsetup-sh7203.c190 .name = "sh-sci",
213 .name = "sh-sci",
236 .name = "sh-sci",
259 .name = "sh-sci",
279 .name = "sh-cmt-16",
295 .name = "sh-mtu2",
315 .name = "sh-rtc",
H A Dclock-sh7264.c2 * arch/sh/kernel/cpu/sh2a/clock-sh7264.c
120 CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
122 CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
H A Dex.S2 * arch/sh/kernel/cpu/sh2a/ex.S
H A Dsetup-sh7269.c267 .name = "sh-sci",
293 .name = "sh-sci",
319 .name = "sh-sci",
345 .name = "sh-sci",
371 .name = "sh-sci",
397 .name = "sh-sci",
423 .name = "sh-sci",
449 .name = "sh-sci",
469 .name = "sh-cmt-16",
485 .name = "sh-mtu2",
505 .name = "sh-rtc",
/linux-4.4.14/arch/sh/kernel/cpu/sh4a/
H A Dclock-sh7786.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7786.c
142 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
143 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
144 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
145 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
146 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
147 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
158 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
159 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
160 CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP010]),
161 CLKDEV_ICK_ID("fck", "sh-tmu.3", &mstp_clks[MSTP011]),
H A Dclock-shx3.c2 * arch/sh/kernel/cpu/sh4/clock-shx3.c
117 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
118 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
119 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
120 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
127 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
128 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
H A Dclock-sh7785.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7785.c
135 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
136 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
137 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
138 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
139 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
140 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
149 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
150 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
H A Dclock-sh7723.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7723.c
235 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
239 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
240 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
241 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
261 CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
267 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
268 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
270 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
271 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
272 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
273 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
274 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
275 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
H A Dclock-sh7757.c2 * arch/sh/kernel/cpu/sh4/clock-sh7757.c
126 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP113]),
127 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP114]),
128 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]),
129 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]),
130 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
H A Dclock-sh7724.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7724.c
302 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
307 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
308 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
310 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
311 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
312 CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
314 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
315 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
316 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
317 CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
318 CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
319 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
346 CLKDEV_DEV_ID("sh-vou", &mstp_clks[HWBLK_VOU]),
H A Dclock-sh7734.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7734.c
197 CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP026]),
198 CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
199 CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP024]),
200 CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP023]),
201 CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP022]),
202 CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP021]),
204 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]),
205 CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]),
206 CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP014]),
H A Dclock-sh7722.c2 * arch/sh/kernel/cpu/sh4a/clock-sh7722.c
206 CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]),
208 CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
209 CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
212 CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
213 CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
214 CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
223 CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
H A Dsetup-sh7770.c30 .name = "sh-sci",
51 .name = "sh-sci",
72 .name = "sh-sci",
93 .name = "sh-sci",
114 .name = "sh-sci",
135 .name = "sh-sci",
156 .name = "sh-sci",
177 .name = "sh-sci",
198 .name = "sh-sci",
219 .name = "sh-sci",
240 .name = "sh-tmu",
261 .name = "sh-tmu",
282 .name = "sh-tmu",
H A Dsetup-sh7734.c2 * arch/sh/kernel/cpu/sh4a/setup-sh7734.c
40 .name = "sh-sci",
62 .name = "sh-sci",
84 .name = "sh-sci",
106 .name = "sh-sci",
128 .name = "sh-sci",
150 .name = "sh-sci",
174 .name = "sh-rtc",
214 .name = "sh-tmu",
235 .name = "sh-tmu",
256 .name = "sh-tmu",
/linux-4.4.14/lib/mpi/
H A Dlonglong.h114 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
117 : "=r" ((USItype)(sh)), \
123 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
126 : "=r" ((USItype)(sh)), \
176 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
179 : "=r" ((USItype)(sh)), \
185 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
188 : "=r" ((USItype)(sh)), \
263 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
266 : "=g" ((USItype)(sh)), \
272 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
275 : "=g" ((USItype)(sh)), \
300 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
303 : "=r" ((USItype)(sh)), \
309 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
312 : "=r" ((USItype)(sh)), \
397 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
400 : "=r" ((USItype)(sh)), \
406 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
409 : "=r" ((USItype)(sh)), \
450 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
454 : "=r" ((USItype)(sh)), \
460 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
464 : "=r" ((USItype)(sh)), \
515 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
518 : "=d" ((USItype)(sh)), \
524 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
527 : "=d" ((USItype)(sh)), \
594 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
597 : "=r" ((USItype)(sh)), \
603 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
606 : "=r" ((USItype)(sh)), \
739 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
743 : "=r" ((USItype)(sh)), \
750 : "=r" ((USItype)(sh)), \
757 : "=r" ((USItype)(sh)), \
764 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
768 : "=r" ((USItype)(sh)), \
775 : "=r" ((USItype)(sh)), \
782 : "=r" ((USItype)(sh)), \
789 : "=r" ((USItype)(sh)), \
796 : "=r" ((USItype)(sh)), \
857 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
860 : "=r" ((USItype)(sh)), \
866 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
869 : "=r" ((USItype)(sh)), \
892 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
895 : "=r" ((USItype)(sh)), \
901 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
904 : "=r" ((USItype)(sh)), \
968 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
971 : "=r" ((USItype)(sh)), \
978 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
981 : "=r" ((USItype)(sh)), \
1163 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1166 : "=g" ((USItype)(sh)), \
1172 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1175 : "=g" ((USItype)(sh)), \
1211 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1213 : "=r" ((unsigned int)(sh)), \
1219 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1221 : "=r" ((unsigned int)(sh)), \
1268 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1272 (sh) = (ah) + (bh) + (__x < (al)); \
1278 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1282 (sh) = (ah) - (bh) - (__x > (al)); \
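
The last longlong.h variant above is the plain-C fallback used when no inline-assembly version matches: the carry (or borrow) of a double-word add (or subtract) is recovered by comparing the low-word result with one of its operands. A minimal, self-contained rendering of that fallback, assuming 32-bit limbs:

#include <stdint.h>
#include <stdio.h>

#define add_ssaaaa(sh, sl, ah, al, bh, bl)		\
	do {						\
		uint32_t __x = (al) + (bl);		\
		(sh) = (ah) + (bh) + (__x < (al));	\
		(sl) = __x;				\
	} while (0)

#define sub_ddmmss(sh, sl, ah, al, bh, bl)		\
	do {						\
		uint32_t __x = (al) - (bl);		\
		(sh) = (ah) - (bh) - (__x > (al));	\
		(sl) = __x;				\
	} while (0)

int main(void)
{
	uint32_t hi, lo;

	add_ssaaaa(hi, lo, 0x1u, 0xffffffffu, 0x0u, 0x1u);
	printf("sum  = %#x:%08x\n", (unsigned)hi, (unsigned)lo); /* 0x2:00000000 */

	sub_ddmmss(hi, lo, 0x2u, 0x0u, 0x0u, 0x1u);
	printf("diff = %#x:%08x\n", (unsigned)hi, (unsigned)lo); /* 0x1:ffffffff */
	return 0;
}
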
/linux-4.4.14/arch/sparc/math-emu/
H A Dsfp-util_64.h14 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
20 : "=r" (sh), \
28 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
34 : "=r" (sh), \
H A Dsfp-util_32.h6 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
9 : "=r" (sh), \
16 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
19 : "=r" (sh), \
/linux-4.4.14/arch/xtensa/include/asm/
H A Dunistd.h13 * Ignore legacy system calls in the checksyscalls.sh script
/linux-4.4.14/scripts/
H A Dheaders.sh1 #!/bin/sh
H A Dmakelst1 #!/bin/sh
/linux-4.4.14/arch/sh/boards/mach-se/
H A Dboard-se7619.c2 * arch/sh/boards/se/7619/setup.c
/linux-4.4.14/arch/sh/include/cpu-common/cpu/
H A Dmmu_context.h2 * include/asm-sh/cpu-sh2/mmu_context.h
/linux-4.4.14/arch/sh/include/cpu-sh2/cpu/
H A Dfreq.h2 * include/asm-sh/cpu-sh2/freq.h
H A Dcache.h2 * include/asm-sh/cpu-sh2/cache.h
/linux-4.4.14/arch/sh/include/cpu-sh2a/cpu/
H A Dfreq.h2 * include/asm-sh/cpu-sh2a/freq.h
H A Dcache.h2 * include/asm-sh/cpu-sh2a/cache.h
/linux-4.4.14/arch/sh/include/mach-sh03/mach/
H A Dio.h2 * include/asm-sh/sh03/io.h
H A Dsh03.h5 * linux/include/asm-sh/sh03/sh03.h
/linux-4.4.14/arch/sh/math-emu/
H A Dsfp-util.h5 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
9 (sh) = (ah) + (bh) + (__x < (al)); \
13 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
17 (sh) = (ah) - (bh) - (__x > (al)); \
/linux-4.4.14/arch/frv/boot/
H A DMakefile64 sh ./install.sh $(KERNELRELEASE) Image System.map "$(INSTALL_PATH)"
67 sh ./install.sh $(KERNELRELEASE) zImage System.map "$(INSTALL_PATH)"
/linux-4.4.14/tools/testing/selftests/exec/
H A DMakefile9 echo '#!/bin/sh' > $@
/linux-4.4.14/tools/testing/selftests/futex/functional/
H A DMakefile15 TEST_PROGS := $(TARGETS) run.sh
/linux-4.4.14/tools/testing/selftests/memfd/
H A DMakefile19 @./run_fuse_test.sh || echo "fuse_test: [FAIL]"
/linux-4.4.14/arch/mips/include/asm/
H A Duasm.h207 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
209 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
210 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
211 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
212 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
223 # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
225 # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
226 # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
227 # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
228 # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
/linux-4.4.14/net/netfilter/
H A Dxt_sctp.c121 const sctp_sctphdr_t *sh; sctp_mt() local
129 sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); sctp_mt()
130 if (sh == NULL) { sctp_mt()
135 pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); sctp_mt()
137 return SCCHECK(ntohs(sh->source) >= info->spts[0] sctp_mt()
138 && ntohs(sh->source) <= info->spts[1], sctp_mt()
140 && SCCHECK(ntohs(sh->dest) >= info->dpts[0] sctp_mt()
141 && ntohs(sh->dest) <= info->dpts[1], sctp_mt()
/linux-4.4.14/drivers/sh/superhyway/
H A Dsuperhyway-sysfs.c2 * drivers/sh/superhyway/superhyway-sysfs.c
6 * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
/linux-4.4.14/arch/sh/boards/mach-dreamcast/
H A Dsetup.c2 * arch/sh/boards/dreamcast/setup.c
7 * Copyright (c) 2002, 2003, 2004 Paul Mundt <lethal@linux-sh.org>
/linux-4.4.14/arch/sh/drivers/pci/
H A Dfixups-snapgear.c2 * arch/sh/drivers/pci/ops-snapgear.c
6 * Ported to new API by Paul Mundt <lethal@linux-sh.org>
H A Dfixups-titan.c2 * arch/sh/drivers/pci/ops-titan.c
4 * Ported to new API by Paul Mundt <lethal@linux-sh.org>
H A Dfixups-r7780rp.c2 * arch/sh/drivers/pci/fixups-r7780rp.c
H A Dfixups-sdk7780.c2 * arch/sh/drivers/pci/fixups-sdk7780.c
/linux-4.4.14/arch/sh/include/cpu-sh4/cpu/
H A Dsq.h2 * include/asm-sh/cpu-sh4/sq.h
30 /* arch/sh/kernel/cpu/sh4/sq.c */
H A Dfpu.h2 * linux/arch/sh/kernel/cpu/sh4/sh4_fpu.h
H A Dcache.h2 * include/asm-sh/cpu-sh4/cache.h
H A Dwatchdog.h2 * include/asm-sh/cpu-sh4/watchdog.h
/linux-4.4.14/arch/sh/include/mach-common/mach/
H A Dsh7763rdp.h5 * linux/include/asm-sh/sh7763drp.h
48 /* arch/sh/boards/renesas/sh7763rdp/irq.c */
H A Dsecureedge5410.h2 * include/asm-sh/snapgear.h
/linux-4.4.14/arch/arm64/boot/
H A DMakefile38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
/linux-4.4.14/sound/
H A DMakefile8 obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
/linux-4.4.14/include/net/sctp/
H A Dchecksum.h63 struct sctphdr *sh = sctp_hdr(skb); sctp_compute_cksum() local
64 __le32 ret, old = sh->checksum; sctp_compute_cksum()
70 sh->checksum = 0; sctp_compute_cksum()
73 sh->checksum = old; sctp_compute_cksum()
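
sctp_compute_cksum() above shows the usual pattern of zeroing the checksum field while the digest is computed and restoring the caller's value afterwards. The sketch below reproduces only that ordering; toy_sum() is a deliberately fake stand-in for CRC32-C, and the struct layout is illustrative rather than the on-wire SCTP header.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sctphdr {
	uint16_t source, dest;
	uint32_t vtag;
	uint32_t checksum;
};

/* placeholder digest, NOT CRC32-C */
static uint32_t toy_sum(const uint8_t *p, size_t len)
{
	uint32_t s = 0;

	while (len--)
		s = s * 31 + *p++;
	return s;
}

static uint32_t compute_cksum(struct toy_sctphdr *sh,
			      const uint8_t *payload, size_t plen)
{
	uint32_t old = sh->checksum;	/* remember the caller's value      */
	uint32_t ret;

	sh->checksum = 0;		/* field must be zero while summed  */
	ret = toy_sum((const uint8_t *)sh, sizeof(*sh));
	ret = ret * 31 + toy_sum(payload, plen);	/* fold in payload  */
	sh->checksum = old;		/* restore, as the kernel code does */
	return ret;
}

int main(void)
{
	struct toy_sctphdr h = { 7, 8, 9, 0xdeadbeef };
	uint8_t data[4] = { 1, 2, 3, 4 };

	printf("%#x\n", (unsigned)compute_cksum(&h, data, sizeof(data)));
	return 0;
}
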
/linux-4.4.14/arch/sh/boards/
H A Dboard-shmin.c2 * arch/sh/boards/shmin/setup.c
H A Dboard-titan.c2 * arch/sh/boards/titan/setup.c - Setup for Titan
H A Dboard-edosk7705.c2 * arch/sh/boards/renesas/edosk7705/setup.c
/linux-4.4.14/arch/sh/boards/mach-lboxre2/
H A Dirq.c2 * linux/arch/sh/boards/lboxre2/irq.c
/linux-4.4.14/arch/sh/include/cpu-sh3/cpu/
H A Dfreq.h2 * include/asm-sh/cpu-sh3/freq.h
H A Dwatchdog.h2 * include/asm-sh/cpu-sh3/watchdog.h
H A Dcache.h2 * include/asm-sh/cpu-sh3/cache.h
H A Dmmu_context.h2 * include/asm-sh/cpu-sh3/mmu_context.h
/linux-4.4.14/arch/sh/include/mach-dreamcast/mach/
H A Ddma.h2 * include/asm-sh/dreamcast/dma.h
H A Dpci.h2 * include/asm-sh/dreamcast/pci.h
H A Dsysasic.h1 /* include/asm-sh/dreamcast/sysasic.h
6 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
42 /* arch/sh/boards/mach-dreamcast/irq.c */
/linux-4.4.14/arch/m32r/mm/
H A Dinit.c6 * Some code taken from sh version.
110 * orig : arch/sh/mm/init.c
132 * orig : arch/sh/mm/init.c
142 * orig : arch/sh/mm/init.c
/linux-4.4.14/arch/x86/entry/syscalls/
H A DMakefile11 syshdr := $(srctree)/$(src)/syscallhdr.sh
12 systbl := $(srctree)/$(src)/syscalltbl.sh
53 $(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
/linux-4.4.14/arch/sh/kernel/cpu/sh3/
H A Dclock-sh3.c2 * arch/sh/kernel/cpu/sh3/clock-sh3.c
8 * FRQCR parsing hacked out of arch/sh/kernel/time.c
13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
H A Dclock-sh7705.c2 * arch/sh/kernel/cpu/sh3/clock-sh7705.c
8 * FRQCR parsing hacked out of arch/sh/kernel/time.c
13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
H A Dclock-sh7710.c2 * arch/sh/kernel/cpu/sh3/clock-sh7710.c
8 * FRQCR parsing hacked out of arch/sh/kernel/time.c
13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
H A Dsetup-sh7705.c87 .name = "sh-sci",
110 .name = "sh-sci",
136 .name = "sh-rtc",
157 .name = "sh-tmu-sh3",
H A Dclock-sh7706.c2 * arch/sh/kernel/cpu/sh3/clock-sh7706.c
8 * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c
H A Dclock-sh7709.c2 * arch/sh/kernel/cpu/sh3/clock-sh7709.c
8 * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c
H A Dclock-sh7712.c2 * arch/sh/kernel/cpu/sh3/clock-sh7712.c
8 * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
H A Dsetup-sh770x.c105 .name = "sh-rtc",
126 .name = "sh-sci",
151 .name = "sh-sci",
177 .name = "sh-sci",
199 .name = "sh-tmu-sh3",
H A Dsetup-sh7710.c91 .name = "sh-rtc",
113 .name = "sh-sci",
135 .name = "sh-sci",
156 .name = "sh-tmu-sh3",
/linux-4.4.14/arch/sh/kernel/cpu/sh4/
H A Dclock-sh4.c2 * arch/sh/kernel/cpu/sh4/clock-sh4.c
8 * FRQCR parsing hacked out of arch/sh/kernel/time.c
13 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
/linux-4.4.14/arch/sh/kernel/cpu/sh5/
H A Dsetup-sh5.c33 .name = "sh-sci",
66 .name = "sh-rtc",
87 .name = "sh-tmu",
/linux-4.4.14/arch/parisc/
H A Dinstall.sh1 #!/bin/sh
3 # arch/parisc/install.sh, derived from arch/i386/boot/install.sh
/linux-4.4.14/arch/blackfin/boot/
H A Dinstall.sh1 #!/bin/sh
3 # arch/blackfin/boot/install.sh
12 # Adapted from code in arch/i386/boot/install.sh by Mike Frysinger
/linux-4.4.14/arch/arm/boot/
H A Dinstall.sh1 #!/bin/sh
3 # arch/arm/boot/install.sh
12 # Adapted from code in arch/i386/boot/install.sh by Russell King
H A DMakefile99 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
103 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
107 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
111 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
115 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
/linux-4.4.14/usr/
H A DMakefile41 initramfs := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh
51 # The dependency list is generated by gen_initramfs.sh -l
71 # 4) arguments to gen_initramfs.sh changes
/linux-4.4.14/net/netfilter/ipset/
H A Dip_set_getport.c42 const sctp_sctphdr_t *sh; get_port() local
44 sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); get_port()
45 if (!sh) get_port()
49 *port = src ? sh->source : sh->dest; get_port()
/linux-4.4.14/drivers/message/fusion/
H A Dmptfc.c209 ioc->name, ioc->sh->host_no, mptfc_block_error_handler()
223 ioc->name, ioc->sh->host_no, mptfc_block_error_handler()
230 ioc->name, ioc->sh->host_no, mptfc_block_error_handler()
469 rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); mptfc_register_dev()
497 ioc->sh->host_no, mptfc_register_dev()
990 struct Scsi_Host *sh; mptfc_init_host_attr() local
998 sh = ioc->sh; mptfc_init_host_attr()
1000 sn = fc_host_symbolic_name(sh); mptfc_init_host_attr()
1006 fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN; mptfc_init_host_attr()
1008 fc_host_maxframe_size(sh) = pp0->MaxFrameSize; mptfc_init_host_attr()
1010 fc_host_node_name(sh) = mptfc_init_host_attr()
1013 fc_host_port_name(sh) = mptfc_init_host_attr()
1016 fc_host_port_id(sh) = pp0->PortIdentifier; mptfc_init_host_attr()
1025 fc_host_supported_classes(sh) = cos; mptfc_init_host_attr()
1037 fc_host_speed(sh) = speed; mptfc_init_host_attr()
1048 fc_host_supported_speeds(sh) = speed; mptfc_init_host_attr()
1055 fc_host_port_state(sh) = port_state; mptfc_init_host_attr()
1066 fc_host_port_type(sh) = port_type; mptfc_init_host_attr()
1068 fc_host_fabric_name(sh) = mptfc_init_host_attr()
1115 ioc->sh->host_no, mptfc_setup_reset()
1170 ioc->sh->host_no, mptfc_rescan_devices()
1179 struct Scsi_Host *sh; mptfc_probe() local
1231 sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); mptfc_probe()
1233 if (!sh) { mptfc_probe()
1250 ioc->sh = sh; mptfc_probe()
1252 sh->io_port = 0; mptfc_probe()
1253 sh->n_io_port = 0; mptfc_probe()
1254 sh->irq = 0; mptfc_probe()
1257 sh->max_cmd_len = 16; mptfc_probe()
1259 sh->max_id = ioc->pfacts->MaxDevices; mptfc_probe()
1260 sh->max_lun = max_lun; mptfc_probe()
1264 sh->unique_id = ioc->id; mptfc_probe()
1286 if (numSGE < sh->sg_tablesize) { mptfc_probe()
1290 ioc->name, numSGE, sh->sg_tablesize)); mptfc_probe()
1291 sh->sg_tablesize = numSGE; mptfc_probe()
1296 hd = shost_priv(sh); mptfc_probe()
1314 sh->transportt = mptfc_transport_template; mptfc_probe()
1315 error = scsi_add_host (sh, &ioc->pcidev->dev); mptfc_probe()
1325 "mptfc_wq_%d", sh->host_no); mptfc_probe()
1382 if (ioc->sh == NULL || mptfc_event_process()
1383 ((hd = shost_priv(ioc->sh)) == NULL)) mptfc_event_process()
1512 fc_remove_host(ioc->sh); mptfc_remove()
H A Dmptspi.c1122 struct Scsi_Host *shost = ioc->sh; mpt_work_wrapper()
1153 scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1);
1163 shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT mpt_dv_raid()
1179 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_event_process()
1276 shost_for_each_device(sdev, ioc->sh) { mptspi_dv_renegotiate_work()
1287 shost_for_each_device(sdev, ioc->sh) mptspi_dv_renegotiate_work()
1319 * if we get an ioc fault on bringup, ioc->sh will be NULL */ mptspi_ioc_reset()
1321 ioc->sh) { mptspi_ioc_reset()
1322 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_ioc_reset()
1338 struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); mptspi_resume()
1360 struct Scsi_Host *sh; mptspi_probe() local
1412 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); mptspi_probe()
1414 if (!sh) { mptspi_probe()
1425 sh->no_write_same = 1; mptspi_probe()
1431 ioc->sh = sh; mptspi_probe()
1433 sh->io_port = 0; mptspi_probe()
1434 sh->n_io_port = 0; mptspi_probe()
1435 sh->irq = 0; mptspi_probe()
1438 sh->max_cmd_len = 16; mptspi_probe()
1450 sh->max_id = ioc->devices_per_bus; mptspi_probe()
1452 sh->max_lun = MPT_LAST_LUN + 1; mptspi_probe()
1457 sh->max_channel = 1; mptspi_probe()
1459 sh->max_channel = 0; mptspi_probe()
1460 sh->this_id = ioc->pfacts[0].PortSCSIID; mptspi_probe()
1464 sh->unique_id = ioc->id; mptspi_probe()
1486 if (numSGE < sh->sg_tablesize) { mptspi_probe()
1490 ioc->name, numSGE, sh->sg_tablesize)); mptspi_probe()
1491 sh->sg_tablesize = numSGE; mptspi_probe()
1496 hd = shost_priv(sh); mptspi_probe()
1525 sh->transportt = mptspi_transport_template; mptspi_probe()
1527 error = scsi_add_host (sh, &ioc->pcidev->dev); mptspi_probe()
1542 scsi_scan_host(sh); mptspi_probe()
/linux-4.4.14/lib/
H A Ddigsig.c194 struct signature_hdr *sh = (struct signature_hdr *)sig; digsig_verify() local
200 if (siglen < sizeof(*sh) + 2) digsig_verify()
203 if (sh->algo != PUBKEY_ALGO_RSA) digsig_verify()
206 sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); digsig_verify()
235 crypto_shash_update(desc, sig, sizeof(*sh)); digsig_verify()
241 err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), digsig_verify()
/linux-4.4.14/arch/powerpc/include/asm/
H A Dsfp-machine.h209 * #define add_ssaaaa(sh,sl,ah,al,bh,bl) (sh = ah+bh+ (( sl = al+bl) < al))
216 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
220 : "=r" ((USItype)(sh)), \
227 : "=r" ((USItype)(sh)), \
234 : "=r" ((USItype)(sh)), \
243 * #define sub_ddmmss(sh, sl, ah, al, bh, bl) (sh = ah-bh - ((sl = al-bl) > al))
251 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
255 : "=r" ((USItype)(sh)), \
262 : "=r" ((USItype)(sh)), \
269 : "=r" ((USItype)(sh)), \
276 : "=r" ((USItype)(sh)), \
283 : "=r" ((USItype)(sh)), \
/linux-4.4.14/drivers/target/
H A Dtarget_core_pscsi.c111 struct Scsi_Host *sh = phv->phv_lld_host; pscsi_pmode_enable_hba() local
116 if (!sh) pscsi_pmode_enable_hba()
123 " %s\n", hba->hba_id, (sh->hostt->name) ? pscsi_pmode_enable_hba()
124 (sh->hostt->name) : "Unknown"); pscsi_pmode_enable_hba()
126 scsi_host_put(sh); pscsi_pmode_enable_hba()
133 sh = scsi_host_lookup(phv->phv_host_id); pscsi_pmode_enable_hba()
134 if (!sh) { pscsi_pmode_enable_hba()
140 phv->phv_lld_host = sh; pscsi_pmode_enable_hba()
144 hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); pscsi_pmode_enable_hba()
366 __releases(sh->host_lock)
370 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host
376 sh->host_no, sd->channel, sd->id, sd->lun);
377 spin_unlock_irq(sh->host_lock);
380 spin_unlock_irq(sh->host_lock);
402 phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
410 __releases(sh->host_lock)
413 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host
418 sh->host_no, sd->channel, sd->id, sd->lun);
419 spin_unlock_irq(sh->host_lock);
422 spin_unlock_irq(sh->host_lock);
430 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
441 __releases(sh->host_lock)
444 struct Scsi_Host *sh = sd->host; variable in typeref:struct:Scsi_Host
447 spin_unlock_irq(sh->host_lock);
453 phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
464 struct Scsi_Host *sh = phv->phv_lld_host; pscsi_configure_device() local
480 if (!sh) { pscsi_configure_device()
512 sh = phv->phv_lld_host; pscsi_configure_device()
514 sh = scsi_host_lookup(pdv->pdv_host_id); pscsi_configure_device()
515 if (!sh) { pscsi_configure_device()
520 pdv->pdv_lld_host = sh; pscsi_configure_device()
530 spin_lock_irq(sh->host_lock); pscsi_configure_device()
531 list_for_each_entry(sd, &sh->__devices, siblings) { pscsi_configure_device()
555 scsi_host_put(sh); pscsi_configure_device()
565 spin_unlock_irq(sh->host_lock); pscsi_configure_device()
567 pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, pscsi_configure_device()
571 scsi_host_put(sh); pscsi_configure_device()
/linux-4.4.14/include/linux/
H A Dsuperhyway.h6 * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
97 /* drivers/sh/superhyway/superhyway.c */
103 /* drivers/sh/superhyway/superhyway-sysfs.c */
/linux-4.4.14/drivers/spi/
H A DMakefile77 obj-$(CONFIG_SPI_SH) += spi-sh.o
78 obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
79 obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
80 obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
/linux-4.4.14/drivers/lguest/
H A DMakefile18 @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
/linux-4.4.14/arch/sh/boards/mach-se/7721/
H A Dirq.c2 * linux/arch/sh/boards/se/7721/irq.c
/linux-4.4.14/arch/sh/boards/mach-se/7751/
H A Dirq.c2 * linux/arch/sh/boards/se/7751/irq.c
H A Dsetup.c2 * linux/arch/sh/boards/se/7751/setup.c
/linux-4.4.14/arch/sh/boards/mach-sh7763rdp/
H A Dirq.c2 * linux/arch/sh/boards/renesas/sh7763rdp/irq.c
/linux-4.4.14/arch/sh/kernel/cpu/sh2/
H A Dex.S2 * arch/sh/kernel/cpu/sh2/ex.S
H A Dprobe.c2 * arch/sh/kernel/cpu/sh2/probe.c
H A Dsetup-sh7619.c75 .name = "sh-sci",
96 .name = "sh-sci",
117 .name = "sh-sci",
166 .name = "sh-cmt-16",
/linux-4.4.14/arch/sh/lib64/
H A Dstrlen.S4 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
H A Dudelay.c2 * arch/sh/lib64/udelay.c
/linux-4.4.14/arch/x86/kernel/cpu/
H A DMakefile65 cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
70 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
H A Dmkcapflags.sh1 #!/bin/sh
/linux-4.4.14/arch/sh/boards/mach-sh03/
H A Dsetup.c2 * linux/arch/sh/boards/sh03/setup.c
24 /* arch/sh/boards/sh03/rtc.c */
/linux-4.4.14/arch/sh/drivers/dma/
H A Ddma-pvr2.c2 * arch/sh/drivers/dma/dma-pvr2.c
106 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
/linux-4.4.14/arch/sh/include/mach-se/mach/
H A Dse7724.h5 * linux/include/asm-sh/se7724.h
67 /* arch/sh/boards/se/7724/irq.c */
/linux-4.4.14/arch/sparc/boot/
H A DMakefile74 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \
H A Dinstall.sh1 #!/bin/sh
/linux-4.4.14/arch/nios2/boot/
H A DMakefile59 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
H A Dinstall.sh1 #!/bin/sh
/linux-4.4.14/arch/avr32/boot/images/
H A DMakefile54 sh $(srctree)/install-kernel.sh $<
/linux-4.4.14/tools/perf/config/
H A Dutilities.mak141 # (It's necessary to use `sh -c' because GNU make messes up by
144 lookup = $(call unescape-nl,$(shell sh -c $(_l-sh)))
145 _l-sh = $(call shell-sq,command -v $(shell-sq) | $(call shell-escape-nl,))
151 # (It's necessary to use `sh -c' because GNU make messes up by
155 _is-executable-helper = $(shell sh -c $(_is-executable-sh))
156 _is-executable-sh = $(call shell-sq,test -f $(1) -a -x $(1) && echo y)
/linux-4.4.14/tools/testing/selftests/x86/
H A DMakefile18 CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
19 CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
/linux-4.4.14/fs/logfs/
H A Dsuper.c143 static void set_segment_header(struct logfs_segment_header *sh, u8 type, set_segment_header() argument
146 sh->pad = 0; set_segment_header()
147 sh->type = type; set_segment_header()
148 sh->level = level; set_segment_header()
149 sh->segno = cpu_to_be32(segno); set_segment_header()
150 sh->ec = cpu_to_be32(ec); set_segment_header()
151 sh->gec = cpu_to_be64(segno); set_segment_header()
152 sh->crc = logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4); set_segment_header()
159 struct logfs_segment_header *sh = &ds->ds_sh; logfs_write_ds() local
163 set_segment_header(sh, SEG_SUPER, 0, segno, ec); logfs_write_ds()
347 struct logfs_segment_header *sh = &ds->ds_sh; logfs_check_ds() local
351 if (sh->crc != logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4)) logfs_check_ds()
/linux-4.4.14/arch/x86/entry/vdso/
H A Dvdso2c.h66 ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) + go()
68 if (GET_LE(&sh->sh_type) == SHT_SYMTAB) go()
69 symtab_hdr = sh; go()
71 if (!strcmp(secstrings + GET_LE(&sh->sh_name), go()
73 alt_sec = sh; go()
/linux-4.4.14/tools/testing/selftests/rcutorture/bin/
H A Dkvm.sh10 # Usage: kvm.sh [ options ]
33 T=/tmp/kvm.sh.$$
51 . functions.sh
193 cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1`
335 print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
365 print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
366 print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
400 kvm-recheck.sh $resdir/$ds
416 sh $T/script
/linux-4.4.14/arch/powerpc/lib/
H A Dsstep.c648 unsigned int mb, me, sh; analyse_instr() local
897 sh = rb | ((instr & 2) << 4); analyse_instr()
898 val = ROTATE(val, sh); analyse_instr()
907 regs->gpr[ra] = val & MASK64(mb, 63 - sh); analyse_instr()
910 imm = MASK64(mb, 63 - sh); analyse_instr()
916 sh = regs->gpr[rb] & 0x3f; analyse_instr()
917 val = ROTATE(val, sh); analyse_instr()
977 for (sh = 0; sh < 8; ++sh) { analyse_instr()
978 if (instr & (0x80000 >> sh)) analyse_instr()
1221 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1222 if (sh < 32) analyse_instr()
1223 regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL; analyse_instr()
1229 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1230 if (sh < 32) analyse_instr()
1231 regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh; analyse_instr()
1237 sh = regs->gpr[rb] & 0x3f; analyse_instr()
1239 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); analyse_instr()
1240 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) analyse_instr()
1247 sh = rb; analyse_instr()
1249 regs->gpr[ra] = ival >> sh; analyse_instr()
1250 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) analyse_instr()
1258 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1259 if (sh < 64) analyse_instr()
1260 regs->gpr[ra] = regs->gpr[rd] << sh; analyse_instr()
1266 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1267 if (sh < 64) analyse_instr()
1268 regs->gpr[ra] = regs->gpr[rd] >> sh; analyse_instr()
1274 sh = regs->gpr[rb] & 0x7f; analyse_instr()
1276 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); analyse_instr()
1277 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) analyse_instr()
1285 sh = rb | ((instr & 2) << 4); analyse_instr()
1287 regs->gpr[ra] = ival >> sh; analyse_instr()
1288 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) analyse_instr()
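
The sstep.c excerpt emulates PowerPC shift instructions by masking the shift amount and guarding the C shift operator: shifting by at least the operand width is undefined behaviour in C, while the instructions are defined to produce 0 (for the logical shifts) when the count is 64..127. A host-side sketch of the 64-bit logical cases, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* sld: the low 7 bits of rb select the shift; counts 64..127 yield 0,
 * which is why the emulator guards the C shift with "if (sh < 64)". */
static uint64_t emulate_sld(uint64_t val, uint64_t rb)
{
	unsigned int sh = rb & 0x7f;

	return sh < 64 ? val << sh : 0;
}

static uint64_t emulate_srd(uint64_t val, uint64_t rb)
{
	unsigned int sh = rb & 0x7f;

	return sh < 64 ? val >> sh : 0;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)emulate_sld(0x1, 65));  /* 0 */
	printf("%#llx\n", (unsigned long long)emulate_srd(~0ULL, 4)); /* 0xfffffffffffffff */
	return 0;
}
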
/linux-4.4.14/drivers/target/loopback/
H A Dtcm_loop.c188 static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) tcm_loop_queuecommand() argument
385 struct Scsi_Host *sh; tcm_loop_driver_probe() local
390 sh = scsi_host_alloc(&tcm_loop_driver_template, tcm_loop_driver_probe()
392 if (!sh) { tcm_loop_driver_probe()
396 tl_hba->sh = sh; tcm_loop_driver_probe()
401 *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; tcm_loop_driver_probe()
405 sh->max_id = 2; tcm_loop_driver_probe()
406 sh->max_lun = 0; tcm_loop_driver_probe()
407 sh->max_channel = 0; tcm_loop_driver_probe()
408 sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; tcm_loop_driver_probe()
414 scsi_host_set_prot(sh, host_prot); tcm_loop_driver_probe()
415 scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); tcm_loop_driver_probe()
417 error = scsi_add_host(sh, &tl_hba->dev); tcm_loop_driver_probe()
420 scsi_host_put(sh); tcm_loop_driver_probe()
429 struct Scsi_Host *sh; tcm_loop_driver_remove() local
432 sh = tl_hba->sh; tcm_loop_driver_remove()
434 scsi_remove_host(sh); tcm_loop_driver_remove()
435 scsi_host_put(sh); tcm_loop_driver_remove()
727 scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); tcm_loop_port_link()
744 sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, tcm_loop_port_unlink()
1129 struct Scsi_Host *sh; tcm_loop_make_scsi_hba() local
1174 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after tcm_loop_make_scsi_hba()
1181 sh = tl_hba->sh; tcm_loop_make_scsi_hba()
1185 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); tcm_loop_make_scsi_hba()
1202 tl_hba->sh->host_no); tcm_loop_drop_scsi_hba()
H A Dtcm_loop.h51 struct Scsi_Host *sh; member in struct:tcm_loop_hba
/linux-4.4.14/security/
H A Dlsm_audit.c90 struct sctphdr *sh = sctp_hdr(skb); ipv4_skb_to_auditdata() local
91 if (sh == NULL) ipv4_skb_to_auditdata()
93 ad->u.net->sport = sh->source; ipv4_skb_to_auditdata()
94 ad->u.net->dport = sh->dest; ipv4_skb_to_auditdata()
170 struct sctphdr _sctph, *sh; ipv6_skb_to_auditdata() local
172 sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); ipv6_skb_to_auditdata()
173 if (sh == NULL) ipv6_skb_to_auditdata()
175 ad->u.net->sport = sh->source; ipv6_skb_to_auditdata()
176 ad->u.net->dport = sh->dest; ipv6_skb_to_auditdata()
/linux-4.4.14/arch/sh/boards/mach-sdk7780/
H A Dirq.c2 * linux/arch/sh/boards/renesas/sdk7780/irq.c
/linux-4.4.14/arch/sh/boards/mach-se/7206/
H A Dsetup.c3 * linux/arch/sh/boards/se/7206/setup.c
/linux-4.4.14/arch/sh/include/uapi/asm/
H A Dcpu-features.h8 * arch/sh/kernel/setup.c in sync so symbolic name
/linux-4.4.14/arch/sh/oprofile/
H A Dcommon.c2 * arch/sh/oprofile/init.c
/linux-4.4.14/arch/m68k/
H A Dinstall.sh1 #!/bin/sh
/linux-4.4.14/arch/metag/oprofile/
H A Dcommon.c6 * Based on arch/sh/oprofile/common.c:
/linux-4.4.14/arch/h8300/boot/compressed/
H A DMakefile2 # linux/arch/sh/boot/compressed/Makefile
/linux-4.4.14/arch/ia64/include/asm/
H A Dunistd.h17 * The following defines stop scripts/checksyscalls.sh from complaining about
/linux-4.4.14/arch/avr32/include/asm/
H A Daddrspace.h3 * include/asm-sh/addrspace.h
/linux-4.4.14/tools/build/tests/
H A Drun.sh1 #!/bin/sh
/linux-4.4.14/sound/soc/
H A DMakefile40 obj-$(CONFIG_SND_SOC) += sh/
/linux-4.4.14/tools/perf/arch/
H A Dcommon.c32 "sh-unknown-linux-gnu-",
125 if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) normalize_arch()
126 return "sh"; normalize_arch()
172 else if (!strcmp(arch, "sh")) perf_env__lookup_binutils_path()
/linux-4.4.14/tools/testing/fault-injection/
H A Dfailcmd.sh4 # failcmd.sh - run a command with injecting slab/page allocation failures
7 # failcmd.sh --help
8 # failcmd.sh [<options>] command [arguments]
96 TEMP=`getopt -o p:i:t:s:v:h --long $LONGOPTS -n 'failcmd.sh' -- "$@"`
/linux-4.4.14/tools/testing/selftests/
H A Dgen_kselftest_tar.sh47 ./kselftest_install.sh
/linux-4.4.14/arch/x86/um/vdso/
H A DMakefile75 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'

