ls 192 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo ls 449 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo ls 616 arch/arm/kernel/setup.c u32 fs[3], bits[3], ls, mask = 0; ls 635 arch/arm/kernel/setup.c ls = fls(affinity); ls 637 arch/arm/kernel/setup.c bits[i] = ls - fs[i]; ls 114 arch/arm64/kernel/setup.c u32 i, affinity, fs[4], bits[4], ls; ls 134 arch/arm64/kernel/setup.c ls = fls(affinity); ls 136 arch/arm64/kernel/setup.c bits[i] = ls - fs[i]; ls 70 arch/arm64/net/bpf_jit.h #define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \ ls 73 arch/arm64/net/bpf_jit.h AARCH64_INSN_LDST_##ls##_PAIR_##type) ls 1035 arch/ia64/kernel/unwind.c struct unw_labeled_state *ls; ls 1037 arch/ia64/kernel/unwind.c for (ls = sr->labeled_states; ls; ls = ls->next) { ls 1038 arch/ia64/kernel/unwind.c if (ls->label == label) { ls 1040 arch/ia64/kernel/unwind.c memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); ls 1041 arch/ia64/kernel/unwind.c sr->curr.next = dup_state_stack(ls->saved_state.next); ls 1051 arch/ia64/kernel/unwind.c struct unw_labeled_state *ls; ls 1053 arch/ia64/kernel/unwind.c ls = alloc_labeled_state(); ls 1054 arch/ia64/kernel/unwind.c if (!ls) { ls 1058 arch/ia64/kernel/unwind.c ls->label = label; ls 1059 arch/ia64/kernel/unwind.c memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); ls 1060 arch/ia64/kernel/unwind.c ls->saved_state.next = dup_state_stack(sr->curr.next); ls 1063 arch/ia64/kernel/unwind.c ls->next = sr->labeled_states; ls 1064 arch/ia64/kernel/unwind.c sr->labeled_states = ls; ls 1531 arch/ia64/kernel/unwind.c struct unw_labeled_state *ls, *next; ls 1701 arch/ia64/kernel/unwind.c for (ls = sr.labeled_states; ls; ls = next) { ls 1702 arch/ia64/kernel/unwind.c next = ls->next; ls 1703 arch/ia64/kernel/unwind.c free_state_stack(&ls->saved_state); ls 1704 arch/ia64/kernel/unwind.c free_labeled_state(ls); ls 34 arch/m68k/lib/memset.c long *ls = s; ls 37 arch/m68k/lib/memset.c *ls++ = c; ls 58 arch/m68k/lib/memset.c : "=a" (ls), "=d" (temp), "=&d" (temp1) ls 59 arch/m68k/lib/memset.c : "d" (c), "0" (ls), "1" (temp)); ls 61 arch/m68k/lib/memset.c s = ls; ls 277 arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) && ls 316 arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0)) ls 2241 arch/mips/include/asm/octeon/cvmx-gmxx-defs.h uint64_t ls:2; ls 2249 arch/mips/include/asm/octeon/cvmx-gmxx-defs.h uint64_t ls:2; ls 189 arch/mips/include/asm/octeon/cvmx-pciercx-defs.h __BITFIELD_FIELD(uint32_t ls:4, ls 49 arch/mips/lasat/lasat_board.c unsigned long *ls = lasat_board_info.li_flashpart_size; ls 52 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_BOOTLOADER] = 0x40000; ls 53 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_SERVICE] = 0xC0000; ls 54 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_NORMAL] = 0x100000; ls 62 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_CONFIG] = 0x100000; ls 63 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_FS] = 0x500000; ls 70 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_CONFIG] = 0x100000; ls 72 arch/mips/lasat/lasat_board.c ls[LASAT_MTD_FS] = ls 78 arch/mips/lasat/lasat_board.c lb[i] = lb[i-1] + ls[i-1]; ls 1408 arch/mips/mm/tlbex.c const enum label_id ls = label_tlb_huge_update; ls 1410 arch/mips/mm/tlbex.c const enum label_id ls = label_vmalloc; ls 1416 arch/mips/mm/tlbex.c for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) ls 1430 
arch/mips/pci/pcie-octeon.c pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls); ls 90 arch/powerpc/include/asm/spu_csa.h unsigned char ls[LS_SIZE] __attribute__((aligned(65536))); ls 259 arch/powerpc/platforms/cell/spufs/backing_ops.c return ctx->csa.lscsa->ls; ls 242 arch/powerpc/platforms/cell/spufs/file.c pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); ls 32 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) ls 46 arch/powerpc/platforms/cell/spufs/lscsa_alloc.c for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE) ls 313 arch/powerpc/platforms/cell/spufs/run.c void __iomem *ls; ls 319 arch/powerpc/platforms/cell/spufs/run.c ls = (void __iomem *)ctx->ops->get_ls(ctx); ls 320 arch/powerpc/platforms/cell/spufs/run.c ls_pointer = in_be32(ls + npc); ls 323 arch/powerpc/platforms/cell/spufs/run.c memcpy_fromio(&s, ls + ls_pointer, sizeof(s)); ls 344 arch/powerpc/platforms/cell/spufs/run.c ls = (void __iomem *)ctx->ops->get_ls(ctx); ls 347 arch/powerpc/platforms/cell/spufs/run.c memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret)); ls 34 arch/powerpc/platforms/cell/spufs/spu_restore.c unsigned int ls = (unsigned int)&regs_spill[0]; ls 39 arch/powerpc/platforms/cell/spufs/spu_restore.c spu_writech(MFC_LSA, ls); ls 49 arch/powerpc/platforms/cell/spufs/spu_restore.c unsigned int ls = 16384; ls 59 arch/powerpc/platforms/cell/spufs/spu_restore.c spu_writech(MFC_LSA, ls); ls 49 arch/powerpc/platforms/cell/spufs/spu_save.c unsigned int ls = 16384; ls 59 arch/powerpc/platforms/cell/spufs/spu_save.c spu_writech(MFC_LSA, ls); ls 106 arch/powerpc/platforms/cell/spufs/spu_save.c unsigned int ls = (unsigned int)&regs_spill[0]; ls 115 arch/powerpc/platforms/cell/spufs/spu_save.c spu_writech(MFC_LSA, ls); ls 89 arch/powerpc/platforms/cell/spufs/spu_utils.h ea_low += LSCSA_BYTE_OFFSET(ls[16384]); ls 99 arch/powerpc/platforms/cell/spufs/spu_utils.h unsigned int ls = 0; ls 110 arch/powerpc/platforms/cell/spufs/spu_utils.h spu_writech(MFC_LSA, ls); ls 800 arch/powerpc/platforms/cell/spufs/switch.c unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; ls 1370 arch/powerpc/platforms/cell/spufs/switch.c unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; ls 124 arch/powerpc/platforms/ps3/spu.c unsigned long problem, unsigned long ls, unsigned long shadow, ls 130 arch/powerpc/platforms/ps3/spu.c pr_debug("%s:%d: ls: %lxh\n", func, line, ls); ls 1538 arch/powerpc/xmon/ppc-opc.c unsigned long ls = (insn >> 21) & 0x03; ls 1542 arch/powerpc/xmon/ppc-opc.c if (((dialect & PPC_OPCODE_E6500) != 0 && ls > 1) ls 1543 arch/powerpc/xmon/ppc-opc.c || ((dialect & PPC_OPCODE_POWER9) != 0 && ls > 2)) ls 1548 arch/powerpc/xmon/ppc-opc.c if ((ls & ~0x1) ls 1549 arch/powerpc/xmon/ppc-opc.c || (((value >> 1) & 0x1) ^ ls) == 0) ls 33 drivers/clk/bcm/clk-cygnus.c #define ASIU_DIV_VAL(o, es, hs, hw, ls, lw) \ ls 35 drivers/clk/bcm/clk-cygnus.c .high_width = hw, .low_shift = ls, .low_width = lw } ls 237 drivers/leds/leds-pca955x.c u8 ls; ls 250 drivers/leds/leds-pca955x.c ret = pca955x_read_ls(pca955x->client, chip_ls, &ls); ls 256 drivers/leds/leds-pca955x.c ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON); ls 259 drivers/leds/leds-pca955x.c ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_OFF); ls 262 drivers/leds/leds-pca955x.c ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK0); ls 275 drivers/leds/leds-pca955x.c ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1); ls 279
drivers/leds/leds-pca955x.c ret = pca955x_write_ls(pca955x->client, chip_ls, ls); ls 120 drivers/md/dm-thin.c static void build_key(struct dm_thin_device *td, enum lock_space ls, ls 123 drivers/md/dm-thin.c key->virtual = (ls == VIRTUAL); ls 20 drivers/md/md-cluster.c dlm_lockspace_t *ls; ls 128 drivers/md/md-cluster.c ret = dlm_lock(res->ls, mode, &res->lksb, ls 154 drivers/md/md-cluster.c ret = dlm_lock(res->ls, mode, &res->lksb, ls 169 drivers/md/md-cluster.c ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_CANCEL, ls 195 drivers/md/md-cluster.c res->ls = cinfo->lockspace; ls 246 drivers/md/md-cluster.c ret = dlm_unlock(res->ls, res->lksb.sb_lkid, DLM_LKF_FORCEUNLOCK, ls 596 drivers/net/ethernet/cavium/liquidio/lio_main.c union oct_link_status *ls) ls 599 drivers/net/ethernet/cavium/liquidio/lio_main.c int changed = (lio->linfo.link.u64 != ls->u64); ls 604 drivers/net/ethernet/cavium/liquidio/lio_main.c __func__, lio->linfo.link.u64, ls->u64); ls 605 drivers/net/ethernet/cavium/liquidio/lio_main.c lio->linfo.link.u64 = ls->u64; ls 3310 drivers/net/ethernet/cavium/liquidio/lio_main.c union oct_link_status *ls; ls 3313 drivers/net/ethernet/cavium/liquidio/lio_main.c if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { ls 3321 drivers/net/ethernet/cavium/liquidio/lio_main.c ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + ls 3324 drivers/net/ethernet/cavium/liquidio/lio_main.c octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); ls 3327 drivers/net/ethernet/cavium/liquidio/lio_main.c update_link_status(oct->props[i].netdev, ls); ls 340 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c union oct_link_status *ls) ls 346 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { ls 347 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c lio->linfo.link.u64 = ls->u64; ls 1885 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c union oct_link_status *ls; ls 1889 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { ls 1897 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + ls 1900 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); ls 1904 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c update_link_status(oct->props[i].netdev, ls); ls 53 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct i40e_link_status *ls = &pf->hw.phy.link_info; ls 69 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ls->link_info & I40E_AQ_LINK_UP; ls 71 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c i40e_virtchnl_link_speed(ls->link_speed); ls 3566 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct i40e_link_status *ls = &pf->hw.phy.link_info; ls 3631 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c switch (ls->link_speed) { ls 122 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c struct ice_link_status *ls; ls 127 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ls = &hw->port_info->phy.link_info; ls 138 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info & ls 3229 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c struct ice_link_status *ls; ls 3240 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ls = &pf->hw.port_info->phy.link_info; ls 3253 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c vf->link_up = ls->link_info & ICE_AQ_LINK_UP; ls 3270 
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up); ls 846 drivers/net/ethernet/neterion/vxge/vxge-config.h #define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls) ls 265 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c u32 sts, ls; ls 298 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts); ls 299 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED) ls 302 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN || ls 303 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c ls >= ARRAY_SIZE(ls_to_ethtool)) ls 306 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c cmd->base.speed = ls_to_ethtool[ls]; ls 743 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_lif_stats *ls; ls 746 drivers/net/ethernet/pensando/ionic/ionic_lif.c ls = &lif->info->stats; ls 748 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + ls 749 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_mcast_packets) + ls 750 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_bcast_packets); ls 752 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + ls 753 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_mcast_packets) + ls 754 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_bcast_packets); ls 756 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + ls 757 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_mcast_bytes) + ls 758 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_bcast_bytes); ls 760 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + ls 761 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_mcast_bytes) + ls 762 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_bcast_bytes); ls 764 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + ls 765 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_mcast_drop_packets) + ls 766 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_bcast_drop_packets); ls 768 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + ls 769 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_mcast_drop_packets) + ls 770 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_bcast_drop_packets); ls 772 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->multicast = le64_to_cpu(ls->rx_mcast_packets); ls 774 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); ls 776 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + ls 777 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_queue_disabled) + ls 778 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_desc_fetch_error) + ls 779 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->rx_desc_data_error); ls 781 drivers/net/ethernet/pensando/ionic/ionic_lif.c ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + ls 782 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_queue_disabled) + ls 783 
drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_desc_fetch_error) + ls 784 drivers/net/ethernet/pensando/ionic/ionic_lif.c le64_to_cpu(ls->tx_desc_data_error); ls 351 drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h void (*set_eee_timer)(void __iomem *ioaddr, const int ls, ls 224 drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c const int ls, const int tw) ls 226 drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); ls 394 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) ls 397 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); ls 382 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) ls 385 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16); ls 311 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c bool ls, unsigned int tot_pkt_len) ls 328 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c if (ls) ls 349 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c bool ls, unsigned int tcphdrlen, ls 371 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c if (ls) ls 406 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw) ls 411 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c value = (tw & 0xffff) | ((ls & 0x3ff) << 16); ls 145 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c bool ls, unsigned int tot_pkt_len) ls 162 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c if (ls) ls 183 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c bool ls, unsigned int tcphdrlen, ls 202 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c if (ls) ls 318 drivers/net/ethernet/stmicro/stmmac/enh_desc.c bool ls, unsigned int tot_pkt_len) ls 337 drivers/net/ethernet/stmicro/stmmac/enh_desc.c if (ls) ls 42 drivers/net/ethernet/stmicro/stmmac/hwif.h bool csum_flag, int mode, bool tx_own, bool ls, ls 45 drivers/net/ethernet/stmicro/stmmac/hwif.h int len2, bool tx_own, bool ls, unsigned int tcphdrlen, ls 330 drivers/net/ethernet/stmicro/stmmac/hwif.h void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); ls 186 drivers/net/ethernet/stmicro/stmmac/norm_desc.c bool ls, unsigned int tot_pkt_len) ls 200 drivers/net/ethernet/stmicro/stmmac/norm_desc.c if (ls) ls 2122 drivers/net/fddi/defxx.c bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; ls 2123 drivers/net/fddi/defxx.c bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; ls 2201 drivers/net/fddi/defxx.c bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; ls 2202 drivers/net/fddi/defxx.c bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; ls 2203 drivers/net/fddi/defxx.c bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; ls 2204 drivers/net/fddi/defxx.c bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; ls 2205 drivers/net/fddi/defxx.c bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; ls 2206 drivers/net/fddi/defxx.c bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; ls 2207 drivers/net/fddi/defxx.c bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; ls 2208 drivers/net/fddi/defxx.c 
bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; ls 2209 drivers/net/fddi/defxx.c bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; ls 2210 drivers/net/fddi/defxx.c bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; ls 2211 drivers/net/fddi/defxx.c bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; ls 46 drivers/net/fddi/defxx.h PI_UINT32 ls; ls 202 drivers/net/fddi/skfp/pcmplc.c static void sm_ph_linestate(struct s_smc *smc, int phy, int ls); ls 1030 drivers/net/fddi/skfp/pcmplc.c static void sm_ph_linestate(struct s_smc *smc, int phy, int ls) ls 1038 drivers/net/fddi/skfp/pcmplc.c switch(ls) { ls 1908 drivers/net/fddi/skfp/pcmplc.c char *ls = "" ; ls 1913 drivers/net/fddi/skfp/pcmplc.c case PL_L_NLS : ls = "NOISE" ; break ; ls 1914 drivers/net/fddi/skfp/pcmplc.c case PL_L_ALS : ls = "ACTIV" ; break ; ls 1915 drivers/net/fddi/skfp/pcmplc.c case PL_L_UND : ls = "UNDEF" ; break ; ls 1916 drivers/net/fddi/skfp/pcmplc.c case PL_L_ILS4: ls = "ILS 4" ; break ; ls 1917 drivers/net/fddi/skfp/pcmplc.c case PL_L_QLS : ls = "QLS" ; break ; ls 1918 drivers/net/fddi/skfp/pcmplc.c case PL_L_MLS : ls = "MLS" ; break ; ls 1919 drivers/net/fddi/skfp/pcmplc.c case PL_L_HLS : ls = "HLS" ; break ; ls 1920 drivers/net/fddi/skfp/pcmplc.c case PL_L_ILS16:ls = "ILS16" ; break ; ls 1922 drivers/net/fddi/skfp/pcmplc.c default: ls = "unknown" ; break ; ls 1925 drivers/net/fddi/skfp/pcmplc.c return ls; ls 705 drivers/net/fddi/skfp/skfddi.c bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; ls 706 drivers/net/fddi/skfp/skfddi.c bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; ls 779 drivers/net/fddi/skfp/skfddi.c bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; ls 780 drivers/net/fddi/skfp/skfddi.c bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; ls 781 drivers/net/fddi/skfp/skfddi.c bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; ls 782 drivers/net/fddi/skfp/skfddi.c bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; ls 783 drivers/net/fddi/skfp/skfddi.c bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; ls 784 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; ls 785 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; ls 786 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; ls 787 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; ls 788 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; ls 789 drivers/net/fddi/skfp/skfddi.c bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; ls 114 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h u32 ls:1; ls 172 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h u32 ls:1; ls 252 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h u32 ls:1; ls 308 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h u32 ls:1; ls 479 drivers/net/wireless/realtek/rtw88/main.h bool ls; ls 57 drivers/net/wireless/realtek/rtw88/tx.c SET_TX_DESC_LS(txdesc, pkt_info->ls); ls 343 drivers/net/wireless/realtek/rtw88/tx.c 
pkt_info->ls = true; ls 366 drivers/net/wireless/realtek/rtw88/tx.c pkt_info->ls = true; ls 2166 drivers/pinctrl/tegra/pinctrl-tegra20.c PULL_PG(ls, 0xac, 20), ls 3896 drivers/scsi/lpfc/lpfc_hw.h uint32_t ls:1; ls 3908 drivers/scsi/lpfc/lpfc_hw.h uint32_t ls:1; ls 4324 drivers/scsi/qla2xxx/qla_gs.c int ls; ls 4326 drivers/scsi/qla2xxx/qla_gs.c ls = atomic_read(&vha->loop_state); ls 4327 drivers/scsi/qla2xxx/qla_gs.c if (((ls != LOOP_READY) && (ls != LOOP_UP)) || ls 4453 drivers/scsi/qla2xxx/qla_gs.c int ls; ls 4455 drivers/scsi/qla2xxx/qla_gs.c ls = atomic_read(&vha->loop_state); ls 4456 drivers/scsi/qla2xxx/qla_gs.c if (((ls != LOOP_READY) && (ls != LOOP_UP)) || ls 1410 drivers/scsi/qla2xxx/qla_init.c uint8_t ls; ls 1425 drivers/scsi/qla2xxx/qla_init.c ls = pd->current_login_state >> 4; ls 1427 drivers/scsi/qla2xxx/qla_init.c ls = pd->current_login_state & 0xf; ls 1442 drivers/scsi/qla2xxx/qla_init.c switch (ls) { ls 1322 drivers/target/iscsi/iscsi_target_util.c struct iscsi_login_stats *ls; ls 1328 drivers/target/iscsi/iscsi_target_util.c ls = &tiqn->login_stats; ls 1330 drivers/target/iscsi/iscsi_target_util.c spin_lock(&ls->lock); ls 1332 drivers/target/iscsi/iscsi_target_util.c ls->accepts++; ls 1334 drivers/target/iscsi/iscsi_target_util.c ls->redirects++; ls 1335 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT; ls 1338 drivers/target/iscsi/iscsi_target_util.c ls->authenticate_fails++; ls 1339 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE; ls 1342 drivers/target/iscsi/iscsi_target_util.c ls->authorize_fails++; ls 1343 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE; ls 1346 drivers/target/iscsi/iscsi_target_util.c ls->negotiate_fails++; ls 1347 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE; ls 1349 drivers/target/iscsi/iscsi_target_util.c ls->other_fails++; ls 1350 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER; ls 1358 drivers/target/iscsi/iscsi_target_util.c strlcpy(ls->last_intr_fail_name, ls 1360 drivers/target/iscsi/iscsi_target_util.c sizeof(ls->last_intr_fail_name)); ls 1362 drivers/target/iscsi/iscsi_target_util.c ls->last_intr_fail_ip_family = conn->login_family; ls 1364 drivers/target/iscsi/iscsi_target_util.c ls->last_intr_fail_sockaddr = conn->login_sockaddr; ls 1365 drivers/target/iscsi/iscsi_target_util.c ls->last_fail_time = get_jiffies_64(); ls 1368 drivers/target/iscsi/iscsi_target_util.c spin_unlock(&ls->lock); ls 304 fs/ceph/inode.c struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; ls 306 fs/ceph/inode.c return ceph_frag_compare(le32_to_cpu(ls->frag), ls 54 fs/dlm/ast.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 79 fs/dlm/ast.c log_debug(ls, "skip %x add bast %llu mode %d " ls 101 fs/dlm/ast.c log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x", ls 112 fs/dlm/ast.c int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 147 fs/dlm/ast.c log_debug(ls, "skip %x bast %llu mode %d " ls 176 fs/dlm/ast.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 201 fs/dlm/ast.c if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { ls 202 fs/dlm/ast.c mutex_lock(&ls->ls_cb_mutex); ls 203 fs/dlm/ast.c list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); ls 204 fs/dlm/ast.c mutex_unlock(&ls->ls_cb_mutex); ls 206 fs/dlm/ast.c queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); ls 216 fs/dlm/ast.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 
227 fs/dlm/ast.c log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id); ls 233 fs/dlm/ast.c rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid); ls 240 fs/dlm/ast.c log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id, ls 268 fs/dlm/ast.c int dlm_callback_start(struct dlm_ls *ls) ls 270 fs/dlm/ast.c ls->ls_callback_wq = alloc_workqueue("dlm_callback", ls 272 fs/dlm/ast.c if (!ls->ls_callback_wq) { ls 279 fs/dlm/ast.c void dlm_callback_stop(struct dlm_ls *ls) ls 281 fs/dlm/ast.c if (ls->ls_callback_wq) ls 282 fs/dlm/ast.c destroy_workqueue(ls->ls_callback_wq); ls 285 fs/dlm/ast.c void dlm_callback_suspend(struct dlm_ls *ls) ls 287 fs/dlm/ast.c set_bit(LSFL_CB_DELAY, &ls->ls_flags); ls 289 fs/dlm/ast.c if (ls->ls_callback_wq) ls 290 fs/dlm/ast.c flush_workqueue(ls->ls_callback_wq); ls 295 fs/dlm/ast.c void dlm_callback_resume(struct dlm_ls *ls) ls 300 fs/dlm/ast.c clear_bit(LSFL_CB_DELAY, &ls->ls_flags); ls 302 fs/dlm/ast.c if (!ls->ls_callback_wq) ls 306 fs/dlm/ast.c mutex_lock(&ls->ls_cb_mutex); ls 307 fs/dlm/ast.c list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) { ls 309 fs/dlm/ast.c queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); ls 314 fs/dlm/ast.c mutex_unlock(&ls->ls_cb_mutex); ls 317 fs/dlm/ast.c log_rinfo(ls, "dlm_callback_resume %d", count); ls 17 fs/dlm/ast.h int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 23 fs/dlm/ast.h int dlm_callback_start(struct dlm_ls *ls); ls 24 fs/dlm/ast.h void dlm_callback_stop(struct dlm_ls *ls); ls 25 fs/dlm/ast.h void dlm_callback_suspend(struct dlm_ls *ls); ls 26 fs/dlm/ast.h void dlm_callback_resume(struct dlm_ls *ls); ls 423 fs/dlm/debug_fs.c struct dlm_ls *ls = seq->private; ls 433 fs/dlm/debug_fs.c if (bucket >= ls->ls_rsbtbl_size) ls 450 fs/dlm/debug_fs.c tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; ls 452 fs/dlm/debug_fs.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 460 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 465 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 478 fs/dlm/debug_fs.c if (bucket >= ls->ls_rsbtbl_size) { ls 482 fs/dlm/debug_fs.c tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; ls 484 fs/dlm/debug_fs.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 491 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 495 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 501 fs/dlm/debug_fs.c struct dlm_ls *ls = seq->private; ls 516 fs/dlm/debug_fs.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 524 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 529 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 543 fs/dlm/debug_fs.c if (bucket >= ls->ls_rsbtbl_size) { ls 547 fs/dlm/debug_fs.c tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; ls 549 fs/dlm/debug_fs.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 556 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 560 fs/dlm/debug_fs.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 701 fs/dlm/debug_fs.c struct dlm_ls *ls = file->private_data; ls 706 fs/dlm/debug_fs.c mutex_lock(&ls->ls_waiters_mutex); ls 709 fs/dlm/debug_fs.c list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { ls 717 fs/dlm/debug_fs.c mutex_unlock(&ls->ls_waiters_mutex); ls 731 fs/dlm/debug_fs.c void dlm_delete_debug_file(struct dlm_ls *ls) ls 733 fs/dlm/debug_fs.c debugfs_remove(ls->ls_debug_rsb_dentry); ls 734 fs/dlm/debug_fs.c debugfs_remove(ls->ls_debug_waiters_dentry); ls 735 fs/dlm/debug_fs.c debugfs_remove(ls->ls_debug_locks_dentry); ls 736 fs/dlm/debug_fs.c debugfs_remove(ls->ls_debug_all_dentry); ls 737 fs/dlm/debug_fs.c debugfs_remove(ls->ls_debug_toss_dentry); ls 740 fs/dlm/debug_fs.c void dlm_create_debug_file(struct dlm_ls *ls) ls 746 fs/dlm/debug_fs.c ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name, ls 749 fs/dlm/debug_fs.c ls, ls 755 fs/dlm/debug_fs.c snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_locks", ls->ls_name); ls 757 fs/dlm/debug_fs.c ls->ls_debug_locks_dentry = debugfs_create_file(name, ls 760 fs/dlm/debug_fs.c ls, ls 766 fs/dlm/debug_fs.c snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_all", ls->ls_name); ls 768 fs/dlm/debug_fs.c ls->ls_debug_all_dentry = debugfs_create_file(name, ls 771 fs/dlm/debug_fs.c ls, ls 777 fs/dlm/debug_fs.c snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_toss", ls->ls_name); ls 779 fs/dlm/debug_fs.c ls->ls_debug_toss_dentry = debugfs_create_file(name, ls 782 fs/dlm/debug_fs.c ls, ls 786 fs/dlm/debug_fs.c snprintf(name, DLM_LOCKSPACE_LEN + 8, "%s_waiters", ls->ls_name); ls 788 fs/dlm/debug_fs.c ls->ls_debug_waiters_dentry = debugfs_create_file(name, ls 791 fs/dlm/debug_fs.c ls, ls 33 fs/dlm/dir.c int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash) ls 37 fs/dlm/dir.c if (ls->ls_num_nodes == 1) ls 40 fs/dlm/dir.c node = (hash >> 16) % ls->ls_total_weight; ls 41 fs/dlm/dir.c return ls->ls_node_array[node]; ls 50 fs/dlm/dir.c void dlm_recover_dir_nodeid(struct dlm_ls *ls) ls 54 fs/dlm/dir.c down_read(&ls->ls_root_sem); ls 55 fs/dlm/dir.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 56 fs/dlm/dir.c r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); ls 58 fs/dlm/dir.c up_read(&ls->ls_root_sem); ls 61 fs/dlm/dir.c int dlm_recover_directory(struct dlm_ls *ls) ls 69 fs/dlm/dir.c log_rinfo(ls, "dlm_recover_directory"); ls 71 fs/dlm/dir.c if (dlm_no_directory(ls)) ls 78 fs/dlm/dir.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 87 fs/dlm/dir.c error = dlm_recovery_stopped(ls); ls 91 fs/dlm/dir.c error = dlm_rcom_names(ls, memb->nodeid, ls 102 fs/dlm/dir.c b = ls->ls_recover_buf->rc_buf; ls 103 fs/dlm/dir.c left = ls->ls_recover_buf->rc_header.h_length; ls 133 fs/dlm/dir.c error = dlm_master_lookup(ls, memb->nodeid, ls 138 fs/dlm/dir.c log_error(ls, "recover_dir lookup %d", ls 151 fs/dlm/dir.c log_error(ls, "recover_dir lookup %d " ls 188 fs/dlm/dir.c dlm_set_recover_status(ls, DLM_RS_DIR); ls 190 fs/dlm/dir.c log_rinfo(ls, "dlm_recover_directory %u in %u new", ls 198 fs/dlm/dir.c static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len) ls 205 fs/dlm/dir.c bucket = hash & (ls->ls_rsbtbl_size - 1); ls 207 fs/dlm/dir.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 208 fs/dlm/dir.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); ls 210 
fs/dlm/dir.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, ls 212 fs/dlm/dir.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 217 fs/dlm/dir.c down_read(&ls->ls_root_sem); ls 218 fs/dlm/dir.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 220 fs/dlm/dir.c up_read(&ls->ls_root_sem); ls 221 fs/dlm/dir.c log_debug(ls, "find_rsb_root revert to root_list %s", ls 226 fs/dlm/dir.c up_read(&ls->ls_root_sem); ls 234 fs/dlm/dir.c void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, ls 242 fs/dlm/dir.c down_read(&ls->ls_root_sem); ls 245 fs/dlm/dir.c r = find_rsb_root(ls, inbuf, inlen); ls 248 fs/dlm/dir.c log_error(ls, "copy_master_names from %d start %d %s", ls 254 fs/dlm/dir.c list = ls->ls_root_list.next; ls 257 fs/dlm/dir.c for (offset = 0; list != &ls->ls_root_list; list = list->next) { ls 279 fs/dlm/dir.c ls->ls_recover_dir_sent_msg++; ls 288 fs/dlm/dir.c ls->ls_recover_dir_sent_res++; ls 296 fs/dlm/dir.c if ((list == &ls->ls_root_list) && ls 301 fs/dlm/dir.c ls->ls_recover_dir_sent_msg++; ls 304 fs/dlm/dir.c up_read(&ls->ls_root_sem); ls 16 fs/dlm/dir.h int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash); ls 17 fs/dlm/dir.h void dlm_recover_dir_nodeid(struct dlm_ls *ls); ls 18 fs/dlm/dir.h int dlm_recover_directory(struct dlm_ls *ls); ls 19 fs/dlm/dir.h void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, ls 63 fs/dlm/dlm_internal.h #define log_error(ls, fmt, args...) \ ls 64 fs/dlm/dlm_internal.h printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args) ls 66 fs/dlm/dlm_internal.h #define log_rinfo(ls, fmt, args...) \ ls 70 fs/dlm/dlm_internal.h (ls)->ls_name, ##args); \ ls 73 fs/dlm/dlm_internal.h (ls)->ls_name , ##args); \ ls 76 fs/dlm/dlm_internal.h #define log_debug(ls, fmt, args...) \ ls 80 fs/dlm/dlm_internal.h (ls)->ls_name , ##args); \ ls 83 fs/dlm/dlm_internal.h #define log_limit(ls, fmt, args...) 
\ ls 87 fs/dlm/dlm_internal.h (ls)->ls_name , ##args); \ ls 700 fs/dlm/dlm_internal.h static inline int dlm_locking_stopped(struct dlm_ls *ls) ls 702 fs/dlm/dlm_internal.h return !test_bit(LSFL_RUNNING, &ls->ls_flags); ls 705 fs/dlm/dlm_internal.h static inline int dlm_recovery_stopped(struct dlm_ls *ls) ls 707 fs/dlm/dlm_internal.h return test_bit(LSFL_RECOVER_STOP, &ls->ls_flags); ls 710 fs/dlm/dlm_internal.h static inline int dlm_no_directory(struct dlm_ls *ls) ls 712 fs/dlm/dlm_internal.h return test_bit(LSFL_NODIR, &ls->ls_flags); ls 724 fs/dlm/dlm_internal.h void dlm_create_debug_file(struct dlm_ls *ls); ls 725 fs/dlm/dlm_internal.h void dlm_delete_debug_file(struct dlm_ls *ls); ls 729 fs/dlm/dlm_internal.h static inline void dlm_create_debug_file(struct dlm_ls *ls) { } ls 730 fs/dlm/dlm_internal.h static inline void dlm_delete_debug_file(struct dlm_ls *ls) { } ls 89 fs/dlm/lock.c static void do_purge(struct dlm_ls *ls, int nodeid, int pid); ls 203 fs/dlm/lock.c static inline void dlm_lock_recovery(struct dlm_ls *ls) ls 205 fs/dlm/lock.c down_read(&ls->ls_in_recovery); ls 208 fs/dlm/lock.c void dlm_unlock_recovery(struct dlm_ls *ls) ls 210 fs/dlm/lock.c up_read(&ls->ls_in_recovery); ls 213 fs/dlm/lock.c int dlm_lock_recovery_try(struct dlm_ls *ls) ls 215 fs/dlm/lock.c return down_read_trylock(&ls->ls_in_recovery); ls 349 fs/dlm/lock.c struct dlm_ls *ls = r->res_ls; ls 352 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 354 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 362 fs/dlm/lock.c static int pre_rsb_struct(struct dlm_ls *ls) ls 367 fs/dlm/lock.c spin_lock(&ls->ls_new_rsb_spin); ls 368 fs/dlm/lock.c if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { ls 369 fs/dlm/lock.c spin_unlock(&ls->ls_new_rsb_spin); ls 372 fs/dlm/lock.c spin_unlock(&ls->ls_new_rsb_spin); ls 374 fs/dlm/lock.c r1 = dlm_allocate_rsb(ls); ls 375 fs/dlm/lock.c r2 = dlm_allocate_rsb(ls); ls 377 fs/dlm/lock.c spin_lock(&ls->ls_new_rsb_spin); ls 379 fs/dlm/lock.c list_add(&r1->res_hashchain, &ls->ls_new_rsb); ls 380 fs/dlm/lock.c ls->ls_new_rsb_count++; ls 383 fs/dlm/lock.c list_add(&r2->res_hashchain, &ls->ls_new_rsb); ls 384 fs/dlm/lock.c ls->ls_new_rsb_count++; ls 386 fs/dlm/lock.c count = ls->ls_new_rsb_count; ls 387 fs/dlm/lock.c spin_unlock(&ls->ls_new_rsb_spin); ls 398 fs/dlm/lock.c static int get_rsb_struct(struct dlm_ls *ls, char *name, int len, ls 404 fs/dlm/lock.c spin_lock(&ls->ls_new_rsb_spin); ls 405 fs/dlm/lock.c if (list_empty(&ls->ls_new_rsb)) { ls 406 fs/dlm/lock.c count = ls->ls_new_rsb_count; ls 407 fs/dlm/lock.c spin_unlock(&ls->ls_new_rsb_spin); ls 408 fs/dlm/lock.c log_debug(ls, "find_rsb retry %d %d %s", ls 413 fs/dlm/lock.c r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); ls 417 fs/dlm/lock.c ls->ls_new_rsb_count--; ls 418 fs/dlm/lock.c spin_unlock(&ls->ls_new_rsb_spin); ls 420 fs/dlm/lock.c r->res_ls = ls; ls 543 fs/dlm/lock.c static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, ls 587 fs/dlm/lock.c error = pre_rsb_struct(ls); ls 592 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 594 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 608 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 622 fs/dlm/lock.c log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s", ls 631 fs/dlm/lock.c log_error(ls, "find_rsb toss from_dir %d master %d", ls 648 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); ls 649 fs/dlm/lock.c error = rsb_insert(r, 
&ls->ls_rsbtbl[b].keep); ls 661 fs/dlm/lock.c error = get_rsb_struct(ls, name, len, &r); ls 663 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 676 fs/dlm/lock.c log_debug(ls, "find_rsb new from_dir %d recreate %s", ls 685 fs/dlm/lock.c log_error(ls, "find_rsb new from_other %d dir %d our %d %s", ls 694 fs/dlm/lock.c log_debug(ls, "find_rsb new from_other %d dir %d %s", ls 710 fs/dlm/lock.c error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); ls 712 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 722 fs/dlm/lock.c static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len, ls 733 fs/dlm/lock.c error = pre_rsb_struct(ls); ls 737 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 739 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 752 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 765 fs/dlm/lock.c log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d", ls 776 fs/dlm/lock.c log_error(ls, "find_rsb toss our %d master %d dir %d", ls 783 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); ls 784 fs/dlm/lock.c error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); ls 793 fs/dlm/lock.c error = get_rsb_struct(ls, name, len, &r); ls 795 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 808 fs/dlm/lock.c error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); ls 810 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 816 fs/dlm/lock.c static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid, ls 826 fs/dlm/lock.c b = hash & (ls->ls_rsbtbl_size - 1); ls 828 fs/dlm/lock.c dir_nodeid = dlm_hash2nodeid(ls, hash); ls 830 fs/dlm/lock.c if (dlm_no_directory(ls)) ls 831 fs/dlm/lock.c return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid, ls 834 fs/dlm/lock.c return find_rsb_dir(ls, name, len, hash, b, dir_nodeid, ls 841 fs/dlm/lock.c static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r, ls 844 fs/dlm/lock.c if (dlm_no_directory(ls)) { ls 845 fs/dlm/lock.c log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d", ls 858 fs/dlm/lock.c log_debug(ls, "validate master from_other %d master %d " ls 869 fs/dlm/lock.c log_error(ls, "validate master from_dir %d master %d " ls 910 fs/dlm/lock.c int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len, ls 924 fs/dlm/lock.c log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x", ls 930 fs/dlm/lock.c b = hash & (ls->ls_rsbtbl_size - 1); ls 932 fs/dlm/lock.c dir_nodeid = dlm_hash2nodeid(ls, hash); ls 934 fs/dlm/lock.c log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d", ls 936 fs/dlm/lock.c ls->ls_num_nodes); ls 942 fs/dlm/lock.c error = pre_rsb_struct(ls); ls 946 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 947 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 953 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 958 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 969 fs/dlm/lock.c log_error(ls, "dlm_master_lookup res_dir %d our %d %s", ls 974 fs/dlm/lock.c if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) { ls 986 fs/dlm/lock.c log_error(ls, "dlm_master_lookup fix_master on toss"); ls 996 fs/dlm/lock.c log_limit(ls, "dlm_master_lookup from_master %d " ls 1002 fs/dlm/lock.c log_error(ls, "from_master %d our_master", from_nodeid); ls 1016 fs/dlm/lock.c log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s", ls 1028 fs/dlm/lock.c log_limit(ls, "dlm_master_lookup from master %d flags %x " ls 1041 
fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1050 fs/dlm/lock.c error = get_rsb_struct(ls, name, len, &r); ls 1052 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1066 fs/dlm/lock.c error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); ls 1070 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1079 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1083 fs/dlm/lock.c static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) ls 1089 fs/dlm/lock.c for (i = 0; i < ls->ls_rsbtbl_size; i++) { ls 1090 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[i].lock); ls 1091 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { ls 1096 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[i].lock); ls 1100 fs/dlm/lock.c void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len) ls 1107 fs/dlm/lock.c b = hash & (ls->ls_rsbtbl_size - 1); ls 1109 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 1110 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 1114 fs/dlm/lock.c error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 1120 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1126 fs/dlm/lock.c struct dlm_ls *ls = r->res_ls; ls 1130 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); ls 1131 fs/dlm/lock.c rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); ls 1133 fs/dlm/lock.c ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK; ls 1181 fs/dlm/lock.c static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) ls 1186 fs/dlm/lock.c lkb = dlm_allocate_lkb(ls); ls 1201 fs/dlm/lock.c spin_lock(&ls->ls_lkbidr_spin); ls 1202 fs/dlm/lock.c rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT); ls 1205 fs/dlm/lock.c spin_unlock(&ls->ls_lkbidr_spin); ls 1209 fs/dlm/lock.c log_error(ls, "create_lkb idr error %d", rv); ls 1218 fs/dlm/lock.c static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) ls 1222 fs/dlm/lock.c spin_lock(&ls->ls_lkbidr_spin); ls 1223 fs/dlm/lock.c lkb = idr_find(&ls->ls_lkbidr, lkid); ls 1226 fs/dlm/lock.c spin_unlock(&ls->ls_lkbidr_spin); ls 1245 fs/dlm/lock.c static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) ls 1249 fs/dlm/lock.c spin_lock(&ls->ls_lkbidr_spin); ls 1251 fs/dlm/lock.c idr_remove(&ls->ls_lkbidr, lkid); ls 1252 fs/dlm/lock.c spin_unlock(&ls->ls_lkbidr_spin); ls 1262 fs/dlm/lock.c spin_unlock(&ls->ls_lkbidr_spin); ls 1269 fs/dlm/lock.c struct dlm_ls *ls; ls 1274 fs/dlm/lock.c ls = lkb->lkb_resource->res_ls; ls 1275 fs/dlm/lock.c return __put_lkb(ls, lkb); ls 1393 fs/dlm/lock.c void dlm_scan_waiters(struct dlm_ls *ls) ls 1406 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 1408 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { ls 1426 fs/dlm/lock.c num_nodes = ls->ls_num_nodes; ls 1434 fs/dlm/lock.c log_error(ls, "waitwarn %x %lld %d us check connection to " ls 1438 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 1442 fs/dlm/lock.c log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us", ls 1452 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1455 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 1478 fs/dlm/lock.c log_debug(ls, "addwait %x cur %d overlap %d count %d f %x", ls 1493 fs/dlm/lock.c list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); ls 1496 fs/dlm/lock.c log_error(ls, "addwait error %x %d flags %x %d %d %s", ls 1499 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 1511 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1515 fs/dlm/lock.c 
log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); ls 1522 fs/dlm/lock.c log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); ls 1533 fs/dlm/lock.c log_debug(ls, "remwait %x cancel_reply wait_type %d", ls 1549 fs/dlm/lock.c log_debug(ls, "remwait %x convert_reply zap overlap_cancel", ls 1565 fs/dlm/lock.c log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait", ls 1577 fs/dlm/lock.c log_error(ls, "remwait error %x reply %d wait_type %d overlap", ls 1595 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1598 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 1600 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 1609 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1613 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 1616 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 1627 fs/dlm/lock.c struct dlm_ls *ls = r->res_ls; ls 1629 fs/dlm/lock.c spin_lock(&ls->ls_remove_spin); ls 1630 fs/dlm/lock.c if (ls->ls_remove_len && ls 1631 fs/dlm/lock.c !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) { ls 1632 fs/dlm/lock.c log_debug(ls, "delay lookup for remove dir %d %s", ls 1634 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 1638 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 1648 fs/dlm/lock.c static void shrink_bucket(struct dlm_ls *ls, int b) ls 1658 fs/dlm/lock.c memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); ls 1660 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 1662 fs/dlm/lock.c if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) { ls 1663 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1667 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { ls 1676 fs/dlm/lock.c if (!dlm_no_directory(ls) && ls 1689 fs/dlm/lock.c if (!dlm_no_directory(ls) && ls 1697 fs/dlm/lock.c ls->ls_remove_lens[remote_count] = r->res_length; ls 1698 fs/dlm/lock.c memcpy(ls->ls_remove_names[remote_count], r->res_name, ls 1708 fs/dlm/lock.c log_error(ls, "tossed rsb in use %s", r->res_name); ls 1712 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); ls 1717 fs/dlm/lock.c ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK; ls 1719 fs/dlm/lock.c ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK; ls 1720 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1737 fs/dlm/lock.c name = ls->ls_remove_names[i]; ls 1738 fs/dlm/lock.c len = ls->ls_remove_lens[i]; ls 1740 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 1741 fs/dlm/lock.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 1743 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1744 fs/dlm/lock.c log_debug(ls, "remove_name not toss %s", name); ls 1749 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1750 fs/dlm/lock.c log_debug(ls, "remove_name master %d dir %d our %d %s", ls 1758 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1759 fs/dlm/lock.c log_error(ls, "remove_name dir %d master %d our %d %s", ls 1767 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1768 fs/dlm/lock.c log_debug(ls, "remove_name toss_time %lu now %lu %s", ls 1774 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1775 fs/dlm/lock.c log_error(ls, "remove_name in use %s", name); ls 1779 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); ls 1782 fs/dlm/lock.c spin_lock(&ls->ls_remove_spin); ls 1783 fs/dlm/lock.c ls->ls_remove_len = len; ls 1784 fs/dlm/lock.c memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); ls 1785 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 1786 fs/dlm/lock.c 
spin_unlock(&ls->ls_rsbtbl[b].lock); ls 1791 fs/dlm/lock.c spin_lock(&ls->ls_remove_spin); ls 1792 fs/dlm/lock.c ls->ls_remove_len = 0; ls 1793 fs/dlm/lock.c memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); ls 1794 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 1800 fs/dlm/lock.c void dlm_scan_rsbs(struct dlm_ls *ls) ls 1804 fs/dlm/lock.c for (i = 0; i < ls->ls_rsbtbl_size; i++) { ls 1805 fs/dlm/lock.c shrink_bucket(ls, i); ls 1806 fs/dlm/lock.c if (dlm_locking_stopped(ls)) ls 1814 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1819 fs/dlm/lock.c if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) && ls 1830 fs/dlm/lock.c mutex_lock(&ls->ls_timeout_mutex); ls 1832 fs/dlm/lock.c list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout); ls 1833 fs/dlm/lock.c mutex_unlock(&ls->ls_timeout_mutex); ls 1838 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 1840 fs/dlm/lock.c mutex_lock(&ls->ls_timeout_mutex); ls 1845 fs/dlm/lock.c mutex_unlock(&ls->ls_timeout_mutex); ls 1854 fs/dlm/lock.c void dlm_scan_timeout(struct dlm_ls *ls) ls 1862 fs/dlm/lock.c if (dlm_locking_stopped(ls)) ls 1867 fs/dlm/lock.c mutex_lock(&ls->ls_timeout_mutex); ls 1868 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) { ls 1886 fs/dlm/lock.c mutex_unlock(&ls->ls_timeout_mutex); ls 1904 fs/dlm/lock.c log_debug(ls, "timeout cancel %x node %d %s", ls 1921 fs/dlm/lock.c void dlm_adjust_timeouts(struct dlm_ls *ls) ls 1924 fs/dlm/lock.c u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin); ls 1926 fs/dlm/lock.c ls->ls_recover_begin = 0; ls 1927 fs/dlm/lock.c mutex_lock(&ls->ls_timeout_mutex); ls 1928 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) ls 1930 fs/dlm/lock.c mutex_unlock(&ls->ls_timeout_mutex); ls 1935 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 1936 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { ls 1940 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 2886 fs/dlm/lock.c static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 2923 fs/dlm/lock.c log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s", ls 2939 fs/dlm/lock.c struct dlm_ls *ls = lkb->lkb_resource->res_ls; ls 2943 fs/dlm/lock.c log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); ls 2953 fs/dlm/lock.c log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); ls 2963 fs/dlm/lock.c log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id); ls 3060 fs/dlm/lock.c log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv, ls 3314 fs/dlm/lock.c static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name, ls 3320 fs/dlm/lock.c error = validate_lock_args(ls, lkb, args); ls 3324 fs/dlm/lock.c error = find_rsb(ls, name, len, 0, R_REQUEST, &r); ls 3340 fs/dlm/lock.c static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3351 fs/dlm/lock.c error = validate_lock_args(ls, lkb, args); ls 3362 fs/dlm/lock.c static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3384 fs/dlm/lock.c static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3421 fs/dlm/lock.c struct dlm_ls *ls; ls 3426 fs/dlm/lock.c ls = dlm_find_lockspace_local(lockspace); ls 3427 fs/dlm/lock.c if (!ls) ls 3430 fs/dlm/lock.c dlm_lock_recovery(ls); ls 3433 fs/dlm/lock.c error = find_lkb(ls, lksb->sb_lkid, &lkb); ls 3435 fs/dlm/lock.c error = create_lkb(ls, &lkb); ls 3446 fs/dlm/lock.c error = convert_lock(ls, lkb, &args); ls 3448 fs/dlm/lock.c error = request_lock(ls, lkb, name, namelen, &args); ls 3454 fs/dlm/lock.c __put_lkb(ls, lkb); ls 
3458 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 3459 fs/dlm/lock.c dlm_put_lockspace(ls); ls 3469 fs/dlm/lock.c struct dlm_ls *ls; ls 3474 fs/dlm/lock.c ls = dlm_find_lockspace_local(lockspace); ls 3475 fs/dlm/lock.c if (!ls) ls 3478 fs/dlm/lock.c dlm_lock_recovery(ls); ls 3480 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 3489 fs/dlm/lock.c error = cancel_lock(ls, lkb, &args); ls 3491 fs/dlm/lock.c error = unlock_lock(ls, lkb, &args); ls 3500 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 3501 fs/dlm/lock.c dlm_put_lockspace(ls); ls 3527 fs/dlm/lock.c static int _create_message(struct dlm_ls *ls, int mb_len, ls 3549 fs/dlm/lock.c ms->m_header.h_lockspace = ls->ls_global_id; ls 3839 fs/dlm/lock.c static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in, ls 3842 fs/dlm/lock.c struct dlm_rsb *r = &ls->ls_stub_rsb; ls 3887 fs/dlm/lock.c static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3894 fs/dlm/lock.c lkb->lkb_lvbptr = dlm_allocate_lvb(ls); ls 3898 fs/dlm/lock.c if (len > ls->ls_lvblen) ls 3899 fs/dlm/lock.c len = ls->ls_lvblen; ls 3915 fs/dlm/lock.c static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3929 fs/dlm/lock.c lkb->lkb_lvbptr = dlm_allocate_lvb(ls); ls 3937 fs/dlm/lock.c static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3943 fs/dlm/lock.c if (receive_lvb(ls, lkb, ms)) ls 3952 fs/dlm/lock.c static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 3955 fs/dlm/lock.c if (receive_lvb(ls, lkb, ms)) ls 3963 fs/dlm/lock.c static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms) ls 3965 fs/dlm/lock.c struct dlm_lkb *lkb = &ls->ls_stub_lkb; ls 4014 fs/dlm/lock.c static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len) ls 4027 fs/dlm/lock.c b = hash & (ls->ls_rsbtbl_size - 1); ls 4029 fs/dlm/lock.c dir_nodeid = dlm_hash2nodeid(ls, hash); ls 4031 fs/dlm/lock.c log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name); ls 4033 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 4034 fs/dlm/lock.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 4036 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4037 fs/dlm/lock.c log_error(ls, "repeat_remove on keep %s", name); ls 4041 fs/dlm/lock.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 4043 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4044 fs/dlm/lock.c log_error(ls, "repeat_remove on toss %s", name); ls 4050 fs/dlm/lock.c spin_lock(&ls->ls_remove_spin); ls 4051 fs/dlm/lock.c ls->ls_remove_len = len; ls 4052 fs/dlm/lock.c memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); ls 4053 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 4054 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4056 fs/dlm/lock.c rv = _create_message(ls, sizeof(struct dlm_message) + len, ls 4066 fs/dlm/lock.c spin_lock(&ls->ls_remove_spin); ls 4067 fs/dlm/lock.c ls->ls_remove_len = 0; ls 4068 fs/dlm/lock.c memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); ls 4069 fs/dlm/lock.c spin_unlock(&ls->ls_remove_spin); ls 4072 fs/dlm/lock.c static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) ls 4081 fs/dlm/lock.c error = create_lkb(ls, &lkb); ls 4087 fs/dlm/lock.c error = receive_request_args(ls, lkb, ms); ls 4089 fs/dlm/lock.c __put_lkb(ls, lkb); ls 4101 fs/dlm/lock.c error = find_rsb(ls, ms->m_extra, namelen, from_nodeid, ls 4104 fs/dlm/lock.c __put_lkb(ls, lkb); ls 4111 fs/dlm/lock.c error = validate_master_nodeid(ls, r, from_nodeid); ls 4115 fs/dlm/lock.c __put_lkb(ls, 
lkb); ls 4151 fs/dlm/lock.c log_limit(ls, "receive_request %x from %d %d", ls 4156 fs/dlm/lock.c send_repeat_remove(ls, ms->m_extra, namelen); ls 4160 fs/dlm/lock.c setup_stub_lkb(ls, ms); ls 4161 fs/dlm/lock.c send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); ls 4165 fs/dlm/lock.c static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) ls 4171 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4176 fs/dlm/lock.c log_error(ls, "receive_convert %x remid %x recover_seq %llu " ls 4196 fs/dlm/lock.c error = receive_convert_args(ls, lkb, ms); ls 4215 fs/dlm/lock.c setup_stub_lkb(ls, ms); ls 4216 fs/dlm/lock.c send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); ls 4220 fs/dlm/lock.c static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) ls 4226 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4231 fs/dlm/lock.c log_error(ls, "receive_unlock %x remid %x remote %d %x", ls 4250 fs/dlm/lock.c error = receive_unlock_args(ls, lkb, ms); ls 4266 fs/dlm/lock.c setup_stub_lkb(ls, ms); ls 4267 fs/dlm/lock.c send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); ls 4271 fs/dlm/lock.c static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) ls 4277 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4302 fs/dlm/lock.c setup_stub_lkb(ls, ms); ls 4303 fs/dlm/lock.c send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); ls 4307 fs/dlm/lock.c static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) ls 4313 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4338 fs/dlm/lock.c static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) ls 4344 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4366 fs/dlm/lock.c static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) ls 4375 fs/dlm/lock.c error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0, ls 4380 fs/dlm/lock.c receive_request(ls, ms); ls 4383 fs/dlm/lock.c send_lookup_reply(ls, ms, ret_nodeid, error); ls 4386 fs/dlm/lock.c static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms) ls 4398 fs/dlm/lock.c log_error(ls, "receive_remove from %d bad len %d", ls 4403 fs/dlm/lock.c dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash); ls 4405 fs/dlm/lock.c log_error(ls, "receive_remove from %d bad nodeid %d", ls 4423 fs/dlm/lock.c b = hash & (ls->ls_rsbtbl_size - 1); ls 4425 fs/dlm/lock.c spin_lock(&ls->ls_rsbtbl[b].lock); ls 4427 fs/dlm/lock.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); ls 4430 fs/dlm/lock.c rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); ls 4433 fs/dlm/lock.c log_error(ls, "receive_remove from %d not found %s", ls 4435 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4440 fs/dlm/lock.c log_error(ls, "receive_remove keep from %d master %d", ls 4443 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4447 fs/dlm/lock.c log_debug(ls, "receive_remove from %d master %d first %x %s", ls 4450 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4455 fs/dlm/lock.c log_error(ls, "receive_remove toss from %d master %d", ls 4458 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4463 fs/dlm/lock.c rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); ls 4464 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4467 fs/dlm/lock.c log_error(ls, "receive_remove from %d rsb ref error", ls 4470 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[b].lock); ls 4474 fs/dlm/lock.c static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms) ls 4476 fs/dlm/lock.c 
do_purge(ls, ms->m_nodeid, ms->m_pid); ls 4479 fs/dlm/lock.c static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) ls 4486 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4501 fs/dlm/lock.c log_error(ls, "receive_request_reply %x remote %d %x result %d", ls 4546 fs/dlm/lock.c log_limit(ls, "receive_request_reply %x from %d %d " ls 4573 fs/dlm/lock.c log_error(ls, "receive_request_reply %x error %d", ls 4578 fs/dlm/lock.c log_debug(ls, "receive_request_reply %x result %d unlock", ls 4584 fs/dlm/lock.c log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); ls 4666 fs/dlm/lock.c static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms) ls 4671 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4716 fs/dlm/lock.c static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms) ls 4721 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4766 fs/dlm/lock.c static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms) ls 4771 fs/dlm/lock.c error = find_lkb(ls, ms->m_remid, &lkb); ls 4780 fs/dlm/lock.c static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) ls 4787 fs/dlm/lock.c error = find_lkb(ls, ms->m_lkid, &lkb); ls 4789 fs/dlm/lock.c log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid); ls 4814 fs/dlm/lock.c log_error(ls, "receive_lookup_reply %x from %d ret %d " ls 4828 fs/dlm/lock.c log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid", ls 4840 fs/dlm/lock.c log_debug(ls, "receive_lookup_reply %x unlock %x", ls 4858 fs/dlm/lock.c static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, ls 4863 fs/dlm/lock.c if (!dlm_is_member(ls, ms->m_header.h_nodeid)) { ls 4864 fs/dlm/lock.c log_limit(ls, "receive %d from non-member %d %x %x %d", ls 4875 fs/dlm/lock.c error = receive_request(ls, ms); ls 4879 fs/dlm/lock.c error = receive_convert(ls, ms); ls 4883 fs/dlm/lock.c error = receive_unlock(ls, ms); ls 4888 fs/dlm/lock.c error = receive_cancel(ls, ms); ls 4894 fs/dlm/lock.c error = receive_request_reply(ls, ms); ls 4898 fs/dlm/lock.c error = receive_convert_reply(ls, ms); ls 4902 fs/dlm/lock.c error = receive_unlock_reply(ls, ms); ls 4906 fs/dlm/lock.c error = receive_cancel_reply(ls, ms); ls 4913 fs/dlm/lock.c error = receive_grant(ls, ms); ls 4918 fs/dlm/lock.c error = receive_bast(ls, ms); ls 4924 fs/dlm/lock.c receive_lookup(ls, ms); ls 4928 fs/dlm/lock.c receive_remove(ls, ms); ls 4934 fs/dlm/lock.c receive_lookup_reply(ls, ms); ls 4940 fs/dlm/lock.c receive_purge(ls, ms); ls 4944 fs/dlm/lock.c log_error(ls, "unknown message type %d", ms->m_type); ls 4959 fs/dlm/lock.c log_debug(ls, "receive %d no %x remote %d %x saved_seq %u", ls 4963 fs/dlm/lock.c log_error(ls, "receive %d no %x remote %d %x saved_seq %u", ls 4968 fs/dlm/lock.c dlm_dump_rsb_hash(ls, ms->m_hash); ls 4972 fs/dlm/lock.c log_error(ls, "receive %d inval from %d lkid %x remid %x " ls 4987 fs/dlm/lock.c static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms, ls 4990 fs/dlm/lock.c if (dlm_locking_stopped(ls)) { ls 4994 fs/dlm/lock.c if (!ls->ls_generation) { ls 4995 fs/dlm/lock.c log_limit(ls, "receive %d from %d ignore old gen", ls 5000 fs/dlm/lock.c dlm_add_requestqueue(ls, nodeid, ms); ls 5002 fs/dlm/lock.c dlm_wait_requestqueue(ls); ls 5003 fs/dlm/lock.c _receive_message(ls, ms, 0); ls 5010 fs/dlm/lock.c void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, ls 5013 fs/dlm/lock.c _receive_message(ls, ms, saved_seq); ls 5024 fs/dlm/lock.c struct 
dlm_ls *ls; ls 5047 fs/dlm/lock.c ls = dlm_find_lockspace_global(hd->h_lockspace); ls 5048 fs/dlm/lock.c if (!ls) { ls 5063 fs/dlm/lock.c down_read(&ls->ls_recv_active); ls 5065 fs/dlm/lock.c dlm_receive_message(ls, &p->message, nodeid); ls 5067 fs/dlm/lock.c dlm_receive_rcom(ls, &p->rcom, nodeid); ls 5068 fs/dlm/lock.c up_read(&ls->ls_recv_active); ls 5070 fs/dlm/lock.c dlm_put_lockspace(ls); ls 5073 fs/dlm/lock.c static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 5101 fs/dlm/lock.c static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 5104 fs/dlm/lock.c if (dlm_no_directory(ls)) ls 5107 fs/dlm/lock.c if (dlm_is_removed(ls, lkb->lkb_wait_nodeid)) ls 5119 fs/dlm/lock.c void dlm_recover_waiters_pre(struct dlm_ls *ls) ls 5130 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 5132 fs/dlm/lock.c list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { ls 5140 fs/dlm/lock.c log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " ls 5159 fs/dlm/lock.c if (!waiter_needs_recovery(ls, lkb, dir_nodeid)) ls 5183 fs/dlm/lock.c log_debug(ls, "rwpre overlap %x %x %d %d %d", ls 5195 fs/dlm/lock.c recover_convert_waiter(ls, lkb, ms_stub); ls 5221 fs/dlm/lock.c log_error(ls, "invalid lkb wait_type %d %d", ls 5226 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 5230 fs/dlm/lock.c static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) ls 5235 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 5236 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { ls 5243 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 5266 fs/dlm/lock.c int dlm_recover_waiters_post(struct dlm_ls *ls) ls 5273 fs/dlm/lock.c if (dlm_locking_stopped(ls)) { ls 5274 fs/dlm/lock.c log_debug(ls, "recover_waiters_post aborted"); ls 5279 fs/dlm/lock.c lkb = find_resend_waiter(ls); ls 5292 fs/dlm/lock.c log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " ls 5307 fs/dlm/lock.c mutex_lock(&ls->ls_waiters_mutex); ls 5309 fs/dlm/lock.c mutex_unlock(&ls->ls_waiters_mutex); ls 5349 fs/dlm/lock.c log_error(ls, "waiter %x msg %d r_nodeid %d " ls 5362 fs/dlm/lock.c static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r, ls 5374 fs/dlm/lock.c if (lkb->lkb_recover_seq == ls->ls_recover_seq) ls 5381 fs/dlm/lock.c log_error(ls, "purged mstcpy lkb not released"); ls 5387 fs/dlm/lock.c struct dlm_ls *ls = r->res_ls; ls 5389 fs/dlm/lock.c purge_mstcpy_list(ls, r, &r->res_grantqueue); ls 5390 fs/dlm/lock.c purge_mstcpy_list(ls, r, &r->res_convertqueue); ls 5391 fs/dlm/lock.c purge_mstcpy_list(ls, r, &r->res_waitqueue); ls 5394 fs/dlm/lock.c static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r, ls 5405 fs/dlm/lock.c dlm_is_removed(ls, lkb->lkb_nodeid)) { ls 5418 fs/dlm/lock.c log_error(ls, "purged dead lkb not released"); ls 5429 fs/dlm/lock.c void dlm_recover_purge(struct dlm_ls *ls) ls 5440 fs/dlm/lock.c list_for_each_entry(memb, &ls->ls_nodes_gone, list) { ls 5448 fs/dlm/lock.c down_write(&ls->ls_root_sem); ls 5449 fs/dlm/lock.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 5453 fs/dlm/lock.c purge_dead_list(ls, r, &r->res_grantqueue, ls 5455 fs/dlm/lock.c purge_dead_list(ls, r, &r->res_convertqueue, ls 5457 fs/dlm/lock.c purge_dead_list(ls, r, &r->res_waitqueue, ls 5464 fs/dlm/lock.c up_write(&ls->ls_root_sem); ls 5467 fs/dlm/lock.c log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes", ls 5471 fs/dlm/lock.c static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) ls 5476 fs/dlm/lock.c 
spin_lock(&ls->ls_rsbtbl[bucket].lock); ls 5477 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { ls 5487 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 5490 fs/dlm/lock.c spin_unlock(&ls->ls_rsbtbl[bucket].lock); ls 5511 fs/dlm/lock.c void dlm_recover_grant(struct dlm_ls *ls) ls 5520 fs/dlm/lock.c r = find_grant_rsb(ls, bucket); ls 5522 fs/dlm/lock.c if (bucket == ls->ls_rsbtbl_size - 1) ls 5541 fs/dlm/lock.c log_rinfo(ls, "dlm_recover_grant %u locks on %u resources", ls 5575 fs/dlm/lock.c static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, ls 5597 fs/dlm/lock.c if (lvblen > ls->ls_lvblen) ls 5599 fs/dlm/lock.c lkb->lkb_lvbptr = dlm_allocate_lvb(ls); ls 5626 fs/dlm/lock.c int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) ls 5650 fs/dlm/lock.c error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), ls 5657 fs/dlm/lock.c if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) { ls 5658 fs/dlm/lock.c log_error(ls, "dlm_recover_master_copy remote %d %x not dir", ls 5670 fs/dlm/lock.c error = create_lkb(ls, &lkb); ls 5674 fs/dlm/lock.c error = receive_rcom_lock_args(ls, lkb, r, rc); ls 5676 fs/dlm/lock.c __put_lkb(ls, lkb); ls 5683 fs/dlm/lock.c ls->ls_recover_locks_in++; ls 5693 fs/dlm/lock.c lkb->lkb_recover_seq = ls->ls_recover_seq; ls 5700 fs/dlm/lock.c log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d", ls 5707 fs/dlm/lock.c int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) ls 5719 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 5721 fs/dlm/lock.c log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d", ls 5731 fs/dlm/lock.c log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d", ls 5746 fs/dlm/lock.c log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d", ls 5756 fs/dlm/lock.c log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk", ls 5771 fs/dlm/lock.c int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, ls 5779 fs/dlm/lock.c dlm_lock_recovery(ls); ls 5781 fs/dlm/lock.c error = create_lkb(ls, &lkb); ls 5791 fs/dlm/lock.c __put_lkb(ls, lkb); ls 5802 fs/dlm/lock.c __put_lkb(ls, lkb); ls 5810 fs/dlm/lock.c error = request_lock(ls, lkb, name, namelen, &args); ls 5822 fs/dlm/lock.c __put_lkb(ls, lkb); ls 5832 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 5836 fs/dlm/lock.c int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 5845 fs/dlm/lock.c dlm_lock_recovery(ls); ls 5847 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 5878 fs/dlm/lock.c error = convert_lock(ls, lkb, &args); ls 5885 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 5896 fs/dlm/lock.c int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 5906 fs/dlm/lock.c mutex_lock(&ls->ls_orphans_mutex); ls 5907 fs/dlm/lock.c list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) { ls 5923 fs/dlm/lock.c mutex_unlock(&ls->ls_orphans_mutex); ls 5962 fs/dlm/lock.c int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 5970 fs/dlm/lock.c dlm_lock_recovery(ls); ls 5972 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 5988 fs/dlm/lock.c error = unlock_lock(ls, lkb, &args); ls 6006 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 6011 fs/dlm/lock.c int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 6019 fs/dlm/lock.c dlm_lock_recovery(ls); ls 6021 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 6034 fs/dlm/lock.c error = cancel_lock(ls, lkb, &args); ls 6044 fs/dlm/lock.c dlm_unlock_recovery(ls); 
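The fs/dlm/lock.c hits above repeatedly show one bracketing pattern around `ls`: take a lockspace reference with dlm_find_lockspace_local(), hold off recovery with dlm_lock_recovery(), operate on the lkb, then release with dlm_unlock_recovery() and dlm_put_lockspace(). Below is a condensed sketch of that pattern, modelled on the dlm_unlock() hits around lines 3469-3501; the function name and reduced signature are hypothetical, and it assumes it sits inside lock.c next to the static helpers find_lkb()/unlock_lock()/cancel_lock() and the dlm_internal.h types.

/*
 * Sketch only: a trimmed version of the lockspace/recovery bracketing
 * visible in the dlm_unlock() hits above (fs/dlm/lock.c ~3469-3501).
 */
static int dlm_unlock_sketch(void *lockspace, uint32_t lkid, uint32_t flags)
{
	struct dlm_ls *ls;
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	ls = dlm_find_lockspace_local(lockspace);	/* takes an ls_count reference */
	if (!ls)
		return -EINVAL;

	dlm_lock_recovery(ls);		/* block recovery while we touch the lkb */

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	/* the real code fills args from the caller's flags/lksb/astarg here */

	if (flags & DLM_LKF_CANCEL)
		error = cancel_lock(ls, lkb, &args);
	else
		error = unlock_lock(ls, lkb, &args);

	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	dlm_put_lockspace(ls);		/* drops the ls_count reference */
	return error;
}

The same shape recurs in the dlm_user_* hits further down (dlm_user_request, dlm_user_convert, dlm_user_unlock, dlm_user_cancel), which differ only in the operation performed between the recovery lock and unlock.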
ls 6049 fs/dlm/lock.c int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) ls 6057 fs/dlm/lock.c dlm_lock_recovery(ls); ls 6059 fs/dlm/lock.c error = find_lkb(ls, lkid, &lkb); ls 6093 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 6100 fs/dlm/lock.c static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) ls 6106 fs/dlm/lock.c mutex_lock(&ls->ls_orphans_mutex); ls 6107 fs/dlm/lock.c list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); ls 6108 fs/dlm/lock.c mutex_unlock(&ls->ls_orphans_mutex); ls 6112 fs/dlm/lock.c error = cancel_lock(ls, lkb, &args); ls 6123 fs/dlm/lock.c static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) ls 6131 fs/dlm/lock.c error = unlock_lock(ls, lkb, &args); ls 6141 fs/dlm/lock.c static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, ls 6146 fs/dlm/lock.c mutex_lock(&ls->ls_clear_proc_locks); ls 6158 fs/dlm/lock.c mutex_unlock(&ls->ls_clear_proc_locks); ls 6172 fs/dlm/lock.c void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ls 6176 fs/dlm/lock.c dlm_lock_recovery(ls); ls 6179 fs/dlm/lock.c lkb = del_proc_lock(ls, proc); ls 6184 fs/dlm/lock.c orphan_proc_lock(ls, lkb); ls 6186 fs/dlm/lock.c unlock_proc_lock(ls, lkb); ls 6195 fs/dlm/lock.c mutex_lock(&ls->ls_clear_proc_locks); ls 6211 fs/dlm/lock.c mutex_unlock(&ls->ls_clear_proc_locks); ls 6212 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 6215 fs/dlm/lock.c static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) ls 6233 fs/dlm/lock.c unlock_proc_lock(ls, lkb); ls 6257 fs/dlm/lock.c static void do_purge(struct dlm_ls *ls, int nodeid, int pid) ls 6261 fs/dlm/lock.c mutex_lock(&ls->ls_orphans_mutex); ls 6262 fs/dlm/lock.c list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { ls 6265 fs/dlm/lock.c unlock_proc_lock(ls, lkb); ls 6269 fs/dlm/lock.c mutex_unlock(&ls->ls_orphans_mutex); ls 6272 fs/dlm/lock.c static int send_purge(struct dlm_ls *ls, int nodeid, int pid) ls 6278 fs/dlm/lock.c error = _create_message(ls, sizeof(struct dlm_message), nodeid, ls 6288 fs/dlm/lock.c int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, ls 6294 fs/dlm/lock.c error = send_purge(ls, nodeid, pid); ls 6296 fs/dlm/lock.c dlm_lock_recovery(ls); ls 6298 fs/dlm/lock.c purge_proc_locks(ls, proc); ls 6300 fs/dlm/lock.c do_purge(ls, nodeid, pid); ls 6301 fs/dlm/lock.c dlm_unlock_recovery(ls); ls 15 fs/dlm/lock.h void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len); ls 17 fs/dlm/lock.h void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, ls 24 fs/dlm/lock.h void dlm_scan_rsbs(struct dlm_ls *ls); ls 25 fs/dlm/lock.h int dlm_lock_recovery_try(struct dlm_ls *ls); ls 26 fs/dlm/lock.h void dlm_unlock_recovery(struct dlm_ls *ls); ls 27 fs/dlm/lock.h void dlm_scan_waiters(struct dlm_ls *ls); ls 28 fs/dlm/lock.h void dlm_scan_timeout(struct dlm_ls *ls); ls 29 fs/dlm/lock.h void dlm_adjust_timeouts(struct dlm_ls *ls); ls 30 fs/dlm/lock.h int dlm_master_lookup(struct dlm_ls *ls, int nodeid, char *name, int len, ls 36 fs/dlm/lock.h void dlm_recover_purge(struct dlm_ls *ls); ls 38 fs/dlm/lock.h void dlm_recover_grant(struct dlm_ls *ls); ls 39 fs/dlm/lock.h int dlm_recover_waiters_post(struct dlm_ls *ls); ls 40 fs/dlm/lock.h void dlm_recover_waiters_pre(struct dlm_ls *ls); ls 41 fs/dlm/lock.h int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc); ls 42 fs/dlm/lock.h int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc); ls 44 fs/dlm/lock.h int dlm_user_request(struct dlm_ls *ls, struct 
dlm_user_args *ua, int mode, ls 47 fs/dlm/lock.h int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 50 fs/dlm/lock.h int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 53 fs/dlm/lock.h int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 55 fs/dlm/lock.h int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ls 57 fs/dlm/lock.h int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, ls 59 fs/dlm/lock.h int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid); ls 60 fs/dlm/lock.h void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc); ls 35 fs/dlm/lockspace.c static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) ls 43 fs/dlm/lockspace.c ls = dlm_find_lockspace_local(ls->ls_local_handle); ls 44 fs/dlm/lockspace.c if (!ls) ls 49 fs/dlm/lockspace.c dlm_ls_stop(ls); ls 52 fs/dlm/lockspace.c dlm_ls_start(ls); ls 57 fs/dlm/lockspace.c dlm_put_lockspace(ls); ls 61 fs/dlm/lockspace.c static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len) ls 63 fs/dlm/lockspace.c int rc = kstrtoint(buf, 0, &ls->ls_uevent_result); ls 67 fs/dlm/lockspace.c set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags); ls 68 fs/dlm/lockspace.c wake_up(&ls->ls_uevent_wait); ls 72 fs/dlm/lockspace.c static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf) ls 74 fs/dlm/lockspace.c return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id); ls 77 fs/dlm/lockspace.c static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len) ls 79 fs/dlm/lockspace.c int rc = kstrtouint(buf, 0, &ls->ls_global_id); ls 86 fs/dlm/lockspace.c static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf) ls 88 fs/dlm/lockspace.c return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls)); ls 91 fs/dlm/lockspace.c static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len) ls 99 fs/dlm/lockspace.c set_bit(LSFL_NODIR, &ls->ls_flags); ls 103 fs/dlm/lockspace.c static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf) ls 105 fs/dlm/lockspace.c uint32_t status = dlm_recover_status(ls); ls 109 fs/dlm/lockspace.c static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf) ls 111 fs/dlm/lockspace.c return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid); ls 166 fs/dlm/lockspace.c struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); ls 168 fs/dlm/lockspace.c return a->show ? a->show(ls, buf) : 0; ls 174 fs/dlm/lockspace.c struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); ls 176 fs/dlm/lockspace.c return a->store ? a->store(ls, buf, len) : len; ls 181 fs/dlm/lockspace.c struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj); ls 182 fs/dlm/lockspace.c kfree(ls); ls 198 fs/dlm/lockspace.c static int do_uevent(struct dlm_ls *ls, int in) ls 203 fs/dlm/lockspace.c kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE); ls 205 fs/dlm/lockspace.c kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); ls 207 fs/dlm/lockspace.c log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving"); ls 212 fs/dlm/lockspace.c error = wait_event_interruptible(ls->ls_uevent_wait, ls 213 fs/dlm/lockspace.c test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); ls 215 fs/dlm/lockspace.c log_rinfo(ls, "group event done %d %d", error, ls->ls_uevent_result); ls 220 fs/dlm/lockspace.c error = ls->ls_uevent_result; ls 223 fs/dlm/lockspace.c log_error(ls, "group %s failed %d %d", in ? 
"join" : "leave", ls 224 fs/dlm/lockspace.c error, ls->ls_uevent_result); ls 231 fs/dlm/lockspace.c struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); ls 233 fs/dlm/lockspace.c add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); ls 263 fs/dlm/lockspace.c struct dlm_ls *ls; ls 266 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 267 fs/dlm/lockspace.c if (time_after_eq(jiffies, ls->ls_scan_time + ls 270 fs/dlm/lockspace.c return ls; ls 279 fs/dlm/lockspace.c struct dlm_ls *ls; ls 282 fs/dlm/lockspace.c ls = find_ls_to_scan(); ls 283 fs/dlm/lockspace.c if (ls) { ls 284 fs/dlm/lockspace.c if (dlm_lock_recovery_try(ls)) { ls 285 fs/dlm/lockspace.c ls->ls_scan_time = jiffies; ls 286 fs/dlm/lockspace.c dlm_scan_rsbs(ls); ls 287 fs/dlm/lockspace.c dlm_scan_timeout(ls); ls 288 fs/dlm/lockspace.c dlm_scan_waiters(ls); ls 289 fs/dlm/lockspace.c dlm_unlock_recovery(ls); ls 291 fs/dlm/lockspace.c ls->ls_scan_time += HZ; ls 320 fs/dlm/lockspace.c struct dlm_ls *ls; ls 324 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 325 fs/dlm/lockspace.c if (ls->ls_global_id == id) { ls 326 fs/dlm/lockspace.c ls->ls_count++; ls 330 fs/dlm/lockspace.c ls = NULL; ls 333 fs/dlm/lockspace.c return ls; ls 338 fs/dlm/lockspace.c struct dlm_ls *ls; ls 341 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 342 fs/dlm/lockspace.c if (ls->ls_local_handle == lockspace) { ls 343 fs/dlm/lockspace.c ls->ls_count++; ls 347 fs/dlm/lockspace.c ls = NULL; ls 350 fs/dlm/lockspace.c return ls; ls 355 fs/dlm/lockspace.c struct dlm_ls *ls; ls 358 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 359 fs/dlm/lockspace.c if (ls->ls_device.minor == minor) { ls 360 fs/dlm/lockspace.c ls->ls_count++; ls 364 fs/dlm/lockspace.c ls = NULL; ls 367 fs/dlm/lockspace.c return ls; ls 370 fs/dlm/lockspace.c void dlm_put_lockspace(struct dlm_ls *ls) ls 373 fs/dlm/lockspace.c ls->ls_count--; ls 377 fs/dlm/lockspace.c static void remove_lockspace(struct dlm_ls *ls) ls 381 fs/dlm/lockspace.c if (ls->ls_count == 0) { ls 382 fs/dlm/lockspace.c WARN_ON(ls->ls_create_count != 0); ls 383 fs/dlm/lockspace.c list_del(&ls->ls_list); ls 428 fs/dlm/lockspace.c struct dlm_ls *ls; ls 471 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 472 fs/dlm/lockspace.c WARN_ON(ls->ls_create_count <= 0); ls 473 fs/dlm/lockspace.c if (ls->ls_namelen != namelen) ls 475 fs/dlm/lockspace.c if (memcmp(ls->ls_name, name, namelen)) ls 481 fs/dlm/lockspace.c ls->ls_create_count++; ls 482 fs/dlm/lockspace.c *lockspace = ls; ls 493 fs/dlm/lockspace.c ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS); ls 494 fs/dlm/lockspace.c if (!ls) ls 496 fs/dlm/lockspace.c memcpy(ls->ls_name, name, namelen); ls 497 fs/dlm/lockspace.c ls->ls_namelen = namelen; ls 498 fs/dlm/lockspace.c ls->ls_lvblen = lvblen; ls 499 fs/dlm/lockspace.c ls->ls_count = 0; ls 500 fs/dlm/lockspace.c ls->ls_flags = 0; ls 501 fs/dlm/lockspace.c ls->ls_scan_time = jiffies; ls 504 fs/dlm/lockspace.c ls->ls_ops = ops; ls 505 fs/dlm/lockspace.c ls->ls_ops_arg = ops_arg; ls 509 fs/dlm/lockspace.c set_bit(LSFL_TIMEWARN, &ls->ls_flags); ls 513 fs/dlm/lockspace.c ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS | ls 517 fs/dlm/lockspace.c ls->ls_rsbtbl_size = size; ls 519 fs/dlm/lockspace.c ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable))); ls 520 fs/dlm/lockspace.c if (!ls->ls_rsbtbl) ls 523 fs/dlm/lockspace.c ls->ls_rsbtbl[i].keep.rb_node = NULL; ls 524 fs/dlm/lockspace.c ls->ls_rsbtbl[i].toss.rb_node 
= NULL; ls 525 fs/dlm/lockspace.c spin_lock_init(&ls->ls_rsbtbl[i].lock); ls 528 fs/dlm/lockspace.c spin_lock_init(&ls->ls_remove_spin); ls 531 fs/dlm/lockspace.c ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, ls 533 fs/dlm/lockspace.c if (!ls->ls_remove_names[i]) ls 537 fs/dlm/lockspace.c idr_init(&ls->ls_lkbidr); ls 538 fs/dlm/lockspace.c spin_lock_init(&ls->ls_lkbidr_spin); ls 540 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_waiters); ls 541 fs/dlm/lockspace.c mutex_init(&ls->ls_waiters_mutex); ls 542 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_orphans); ls 543 fs/dlm/lockspace.c mutex_init(&ls->ls_orphans_mutex); ls 544 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_timeout); ls 545 fs/dlm/lockspace.c mutex_init(&ls->ls_timeout_mutex); ls 547 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_new_rsb); ls 548 fs/dlm/lockspace.c spin_lock_init(&ls->ls_new_rsb_spin); ls 550 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_nodes); ls 551 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_nodes_gone); ls 552 fs/dlm/lockspace.c ls->ls_num_nodes = 0; ls 553 fs/dlm/lockspace.c ls->ls_low_nodeid = 0; ls 554 fs/dlm/lockspace.c ls->ls_total_weight = 0; ls 555 fs/dlm/lockspace.c ls->ls_node_array = NULL; ls 557 fs/dlm/lockspace.c memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb)); ls 558 fs/dlm/lockspace.c ls->ls_stub_rsb.res_ls = ls; ls 560 fs/dlm/lockspace.c ls->ls_debug_rsb_dentry = NULL; ls 561 fs/dlm/lockspace.c ls->ls_debug_waiters_dentry = NULL; ls 563 fs/dlm/lockspace.c init_waitqueue_head(&ls->ls_uevent_wait); ls 564 fs/dlm/lockspace.c ls->ls_uevent_result = 0; ls 565 fs/dlm/lockspace.c init_completion(&ls->ls_members_done); ls 566 fs/dlm/lockspace.c ls->ls_members_result = -1; ls 568 fs/dlm/lockspace.c mutex_init(&ls->ls_cb_mutex); ls 569 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_cb_delay); ls 571 fs/dlm/lockspace.c ls->ls_recoverd_task = NULL; ls 572 fs/dlm/lockspace.c mutex_init(&ls->ls_recoverd_active); ls 573 fs/dlm/lockspace.c spin_lock_init(&ls->ls_recover_lock); ls 574 fs/dlm/lockspace.c spin_lock_init(&ls->ls_rcom_spin); ls 575 fs/dlm/lockspace.c get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); ls 576 fs/dlm/lockspace.c ls->ls_recover_status = 0; ls 577 fs/dlm/lockspace.c ls->ls_recover_seq = 0; ls 578 fs/dlm/lockspace.c ls->ls_recover_args = NULL; ls 579 fs/dlm/lockspace.c init_rwsem(&ls->ls_in_recovery); ls 580 fs/dlm/lockspace.c init_rwsem(&ls->ls_recv_active); ls 581 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_requestqueue); ls 582 fs/dlm/lockspace.c mutex_init(&ls->ls_requestqueue_mutex); ls 583 fs/dlm/lockspace.c mutex_init(&ls->ls_clear_proc_locks); ls 585 fs/dlm/lockspace.c ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS); ls 586 fs/dlm/lockspace.c if (!ls->ls_recover_buf) ls 589 fs/dlm/lockspace.c ls->ls_slot = 0; ls 590 fs/dlm/lockspace.c ls->ls_num_slots = 0; ls 591 fs/dlm/lockspace.c ls->ls_slots_size = 0; ls 592 fs/dlm/lockspace.c ls->ls_slots = NULL; ls 594 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_recover_list); ls 595 fs/dlm/lockspace.c spin_lock_init(&ls->ls_recover_list_lock); ls 596 fs/dlm/lockspace.c idr_init(&ls->ls_recover_idr); ls 597 fs/dlm/lockspace.c spin_lock_init(&ls->ls_recover_idr_lock); ls 598 fs/dlm/lockspace.c ls->ls_recover_list_count = 0; ls 599 fs/dlm/lockspace.c ls->ls_local_handle = ls; ls 600 fs/dlm/lockspace.c init_waitqueue_head(&ls->ls_wait_general); ls 601 fs/dlm/lockspace.c INIT_LIST_HEAD(&ls->ls_root_list); ls 602 fs/dlm/lockspace.c init_rwsem(&ls->ls_root_sem); ls 605 fs/dlm/lockspace.c ls->ls_create_count = 1; ls 606 fs/dlm/lockspace.c 
list_add(&ls->ls_list, &lslist); ls 610 fs/dlm/lockspace.c error = dlm_callback_start(ls); ls 612 fs/dlm/lockspace.c log_error(ls, "can't start dlm_callback %d", error); ls 617 fs/dlm/lockspace.c init_waitqueue_head(&ls->ls_recover_lock_wait); ls 626 fs/dlm/lockspace.c error = dlm_recoverd_start(ls); ls 628 fs/dlm/lockspace.c log_error(ls, "can't start dlm_recoverd %d", error); ls 632 fs/dlm/lockspace.c wait_event(ls->ls_recover_lock_wait, ls 633 fs/dlm/lockspace.c test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); ls 635 fs/dlm/lockspace.c ls->ls_kobj.kset = dlm_kset; ls 636 fs/dlm/lockspace.c error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, ls 637 fs/dlm/lockspace.c "%s", ls->ls_name); ls 640 fs/dlm/lockspace.c kobject_uevent(&ls->ls_kobj, KOBJ_ADD); ls 651 fs/dlm/lockspace.c error = do_uevent(ls, 1); ls 655 fs/dlm/lockspace.c wait_for_completion(&ls->ls_members_done); ls 656 fs/dlm/lockspace.c error = ls->ls_members_result; ls 660 fs/dlm/lockspace.c dlm_create_debug_file(ls); ls 662 fs/dlm/lockspace.c log_rinfo(ls, "join complete"); ls 663 fs/dlm/lockspace.c *lockspace = ls; ls 667 fs/dlm/lockspace.c do_uevent(ls, 0); ls 668 fs/dlm/lockspace.c dlm_clear_members(ls); ls 669 fs/dlm/lockspace.c kfree(ls->ls_node_array); ls 671 fs/dlm/lockspace.c dlm_recoverd_stop(ls); ls 673 fs/dlm/lockspace.c dlm_callback_stop(ls); ls 676 fs/dlm/lockspace.c list_del(&ls->ls_list); ls 678 fs/dlm/lockspace.c idr_destroy(&ls->ls_recover_idr); ls 679 fs/dlm/lockspace.c kfree(ls->ls_recover_buf); ls 681 fs/dlm/lockspace.c idr_destroy(&ls->ls_lkbidr); ls 684 fs/dlm/lockspace.c kfree(ls->ls_remove_names[i]); ls 685 fs/dlm/lockspace.c vfree(ls->ls_rsbtbl); ls 688 fs/dlm/lockspace.c kobject_put(&ls->ls_kobj); ls 690 fs/dlm/lockspace.c kfree(ls); ls 749 fs/dlm/lockspace.c static int lockspace_busy(struct dlm_ls *ls, int force) ls 753 fs/dlm/lockspace.c spin_lock(&ls->ls_lkbidr_spin); ls 755 fs/dlm/lockspace.c rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); ls 757 fs/dlm/lockspace.c rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); ls 761 fs/dlm/lockspace.c spin_unlock(&ls->ls_lkbidr_spin); ls 765 fs/dlm/lockspace.c static int release_lockspace(struct dlm_ls *ls, int force) ls 771 fs/dlm/lockspace.c busy = lockspace_busy(ls, force); ls 774 fs/dlm/lockspace.c if (ls->ls_create_count == 1) { ls 779 fs/dlm/lockspace.c ls->ls_create_count = 0; ls 782 fs/dlm/lockspace.c } else if (ls->ls_create_count > 1) { ls 783 fs/dlm/lockspace.c rv = --ls->ls_create_count; ls 790 fs/dlm/lockspace.c log_debug(ls, "release_lockspace no remove %d", rv); ls 794 fs/dlm/lockspace.c dlm_device_deregister(ls); ls 797 fs/dlm/lockspace.c do_uevent(ls, 0); ls 799 fs/dlm/lockspace.c dlm_recoverd_stop(ls); ls 801 fs/dlm/lockspace.c dlm_callback_stop(ls); ls 803 fs/dlm/lockspace.c remove_lockspace(ls); ls 805 fs/dlm/lockspace.c dlm_delete_debug_file(ls); ls 807 fs/dlm/lockspace.c idr_destroy(&ls->ls_recover_idr); ls 808 fs/dlm/lockspace.c kfree(ls->ls_recover_buf); ls 814 fs/dlm/lockspace.c idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); ls 815 fs/dlm/lockspace.c idr_destroy(&ls->ls_lkbidr); ls 821 fs/dlm/lockspace.c for (i = 0; i < ls->ls_rsbtbl_size; i++) { ls 822 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { ls 824 fs/dlm/lockspace.c rb_erase(n, &ls->ls_rsbtbl[i].keep); ls 828 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { ls 830 fs/dlm/lockspace.c rb_erase(n, &ls->ls_rsbtbl[i].toss); ls 835 fs/dlm/lockspace.c vfree(ls->ls_rsbtbl); ls 838 fs/dlm/lockspace.c 
kfree(ls->ls_remove_names[i]); ls 840 fs/dlm/lockspace.c while (!list_empty(&ls->ls_new_rsb)) { ls 841 fs/dlm/lockspace.c rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, ls 851 fs/dlm/lockspace.c dlm_purge_requestqueue(ls); ls 852 fs/dlm/lockspace.c kfree(ls->ls_recover_args); ls 853 fs/dlm/lockspace.c dlm_clear_members(ls); ls 854 fs/dlm/lockspace.c dlm_clear_members_gone(ls); ls 855 fs/dlm/lockspace.c kfree(ls->ls_node_array); ls 856 fs/dlm/lockspace.c log_rinfo(ls, "release_lockspace final free"); ls 857 fs/dlm/lockspace.c kobject_put(&ls->ls_kobj); ls 880 fs/dlm/lockspace.c struct dlm_ls *ls; ls 883 fs/dlm/lockspace.c ls = dlm_find_lockspace_local(lockspace); ls 884 fs/dlm/lockspace.c if (!ls) ls 886 fs/dlm/lockspace.c dlm_put_lockspace(ls); ls 889 fs/dlm/lockspace.c error = release_lockspace(ls, force); ls 901 fs/dlm/lockspace.c struct dlm_ls *ls; ls 907 fs/dlm/lockspace.c list_for_each_entry(ls, &lslist, ls_list) { ls 908 fs/dlm/lockspace.c if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { ls 913 fs/dlm/lockspace.c log_error(ls, "no userland control daemon, stopping lockspace"); ls 914 fs/dlm/lockspace.c dlm_ls_stop(ls); ls 20 fs/dlm/lockspace.h void dlm_put_lockspace(struct dlm_ls *ls); ls 27 fs/dlm/member.c void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc, ls 39 fs/dlm/member.c void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc) ls 49 fs/dlm/member.c for (i = 0; i < ls->ls_slots_size; i++) { ls 50 fs/dlm/member.c slot = &ls->ls_slots[i]; ls 61 fs/dlm/member.c static void log_slots(struct dlm_ls *ls, uint32_t gen, int num_slots, ls 93 fs/dlm/member.c log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line); ls 96 fs/dlm/member.c int dlm_slots_copy_in(struct dlm_ls *ls) ls 99 fs/dlm/member.c struct dlm_rcom *rc = ls->ls_recover_buf; ls 110 fs/dlm/member.c if (gen <= ls->ls_generation) { ls 111 fs/dlm/member.c log_error(ls, "dlm_slots_copy_in gen %u old %u", ls 112 fs/dlm/member.c gen, ls->ls_generation); ls 114 fs/dlm/member.c ls->ls_generation = gen; ls 127 fs/dlm/member.c log_slots(ls, gen, num_slots, ro0, NULL, 0); ls 129 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 139 fs/dlm/member.c if (ls->ls_slot && ls->ls_slot != memb->slot) { ls 140 fs/dlm/member.c log_error(ls, "dlm_slots_copy_in our slot " ls 141 fs/dlm/member.c "changed %d %d", ls->ls_slot, ls 146 fs/dlm/member.c if (!ls->ls_slot) ls 147 fs/dlm/member.c ls->ls_slot = memb->slot; ls 151 fs/dlm/member.c log_error(ls, "dlm_slots_copy_in nodeid %d no slot", ls 164 fs/dlm/member.c int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size, ls 178 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 180 fs/dlm/member.c memb->slot = ls->ls_slot; ls 181 fs/dlm/member.c memb->generation = ls->ls_generation; ls 186 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 210 fs/dlm/member.c log_error(ls, "nodeid %d slot changed %d %d", ls 226 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 231 fs/dlm/member.c log_error(ls, "invalid slot number %d", memb->slot); ls 243 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 257 fs/dlm/member.c if (!ls->ls_slot && memb->nodeid == our_nodeid) ls 258 fs/dlm/member.c ls->ls_slot = memb->slot; ls 263 fs/dlm/member.c log_error(ls, "no free slot found"); ls 271 fs/dlm/member.c log_slots(ls, gen, num, NULL, array, array_size); ls 277 fs/dlm/member.c log_error(ls, "num_slots %d exceeds max_slots %d", ls 290 fs/dlm/member.c static void add_ordered_member(struct dlm_ls 
*ls, struct dlm_member *new) ls 295 fs/dlm/member.c struct list_head *head = &ls->ls_nodes; ls 314 fs/dlm/member.c static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node) ls 332 fs/dlm/member.c add_ordered_member(ls, memb); ls 333 fs/dlm/member.c ls->ls_num_nodes++; ls 348 fs/dlm/member.c int dlm_is_member(struct dlm_ls *ls, int nodeid) ls 350 fs/dlm/member.c if (find_memb(&ls->ls_nodes, nodeid)) ls 355 fs/dlm/member.c int dlm_is_removed(struct dlm_ls *ls, int nodeid) ls 357 fs/dlm/member.c if (find_memb(&ls->ls_nodes_gone, nodeid)) ls 373 fs/dlm/member.c void dlm_clear_members(struct dlm_ls *ls) ls 375 fs/dlm/member.c clear_memb_list(&ls->ls_nodes); ls 376 fs/dlm/member.c ls->ls_num_nodes = 0; ls 379 fs/dlm/member.c void dlm_clear_members_gone(struct dlm_ls *ls) ls 381 fs/dlm/member.c clear_memb_list(&ls->ls_nodes_gone); ls 384 fs/dlm/member.c static void make_member_array(struct dlm_ls *ls) ls 389 fs/dlm/member.c kfree(ls->ls_node_array); ls 390 fs/dlm/member.c ls->ls_node_array = NULL; ls 392 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 400 fs/dlm/member.c total = ls->ls_num_nodes; ls 404 fs/dlm/member.c ls->ls_total_weight = total; ls 409 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 424 fs/dlm/member.c ls->ls_node_array = array; ls 429 fs/dlm/member.c static int ping_members(struct dlm_ls *ls) ls 434 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 435 fs/dlm/member.c error = dlm_recovery_stopped(ls); ls 438 fs/dlm/member.c error = dlm_rcom_status(ls, memb->nodeid, 0); ls 443 fs/dlm/member.c log_rinfo(ls, "ping_members aborted %d last nodeid %d", ls 444 fs/dlm/member.c error, ls->ls_recover_nodeid); ls 448 fs/dlm/member.c static void dlm_lsop_recover_prep(struct dlm_ls *ls) ls 450 fs/dlm/member.c if (!ls->ls_ops || !ls->ls_ops->recover_prep) ls 452 fs/dlm/member.c ls->ls_ops->recover_prep(ls->ls_ops_arg); ls 455 fs/dlm/member.c static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb) ls 461 fs/dlm/member.c if (!ls->ls_ops || !ls->ls_ops->recover_slot) ls 478 fs/dlm/member.c ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot); ls 481 fs/dlm/member.c void dlm_lsop_recover_done(struct dlm_ls *ls) ls 487 fs/dlm/member.c if (!ls->ls_ops || !ls->ls_ops->recover_done) ls 490 fs/dlm/member.c num = ls->ls_num_nodes; ls 496 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 498 fs/dlm/member.c log_error(ls, "dlm_lsop_recover_done bad num %d", num); ls 506 fs/dlm/member.c ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num, ls 507 fs/dlm/member.c ls->ls_slot, ls->ls_generation); ls 524 fs/dlm/member.c int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) ls 533 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes_gone, list) { ls 534 fs/dlm/member.c log_rinfo(ls, "prev removed member %d", memb->nodeid); ls 540 fs/dlm/member.c list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { ls 546 fs/dlm/member.c log_rinfo(ls, "remove member %d", memb->nodeid); ls 549 fs/dlm/member.c log_rinfo(ls, "remove member %d comm_seq %u %u", ls 554 fs/dlm/member.c list_move(&memb->list, &ls->ls_nodes_gone); ls 555 fs/dlm/member.c ls->ls_num_nodes--; ls 556 fs/dlm/member.c dlm_lsop_recover_slot(ls, memb); ls 563 fs/dlm/member.c if (dlm_is_member(ls, node->nodeid)) ls 565 fs/dlm/member.c dlm_add_member(ls, node); ls 566 fs/dlm/member.c log_rinfo(ls, "add member %d", node->nodeid); ls 569 fs/dlm/member.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 573 
fs/dlm/member.c ls->ls_low_nodeid = low; ls 575 fs/dlm/member.c make_member_array(ls); ls 578 fs/dlm/member.c error = ping_members(ls); ls 582 fs/dlm/member.c ls->ls_members_result = error; ls 583 fs/dlm/member.c complete(&ls->ls_members_done); ls 586 fs/dlm/member.c log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes); ls 593 fs/dlm/member.c int dlm_ls_stop(struct dlm_ls *ls) ls 608 fs/dlm/member.c down_write(&ls->ls_recv_active); ls 616 fs/dlm/member.c spin_lock(&ls->ls_recover_lock); ls 617 fs/dlm/member.c set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); ls 618 fs/dlm/member.c new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); ls 619 fs/dlm/member.c ls->ls_recover_seq++; ls 620 fs/dlm/member.c spin_unlock(&ls->ls_recover_lock); ls 627 fs/dlm/member.c up_write(&ls->ls_recv_active); ls 638 fs/dlm/member.c set_bit(LSFL_RECOVER_DOWN, &ls->ls_flags); ls 639 fs/dlm/member.c wake_up_process(ls->ls_recoverd_task); ls 640 fs/dlm/member.c wait_event(ls->ls_recover_lock_wait, ls 641 fs/dlm/member.c test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); ls 650 fs/dlm/member.c dlm_recoverd_suspend(ls); ls 652 fs/dlm/member.c spin_lock(&ls->ls_recover_lock); ls 653 fs/dlm/member.c kfree(ls->ls_slots); ls 654 fs/dlm/member.c ls->ls_slots = NULL; ls 655 fs/dlm/member.c ls->ls_num_slots = 0; ls 656 fs/dlm/member.c ls->ls_slots_size = 0; ls 657 fs/dlm/member.c ls->ls_recover_status = 0; ls 658 fs/dlm/member.c spin_unlock(&ls->ls_recover_lock); ls 660 fs/dlm/member.c dlm_recoverd_resume(ls); ls 662 fs/dlm/member.c if (!ls->ls_recover_begin) ls 663 fs/dlm/member.c ls->ls_recover_begin = jiffies; ls 665 fs/dlm/member.c dlm_lsop_recover_prep(ls); ls 669 fs/dlm/member.c int dlm_ls_start(struct dlm_ls *ls) ls 679 fs/dlm/member.c error = dlm_config_nodes(ls->ls_name, &nodes, &count); ls 683 fs/dlm/member.c spin_lock(&ls->ls_recover_lock); ls 687 fs/dlm/member.c if (!dlm_locking_stopped(ls)) { ls 688 fs/dlm/member.c spin_unlock(&ls->ls_recover_lock); ls 689 fs/dlm/member.c log_error(ls, "start ignored: lockspace running"); ls 696 fs/dlm/member.c rv->seq = ++ls->ls_recover_seq; ls 697 fs/dlm/member.c rv_old = ls->ls_recover_args; ls 698 fs/dlm/member.c ls->ls_recover_args = rv; ls 699 fs/dlm/member.c spin_unlock(&ls->ls_recover_lock); ls 702 fs/dlm/member.c log_error(ls, "unused recovery %llx %d", ls 708 fs/dlm/member.c set_bit(LSFL_RECOVER_WORK, &ls->ls_flags); ls 709 fs/dlm/member.c wake_up_process(ls->ls_recoverd_task); ls 14 fs/dlm/member.h int dlm_ls_stop(struct dlm_ls *ls); ls 15 fs/dlm/member.h int dlm_ls_start(struct dlm_ls *ls); ls 16 fs/dlm/member.h void dlm_clear_members(struct dlm_ls *ls); ls 17 fs/dlm/member.h void dlm_clear_members_gone(struct dlm_ls *ls); ls 18 fs/dlm/member.h int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out); ls 19 fs/dlm/member.h int dlm_is_removed(struct dlm_ls *ls, int nodeid); ls 20 fs/dlm/member.h int dlm_is_member(struct dlm_ls *ls, int nodeid); ls 22 fs/dlm/member.h void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc, ls 24 fs/dlm/member.h void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc); ls 25 fs/dlm/member.h int dlm_slots_copy_in(struct dlm_ls *ls); ls 26 fs/dlm/member.h int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size, ls 28 fs/dlm/member.h void dlm_lsop_recover_done(struct dlm_ls *ls); ls 43 fs/dlm/memory.c char *dlm_allocate_lvb(struct dlm_ls *ls) ls 47 fs/dlm/memory.c p = kzalloc(ls->ls_lvblen, GFP_NOFS); ls 56 fs/dlm/memory.c struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls) ls 71 
fs/dlm/memory.c struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) ls 17 fs/dlm/memory.h struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls); ls 19 fs/dlm/memory.h struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls); ls 21 fs/dlm/memory.h char *dlm_allocate_lvb(struct dlm_ls *ls); ls 77 fs/dlm/plock.c static void do_unlock_close(struct dlm_ls *ls, u64 number, ls 88 fs/dlm/plock.c op->info.fsid = ls->ls_global_id; ls 104 fs/dlm/plock.c struct dlm_ls *ls; ls 109 fs/dlm/plock.c ls = dlm_find_lockspace_local(lockspace); ls 110 fs/dlm/plock.c if (!ls) ls 124 fs/dlm/plock.c op->info.fsid = ls->ls_global_id; ls 147 fs/dlm/plock.c log_debug(ls, "dlm_posix_lock: wait killed %llx", ls 153 fs/dlm/plock.c do_unlock_close(ls, number, file, fl); ls 163 fs/dlm/plock.c log_error(ls, "dlm_posix_lock: op on list %llx", ls 173 fs/dlm/plock.c log_error(ls, "dlm_posix_lock: vfs lock error %llx", ls 179 fs/dlm/plock.c dlm_put_lockspace(ls); ls 244 fs/dlm/plock.c struct dlm_ls *ls; ls 249 fs/dlm/plock.c ls = dlm_find_lockspace_local(lockspace); ls 250 fs/dlm/plock.c if (!ls) ls 268 fs/dlm/plock.c log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx", ls 274 fs/dlm/plock.c op->info.fsid = ls->ls_global_id; ls 295 fs/dlm/plock.c log_error(ls, "dlm_posix_unlock: op on list %llx", ls 309 fs/dlm/plock.c dlm_put_lockspace(ls); ls 318 fs/dlm/plock.c struct dlm_ls *ls; ls 322 fs/dlm/plock.c ls = dlm_find_lockspace_local(lockspace); ls 323 fs/dlm/plock.c if (!ls) ls 335 fs/dlm/plock.c op->info.fsid = ls->ls_global_id; ls 349 fs/dlm/plock.c log_error(ls, "dlm_posix_get: op on list %llx", ls 375 fs/dlm/plock.c dlm_put_lockspace(ls); ls 25 fs/dlm/rcom.c static int rcom_response(struct dlm_ls *ls) ls 27 fs/dlm/rcom.c return test_bit(LSFL_RCOM_READY, &ls->ls_flags); ls 30 fs/dlm/rcom.c static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, ls 49 fs/dlm/rcom.c rc->rc_header.h_lockspace = ls->ls_global_id; ls 56 fs/dlm/rcom.c spin_lock(&ls->ls_recover_lock); ls 57 fs/dlm/rcom.c rc->rc_seq = ls->ls_recover_seq; ls 58 fs/dlm/rcom.c spin_unlock(&ls->ls_recover_lock); ls 65 fs/dlm/rcom.c static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh, ls 72 fs/dlm/rcom.c static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs, ls 82 fs/dlm/rcom.c static void set_rcom_config(struct dlm_ls *ls, struct rcom_config *rf, ls 85 fs/dlm/rcom.c rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen); ls 86 fs/dlm/rcom.c rf->rf_lsflags = cpu_to_le32(ls->ls_exflags); ls 88 fs/dlm/rcom.c rf->rf_our_slot = cpu_to_le16(ls->ls_slot); ls 90 fs/dlm/rcom.c rf->rf_generation = cpu_to_le32(ls->ls_generation); ls 93 fs/dlm/rcom.c static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) ls 98 fs/dlm/rcom.c log_error(ls, "version mismatch: %x nodeid %d: %x", ls 104 fs/dlm/rcom.c if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen || ls 105 fs/dlm/rcom.c le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) { ls 106 fs/dlm/rcom.c log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", ls 107 fs/dlm/rcom.c ls->ls_lvblen, ls->ls_exflags, nodeid, ls 115 fs/dlm/rcom.c static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq) ls 117 fs/dlm/rcom.c spin_lock(&ls->ls_rcom_spin); ls 118 fs/dlm/rcom.c *new_seq = ++ls->ls_rcom_seq; ls 119 fs/dlm/rcom.c set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); ls 120 fs/dlm/rcom.c spin_unlock(&ls->ls_rcom_spin); ls 123 fs/dlm/rcom.c static void disallow_sync_reply(struct dlm_ls *ls) ls 125 fs/dlm/rcom.c spin_lock(&ls->ls_rcom_spin); ls 126 fs/dlm/rcom.c clear_bit(LSFL_RCOM_WAIT, 
&ls->ls_flags); ls 127 fs/dlm/rcom.c clear_bit(LSFL_RCOM_READY, &ls->ls_flags); ls 128 fs/dlm/rcom.c spin_unlock(&ls->ls_rcom_spin); ls 142 fs/dlm/rcom.c int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags) ls 148 fs/dlm/rcom.c ls->ls_recover_nodeid = nodeid; ls 151 fs/dlm/rcom.c rc = ls->ls_recover_buf; ls 152 fs/dlm/rcom.c rc->rc_result = dlm_recover_status(ls); ls 157 fs/dlm/rcom.c error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, ls 162 fs/dlm/rcom.c set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags); ls 164 fs/dlm/rcom.c allow_sync_reply(ls, &rc->rc_id); ls 165 fs/dlm/rcom.c memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size); ls 167 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 169 fs/dlm/rcom.c error = dlm_wait_function(ls, &rcom_response); ls 170 fs/dlm/rcom.c disallow_sync_reply(ls); ls 176 fs/dlm/rcom.c rc = ls->ls_recover_buf; ls 180 fs/dlm/rcom.c log_debug(ls, "remote node %d not ready", nodeid); ls 184 fs/dlm/rcom.c error = check_rcom_config(ls, rc, nodeid); ls 192 fs/dlm/rcom.c static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 204 fs/dlm/rcom.c status = dlm_recover_status(ls); ls 211 fs/dlm/rcom.c status = dlm_recover_status(ls); ls 215 fs/dlm/rcom.c spin_lock(&ls->ls_recover_lock); ls 216 fs/dlm/rcom.c status = ls->ls_recover_status; ls 217 fs/dlm/rcom.c num_slots = ls->ls_num_slots; ls 218 fs/dlm/rcom.c spin_unlock(&ls->ls_recover_lock); ls 222 fs/dlm/rcom.c error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY, ls 231 fs/dlm/rcom.c set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots); ls 236 fs/dlm/rcom.c spin_lock(&ls->ls_recover_lock); ls 237 fs/dlm/rcom.c if (ls->ls_num_slots != num_slots) { ls 238 fs/dlm/rcom.c spin_unlock(&ls->ls_recover_lock); ls 239 fs/dlm/rcom.c log_debug(ls, "receive_rcom_status num_slots %d to %d", ls 240 fs/dlm/rcom.c num_slots, ls->ls_num_slots); ls 242 fs/dlm/rcom.c set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, 0); ls 246 fs/dlm/rcom.c dlm_slots_copy_out(ls, rc); ls 247 fs/dlm/rcom.c spin_unlock(&ls->ls_recover_lock); ls 250 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 253 fs/dlm/rcom.c static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 255 fs/dlm/rcom.c spin_lock(&ls->ls_rcom_spin); ls 256 fs/dlm/rcom.c if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || ls 257 fs/dlm/rcom.c rc_in->rc_id != ls->ls_rcom_seq) { ls 258 fs/dlm/rcom.c log_debug(ls, "reject reply %d from %d seq %llx expect %llx", ls 261 fs/dlm/rcom.c (unsigned long long)ls->ls_rcom_seq); ls 264 fs/dlm/rcom.c memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length); ls 265 fs/dlm/rcom.c set_bit(LSFL_RCOM_READY, &ls->ls_flags); ls 266 fs/dlm/rcom.c clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); ls 267 fs/dlm/rcom.c wake_up(&ls->ls_wait_general); ls 269 fs/dlm/rcom.c spin_unlock(&ls->ls_rcom_spin); ls 272 fs/dlm/rcom.c int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len) ls 278 fs/dlm/rcom.c ls->ls_recover_nodeid = nodeid; ls 281 fs/dlm/rcom.c error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh); ls 286 fs/dlm/rcom.c allow_sync_reply(ls, &rc->rc_id); ls 287 fs/dlm/rcom.c memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size); ls 289 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 291 fs/dlm/rcom.c error = dlm_wait_function(ls, &rcom_response); ls 292 fs/dlm/rcom.c disallow_sync_reply(ls); ls 299 fs/dlm/rcom.c static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 309 fs/dlm/rcom.c error = create_rcom(ls, nodeid, 
DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh); ls 315 fs/dlm/rcom.c dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen, ls 317 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 324 fs/dlm/rcom.c struct dlm_ls *ls = r->res_ls; ls 327 fs/dlm/rcom.c error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length, ls 334 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 339 fs/dlm/rcom.c static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 346 fs/dlm/rcom.c error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh); ls 352 fs/dlm/rcom.c log_error(ls, "receive_rcom_lookup dump from %d", nodeid); ls 353 fs/dlm/rcom.c dlm_dump_rsb_name(ls, rc_in->rc_buf, len); ls 357 fs/dlm/rcom.c error = dlm_master_lookup(ls, nodeid, rc_in->rc_buf, len, ls 365 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 368 fs/dlm/rcom.c static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 370 fs/dlm/rcom.c dlm_recover_master_reply(ls, rc_in); ls 405 fs/dlm/rcom.c struct dlm_ls *ls = r->res_ls; ls 412 fs/dlm/rcom.c len += ls->ls_lvblen; ls 414 fs/dlm/rcom.c error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh); ls 422 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 428 fs/dlm/rcom.c static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) ls 434 fs/dlm/rcom.c dlm_recover_master_copy(ls, rc_in); ls 436 fs/dlm/rcom.c error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY, ls 448 fs/dlm/rcom.c send_rcom(ls, mh, rc); ls 534 fs/dlm/rcom.c void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) ls 568 fs/dlm/rcom.c spin_lock(&ls->ls_recover_lock); ls 569 fs/dlm/rcom.c status = ls->ls_recover_status; ls 570 fs/dlm/rcom.c stop = test_bit(LSFL_RECOVER_STOP, &ls->ls_flags); ls 571 fs/dlm/rcom.c seq = ls->ls_recover_seq; ls 572 fs/dlm/rcom.c spin_unlock(&ls->ls_recover_lock); ls 588 fs/dlm/rcom.c receive_rcom_status(ls, rc); ls 592 fs/dlm/rcom.c receive_rcom_names(ls, rc); ls 596 fs/dlm/rcom.c receive_rcom_lookup(ls, rc); ls 602 fs/dlm/rcom.c receive_rcom_lock(ls, rc); ls 606 fs/dlm/rcom.c receive_sync_reply(ls, rc); ls 610 fs/dlm/rcom.c receive_sync_reply(ls, rc); ls 614 fs/dlm/rcom.c receive_rcom_lookup_reply(ls, rc); ls 620 fs/dlm/rcom.c dlm_recover_process_copy(ls, rc); ls 624 fs/dlm/rcom.c log_error(ls, "receive_rcom bad type %d", rc->rc_type); ls 629 fs/dlm/rcom.c log_limit(ls, "dlm_receive_rcom ignore msg %d " ls 636 fs/dlm/rcom.c status, ls->ls_generation); ls 639 fs/dlm/rcom.c log_error(ls, "recovery message %d from %d is too short", ls 15 fs/dlm/rcom.h int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags); ls 16 fs/dlm/rcom.h int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len); ls 19 fs/dlm/rcom.h void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid); ls 42 fs/dlm/recover.c int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) ls 48 fs/dlm/recover.c rv = wait_event_timeout(ls->ls_wait_general, ls 49 fs/dlm/recover.c testfn(ls) || dlm_recovery_stopped(ls), ls 53 fs/dlm/recover.c if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) { ls 54 fs/dlm/recover.c log_debug(ls, "dlm_wait_function timed out"); ls 59 fs/dlm/recover.c if (dlm_recovery_stopped(ls)) { ls 60 fs/dlm/recover.c log_debug(ls, "dlm_wait_function aborted"); ls 74 fs/dlm/recover.c uint32_t dlm_recover_status(struct dlm_ls *ls) ls 77 fs/dlm/recover.c spin_lock(&ls->ls_recover_lock); ls 78 fs/dlm/recover.c status = ls->ls_recover_status; ls 79 fs/dlm/recover.c spin_unlock(&ls->ls_recover_lock); ls 83 
fs/dlm/recover.c static void _set_recover_status(struct dlm_ls *ls, uint32_t status) ls 85 fs/dlm/recover.c ls->ls_recover_status |= status; ls 88 fs/dlm/recover.c void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) ls 90 fs/dlm/recover.c spin_lock(&ls->ls_recover_lock); ls 91 fs/dlm/recover.c _set_recover_status(ls, status); ls 92 fs/dlm/recover.c spin_unlock(&ls->ls_recover_lock); ls 95 fs/dlm/recover.c static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, ls 98 fs/dlm/recover.c struct dlm_rcom *rc = ls->ls_recover_buf; ls 102 fs/dlm/recover.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 105 fs/dlm/recover.c if (dlm_recovery_stopped(ls)) { ls 110 fs/dlm/recover.c error = dlm_rcom_status(ls, memb->nodeid, 0); ls 115 fs/dlm/recover.c dlm_slot_save(ls, rc, memb); ls 128 fs/dlm/recover.c static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, ls 131 fs/dlm/recover.c struct dlm_rcom *rc = ls->ls_recover_buf; ls 132 fs/dlm/recover.c int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; ls 135 fs/dlm/recover.c if (dlm_recovery_stopped(ls)) { ls 140 fs/dlm/recover.c error = dlm_rcom_status(ls, nodeid, status_flags); ls 154 fs/dlm/recover.c static int wait_status(struct dlm_ls *ls, uint32_t status) ls 159 fs/dlm/recover.c if (ls->ls_low_nodeid == dlm_our_nodeid()) { ls 160 fs/dlm/recover.c error = wait_status_all(ls, status, 0); ls 162 fs/dlm/recover.c dlm_set_recover_status(ls, status_all); ls 164 fs/dlm/recover.c error = wait_status_low(ls, status_all, 0); ls 169 fs/dlm/recover.c int dlm_recover_members_wait(struct dlm_ls *ls) ls 177 fs/dlm/recover.c list_for_each_entry(memb, &ls->ls_nodes, list) { ls 182 fs/dlm/recover.c if (ls->ls_low_nodeid == dlm_our_nodeid()) { ls 183 fs/dlm/recover.c error = wait_status_all(ls, DLM_RS_NODES, 1); ls 189 fs/dlm/recover.c rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); ls 191 fs/dlm/recover.c spin_lock(&ls->ls_recover_lock); ls 192 fs/dlm/recover.c _set_recover_status(ls, DLM_RS_NODES_ALL); ls 193 fs/dlm/recover.c ls->ls_num_slots = num_slots; ls 194 fs/dlm/recover.c ls->ls_slots_size = slots_size; ls 195 fs/dlm/recover.c ls->ls_slots = slots; ls 196 fs/dlm/recover.c ls->ls_generation = gen; ls 197 fs/dlm/recover.c spin_unlock(&ls->ls_recover_lock); ls 199 fs/dlm/recover.c dlm_set_recover_status(ls, DLM_RS_NODES_ALL); ls 202 fs/dlm/recover.c error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS); ls 206 fs/dlm/recover.c dlm_slots_copy_in(ls); ls 212 fs/dlm/recover.c int dlm_recover_directory_wait(struct dlm_ls *ls) ls 214 fs/dlm/recover.c return wait_status(ls, DLM_RS_DIR); ls 217 fs/dlm/recover.c int dlm_recover_locks_wait(struct dlm_ls *ls) ls 219 fs/dlm/recover.c return wait_status(ls, DLM_RS_LOCKS); ls 222 fs/dlm/recover.c int dlm_recover_done_wait(struct dlm_ls *ls) ls 224 fs/dlm/recover.c return wait_status(ls, DLM_RS_DONE); ls 239 fs/dlm/recover.c static int recover_list_empty(struct dlm_ls *ls) ls 243 fs/dlm/recover.c spin_lock(&ls->ls_recover_list_lock); ls 244 fs/dlm/recover.c empty = list_empty(&ls->ls_recover_list); ls 245 fs/dlm/recover.c spin_unlock(&ls->ls_recover_list_lock); ls 252 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 254 fs/dlm/recover.c spin_lock(&ls->ls_recover_list_lock); ls 256 fs/dlm/recover.c list_add_tail(&r->res_recover_list, &ls->ls_recover_list); ls 257 fs/dlm/recover.c ls->ls_recover_list_count++; ls 260 fs/dlm/recover.c spin_unlock(&ls->ls_recover_list_lock); ls 265 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 267 fs/dlm/recover.c 
spin_lock(&ls->ls_recover_list_lock); ls 269 fs/dlm/recover.c ls->ls_recover_list_count--; ls 270 fs/dlm/recover.c spin_unlock(&ls->ls_recover_list_lock); ls 275 fs/dlm/recover.c static void recover_list_clear(struct dlm_ls *ls) ls 279 fs/dlm/recover.c spin_lock(&ls->ls_recover_list_lock); ls 280 fs/dlm/recover.c list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { ls 284 fs/dlm/recover.c ls->ls_recover_list_count--; ls 287 fs/dlm/recover.c if (ls->ls_recover_list_count != 0) { ls 288 fs/dlm/recover.c log_error(ls, "warning: recover_list_count %d", ls 289 fs/dlm/recover.c ls->ls_recover_list_count); ls 290 fs/dlm/recover.c ls->ls_recover_list_count = 0; ls 292 fs/dlm/recover.c spin_unlock(&ls->ls_recover_list_lock); ls 295 fs/dlm/recover.c static int recover_idr_empty(struct dlm_ls *ls) ls 299 fs/dlm/recover.c spin_lock(&ls->ls_recover_idr_lock); ls 300 fs/dlm/recover.c if (ls->ls_recover_list_count) ls 302 fs/dlm/recover.c spin_unlock(&ls->ls_recover_idr_lock); ls 309 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 313 fs/dlm/recover.c spin_lock(&ls->ls_recover_idr_lock); ls 318 fs/dlm/recover.c rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT); ls 323 fs/dlm/recover.c ls->ls_recover_list_count++; ls 327 fs/dlm/recover.c spin_unlock(&ls->ls_recover_idr_lock); ls 334 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 336 fs/dlm/recover.c spin_lock(&ls->ls_recover_idr_lock); ls 337 fs/dlm/recover.c idr_remove(&ls->ls_recover_idr, r->res_id); ls 339 fs/dlm/recover.c ls->ls_recover_list_count--; ls 340 fs/dlm/recover.c spin_unlock(&ls->ls_recover_idr_lock); ls 345 fs/dlm/recover.c static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) ls 349 fs/dlm/recover.c spin_lock(&ls->ls_recover_idr_lock); ls 350 fs/dlm/recover.c r = idr_find(&ls->ls_recover_idr, (int)id); ls 351 fs/dlm/recover.c spin_unlock(&ls->ls_recover_idr_lock); ls 355 fs/dlm/recover.c static void recover_idr_clear(struct dlm_ls *ls) ls 360 fs/dlm/recover.c spin_lock(&ls->ls_recover_idr_lock); ls 362 fs/dlm/recover.c idr_for_each_entry(&ls->ls_recover_idr, r, id) { ls 363 fs/dlm/recover.c idr_remove(&ls->ls_recover_idr, id); ls 366 fs/dlm/recover.c ls->ls_recover_list_count--; ls 371 fs/dlm/recover.c if (ls->ls_recover_list_count != 0) { ls 372 fs/dlm/recover.c log_error(ls, "warning: recover_list_count %d", ls 373 fs/dlm/recover.c ls->ls_recover_list_count); ls 374 fs/dlm/recover.c ls->ls_recover_list_count = 0; ls 376 fs/dlm/recover.c spin_unlock(&ls->ls_recover_idr_lock); ls 446 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 454 fs/dlm/recover.c is_removed = dlm_is_removed(ls, r->res_nodeid); ls 523 fs/dlm/recover.c int dlm_recover_masters(struct dlm_ls *ls) ls 528 fs/dlm/recover.c int nodir = dlm_no_directory(ls); ls 531 fs/dlm/recover.c log_rinfo(ls, "dlm_recover_masters"); ls 533 fs/dlm/recover.c down_read(&ls->ls_root_sem); ls 534 fs/dlm/recover.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 535 fs/dlm/recover.c if (dlm_recovery_stopped(ls)) { ls 536 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 551 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 555 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 557 fs/dlm/recover.c log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); ls 559 fs/dlm/recover.c error = dlm_wait_function(ls, &recover_idr_empty); ls 562 fs/dlm/recover.c recover_idr_clear(ls); ls 566 fs/dlm/recover.c int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) ls 571 fs/dlm/recover.c r = recover_idr_find(ls, rc->rc_id); ls 573 
fs/dlm/recover.c log_error(ls, "dlm_recover_master_reply no id %llx", ls 592 fs/dlm/recover.c if (recover_idr_empty(ls)) ls 593 fs/dlm/recover.c wake_up(&ls->ls_wait_general); ls 659 fs/dlm/recover.c int dlm_recover_locks(struct dlm_ls *ls) ls 664 fs/dlm/recover.c down_read(&ls->ls_root_sem); ls 665 fs/dlm/recover.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 674 fs/dlm/recover.c if (dlm_recovery_stopped(ls)) { ls 676 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 682 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 688 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 690 fs/dlm/recover.c log_rinfo(ls, "dlm_recover_locks %d out", count); ls 692 fs/dlm/recover.c error = dlm_wait_function(ls, &recover_list_empty); ls 695 fs/dlm/recover.c recover_list_clear(ls); ls 821 fs/dlm/recover.c struct dlm_ls *ls = r->res_ls; ls 837 fs/dlm/recover.c log_debug(ls, "recover_conversion %x set gr to rq %d", ls 841 fs/dlm/recover.c log_debug(ls, "recover_conversion %x set gr %d", ls 858 fs/dlm/recover.c void dlm_recover_rsbs(struct dlm_ls *ls) ls 863 fs/dlm/recover.c down_read(&ls->ls_root_sem); ls 864 fs/dlm/recover.c list_for_each_entry(r, &ls->ls_root_list, res_root_list) { ls 885 fs/dlm/recover.c up_read(&ls->ls_root_sem); ls 888 fs/dlm/recover.c log_rinfo(ls, "dlm_recover_rsbs %d done", count); ls 893 fs/dlm/recover.c int dlm_create_root_list(struct dlm_ls *ls) ls 899 fs/dlm/recover.c down_write(&ls->ls_root_sem); ls 900 fs/dlm/recover.c if (!list_empty(&ls->ls_root_list)) { ls 901 fs/dlm/recover.c log_error(ls, "root list not empty"); ls 906 fs/dlm/recover.c for (i = 0; i < ls->ls_rsbtbl_size; i++) { ls 907 fs/dlm/recover.c spin_lock(&ls->ls_rsbtbl[i].lock); ls 908 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { ls 910 fs/dlm/recover.c list_add(&r->res_root_list, &ls->ls_root_list); ls 914 fs/dlm/recover.c if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) ls 915 fs/dlm/recover.c log_error(ls, "dlm_create_root_list toss not empty"); ls 916 fs/dlm/recover.c spin_unlock(&ls->ls_rsbtbl[i].lock); ls 919 fs/dlm/recover.c up_write(&ls->ls_root_sem); ls 923 fs/dlm/recover.c void dlm_release_root_list(struct dlm_ls *ls) ls 927 fs/dlm/recover.c down_write(&ls->ls_root_sem); ls 928 fs/dlm/recover.c list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { ls 932 fs/dlm/recover.c up_write(&ls->ls_root_sem); ls 935 fs/dlm/recover.c void dlm_clear_toss(struct dlm_ls *ls) ls 942 fs/dlm/recover.c for (i = 0; i < ls->ls_rsbtbl_size; i++) { ls 943 fs/dlm/recover.c spin_lock(&ls->ls_rsbtbl[i].lock); ls 944 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { ls 947 fs/dlm/recover.c rb_erase(n, &ls->ls_rsbtbl[i].toss); ls 951 fs/dlm/recover.c spin_unlock(&ls->ls_rsbtbl[i].lock); ls 955 fs/dlm/recover.c log_rinfo(ls, "dlm_clear_toss %u done", count); ls 15 fs/dlm/recover.h int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)); ls 16 fs/dlm/recover.h uint32_t dlm_recover_status(struct dlm_ls *ls); ls 17 fs/dlm/recover.h void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status); ls 18 fs/dlm/recover.h int dlm_recover_members_wait(struct dlm_ls *ls); ls 19 fs/dlm/recover.h int dlm_recover_directory_wait(struct dlm_ls *ls); ls 20 fs/dlm/recover.h int dlm_recover_locks_wait(struct dlm_ls *ls); ls 21 fs/dlm/recover.h int dlm_recover_done_wait(struct dlm_ls *ls); ls 22 fs/dlm/recover.h int dlm_recover_masters(struct dlm_ls *ls); ls 23 fs/dlm/recover.h int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc); ls 24 
fs/dlm/recover.h int dlm_recover_locks(struct dlm_ls *ls); ls 26 fs/dlm/recover.h int dlm_create_root_list(struct dlm_ls *ls); ls 27 fs/dlm/recover.h void dlm_release_root_list(struct dlm_ls *ls); ls 28 fs/dlm/recover.h void dlm_clear_toss(struct dlm_ls *ls); ls 29 fs/dlm/recover.h void dlm_recover_rsbs(struct dlm_ls *ls); ls 31 fs/dlm/recoverd.c static int enable_locking(struct dlm_ls *ls, uint64_t seq) ls 35 fs/dlm/recoverd.c down_write(&ls->ls_recv_active); ls 37 fs/dlm/recoverd.c spin_lock(&ls->ls_recover_lock); ls 38 fs/dlm/recoverd.c if (ls->ls_recover_seq == seq) { ls 39 fs/dlm/recoverd.c set_bit(LSFL_RUNNING, &ls->ls_flags); ls 41 fs/dlm/recoverd.c up_write(&ls->ls_in_recovery); ls 42 fs/dlm/recoverd.c clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); ls 45 fs/dlm/recoverd.c spin_unlock(&ls->ls_recover_lock); ls 47 fs/dlm/recoverd.c up_write(&ls->ls_recv_active); ls 51 fs/dlm/recoverd.c static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) ls 56 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq); ls 58 fs/dlm/recoverd.c mutex_lock(&ls->ls_recoverd_active); ls 60 fs/dlm/recoverd.c dlm_callback_suspend(ls); ls 62 fs/dlm/recoverd.c dlm_clear_toss(ls); ls 69 fs/dlm/recoverd.c dlm_create_root_list(ls); ls 75 fs/dlm/recoverd.c error = dlm_recover_members(ls, rv, &neg); ls 77 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_members error %d", error); ls 81 fs/dlm/recoverd.c dlm_recover_dir_nodeid(ls); ls 83 fs/dlm/recoverd.c ls->ls_recover_dir_sent_res = 0; ls 84 fs/dlm/recoverd.c ls->ls_recover_dir_sent_msg = 0; ls 85 fs/dlm/recoverd.c ls->ls_recover_locks_in = 0; ls 87 fs/dlm/recoverd.c dlm_set_recover_status(ls, DLM_RS_NODES); ls 89 fs/dlm/recoverd.c error = dlm_recover_members_wait(ls); ls 91 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_members_wait error %d", error); ls 102 fs/dlm/recoverd.c error = dlm_recover_directory(ls); ls 104 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_directory error %d", error); ls 108 fs/dlm/recoverd.c dlm_set_recover_status(ls, DLM_RS_DIR); ls 110 fs/dlm/recoverd.c error = dlm_recover_directory_wait(ls); ls 112 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_directory_wait error %d", error); ls 116 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_directory %u out %u messages", ls 117 fs/dlm/recoverd.c ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg); ls 125 fs/dlm/recoverd.c dlm_recover_waiters_pre(ls); ls 127 fs/dlm/recoverd.c error = dlm_recovery_stopped(ls); ls 131 fs/dlm/recoverd.c if (neg || dlm_no_directory(ls)) { ls 136 fs/dlm/recoverd.c dlm_recover_purge(ls); ls 143 fs/dlm/recoverd.c error = dlm_recover_masters(ls); ls 145 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_masters error %d", error); ls 153 fs/dlm/recoverd.c error = dlm_recover_locks(ls); ls 155 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_locks error %d", error); ls 159 fs/dlm/recoverd.c dlm_set_recover_status(ls, DLM_RS_LOCKS); ls 161 fs/dlm/recoverd.c error = dlm_recover_locks_wait(ls); ls 163 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_locks_wait error %d", error); ls 167 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_locks %u in", ls 168 fs/dlm/recoverd.c ls->ls_recover_locks_in); ls 176 fs/dlm/recoverd.c dlm_recover_rsbs(ls); ls 183 fs/dlm/recoverd.c dlm_set_recover_status(ls, DLM_RS_LOCKS); ls 185 fs/dlm/recoverd.c error = dlm_recover_locks_wait(ls); ls 187 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_locks_wait error %d", error); ls 192 fs/dlm/recoverd.c dlm_release_root_list(ls); ls 200 fs/dlm/recoverd.c dlm_purge_requestqueue(ls); ls 202 
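Note: the fs/dlm/recoverd.c fragments above show ls_recover() moving through fixed phases (DLM_RS_NODES, DLM_RS_DIR, DLM_RS_LOCKS, DLM_RS_DONE): after each dlm_set_recover_status() it calls a dlm_recover_*_wait() helper that blocks until every lockspace member reports the same phase, and the dlm_wait_function() prototype listed from recover.h is the primitive underneath. A hedged approximation of that wait loop, assuming the internal dlm_ls fields (ls_wait_general) and helpers (dlm_recovery_stopped) from fs/dlm/dlm_internal.h; the real function's timeout and abort handling differs in detail.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_phase(struct dlm_ls *ls, int (*testfn)(struct dlm_ls *ls))
{
	/* re-test periodically; the reply handlers wake ls_wait_general as
	 * members report in, and a stopped recovery aborts the wait */
	while (!testfn(ls)) {
		wait_event_timeout(ls->ls_wait_general,
				   testfn(ls) || dlm_recovery_stopped(ls),
				   HZ);
		if (dlm_recovery_stopped(ls))
			return -EINTR;
	}
	return 0;
}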
fs/dlm/recoverd.c dlm_set_recover_status(ls, DLM_RS_DONE); ls 204 fs/dlm/recoverd.c error = dlm_recover_done_wait(ls); ls 206 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_done_wait error %d", error); ls 210 fs/dlm/recoverd.c dlm_clear_members_gone(ls); ls 212 fs/dlm/recoverd.c dlm_adjust_timeouts(ls); ls 214 fs/dlm/recoverd.c dlm_callback_resume(ls); ls 216 fs/dlm/recoverd.c error = enable_locking(ls, rv->seq); ls 218 fs/dlm/recoverd.c log_rinfo(ls, "enable_locking error %d", error); ls 222 fs/dlm/recoverd.c error = dlm_process_requestqueue(ls); ls 224 fs/dlm/recoverd.c log_rinfo(ls, "dlm_process_requestqueue error %d", error); ls 228 fs/dlm/recoverd.c error = dlm_recover_waiters_post(ls); ls 230 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover_waiters_post error %d", error); ls 234 fs/dlm/recoverd.c dlm_recover_grant(ls); ls 236 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms", ls 237 fs/dlm/recoverd.c (unsigned long long)rv->seq, ls->ls_generation, ls 239 fs/dlm/recoverd.c mutex_unlock(&ls->ls_recoverd_active); ls 241 fs/dlm/recoverd.c dlm_lsop_recover_done(ls); ls 245 fs/dlm/recoverd.c dlm_release_root_list(ls); ls 246 fs/dlm/recoverd.c log_rinfo(ls, "dlm_recover %llu error %d", ls 248 fs/dlm/recoverd.c mutex_unlock(&ls->ls_recoverd_active); ls 256 fs/dlm/recoverd.c static void do_ls_recovery(struct dlm_ls *ls) ls 260 fs/dlm/recoverd.c spin_lock(&ls->ls_recover_lock); ls 261 fs/dlm/recoverd.c rv = ls->ls_recover_args; ls 262 fs/dlm/recoverd.c ls->ls_recover_args = NULL; ls 263 fs/dlm/recoverd.c if (rv && ls->ls_recover_seq == rv->seq) ls 264 fs/dlm/recoverd.c clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); ls 265 fs/dlm/recoverd.c spin_unlock(&ls->ls_recover_lock); ls 268 fs/dlm/recoverd.c ls_recover(ls, rv); ls 276 fs/dlm/recoverd.c struct dlm_ls *ls; ls 278 fs/dlm/recoverd.c ls = dlm_find_lockspace_local(arg); ls 279 fs/dlm/recoverd.c if (!ls) { ls 284 fs/dlm/recoverd.c down_write(&ls->ls_in_recovery); ls 285 fs/dlm/recoverd.c set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); ls 286 fs/dlm/recoverd.c wake_up(&ls->ls_recover_lock_wait); ls 299 fs/dlm/recoverd.c if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) && ls 300 fs/dlm/recoverd.c !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { ls 307 fs/dlm/recoverd.c if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { ls 308 fs/dlm/recoverd.c down_write(&ls->ls_in_recovery); ls 309 fs/dlm/recoverd.c set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); ls 310 fs/dlm/recoverd.c wake_up(&ls->ls_recover_lock_wait); ls 313 fs/dlm/recoverd.c if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags)) ls 314 fs/dlm/recoverd.c do_ls_recovery(ls); ls 317 fs/dlm/recoverd.c if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)) ls 318 fs/dlm/recoverd.c up_write(&ls->ls_in_recovery); ls 320 fs/dlm/recoverd.c dlm_put_lockspace(ls); ls 324 fs/dlm/recoverd.c int dlm_recoverd_start(struct dlm_ls *ls) ls 329 fs/dlm/recoverd.c p = kthread_run(dlm_recoverd, ls, "dlm_recoverd"); ls 333 fs/dlm/recoverd.c ls->ls_recoverd_task = p; ls 337 fs/dlm/recoverd.c void dlm_recoverd_stop(struct dlm_ls *ls) ls 339 fs/dlm/recoverd.c kthread_stop(ls->ls_recoverd_task); ls 342 fs/dlm/recoverd.c void dlm_recoverd_suspend(struct dlm_ls *ls) ls 344 fs/dlm/recoverd.c wake_up(&ls->ls_wait_general); ls 345 fs/dlm/recoverd.c mutex_lock(&ls->ls_recoverd_active); ls 348 fs/dlm/recoverd.c void dlm_recoverd_resume(struct dlm_ls *ls) ls 350 fs/dlm/recoverd.c mutex_unlock(&ls->ls_recoverd_active); ls 15 fs/dlm/recoverd.h void dlm_recoverd_stop(struct dlm_ls *ls); ls 16 fs/dlm/recoverd.h int 
dlm_recoverd_start(struct dlm_ls *ls); ls 17 fs/dlm/recoverd.h void dlm_recoverd_suspend(struct dlm_ls *ls); ls 18 fs/dlm/recoverd.h void dlm_recoverd_resume(struct dlm_ls *ls); ls 32 fs/dlm/requestqueue.c void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) ls 43 fs/dlm/requestqueue.c e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF; ls 47 fs/dlm/requestqueue.c mutex_lock(&ls->ls_requestqueue_mutex); ls 48 fs/dlm/requestqueue.c list_add_tail(&e->list, &ls->ls_requestqueue); ls 49 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 63 fs/dlm/requestqueue.c int dlm_process_requestqueue(struct dlm_ls *ls) ls 69 fs/dlm/requestqueue.c mutex_lock(&ls->ls_requestqueue_mutex); ls 72 fs/dlm/requestqueue.c if (list_empty(&ls->ls_requestqueue)) { ls 73 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 77 fs/dlm/requestqueue.c e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); ls 78 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 82 fs/dlm/requestqueue.c log_limit(ls, "dlm_process_requestqueue msg %d from %d " ls 88 fs/dlm/requestqueue.c dlm_receive_message_saved(ls, &e->request, e->recover_seq); ls 90 fs/dlm/requestqueue.c mutex_lock(&ls->ls_requestqueue_mutex); ls 94 fs/dlm/requestqueue.c if (dlm_locking_stopped(ls)) { ls 95 fs/dlm/requestqueue.c log_debug(ls, "process_requestqueue abort running"); ls 96 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 116 fs/dlm/requestqueue.c void dlm_wait_requestqueue(struct dlm_ls *ls) ls 119 fs/dlm/requestqueue.c mutex_lock(&ls->ls_requestqueue_mutex); ls 120 fs/dlm/requestqueue.c if (list_empty(&ls->ls_requestqueue)) ls 122 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 125 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 128 fs/dlm/requestqueue.c static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) ls 133 fs/dlm/requestqueue.c if (!ls->ls_count) ls 136 fs/dlm/requestqueue.c if (dlm_is_removed(ls, nodeid)) ls 147 fs/dlm/requestqueue.c if (!dlm_no_directory(ls)) ls 153 fs/dlm/requestqueue.c void dlm_purge_requestqueue(struct dlm_ls *ls) ls 158 fs/dlm/requestqueue.c mutex_lock(&ls->ls_requestqueue_mutex); ls 159 fs/dlm/requestqueue.c list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ls 162 fs/dlm/requestqueue.c if (purge_request(ls, ms, e->nodeid)) { ls 167 fs/dlm/requestqueue.c mutex_unlock(&ls->ls_requestqueue_mutex); ls 14 fs/dlm/requestqueue.h void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms); ls 15 fs/dlm/requestqueue.h int dlm_process_requestqueue(struct dlm_ls *ls); ls 16 fs/dlm/requestqueue.h void dlm_wait_requestqueue(struct dlm_ls *ls); ls 17 fs/dlm/requestqueue.h void dlm_purge_requestqueue(struct dlm_ls *ls); ls 178 fs/dlm/user.c struct dlm_ls *ls; ls 186 fs/dlm/user.c ls = lkb->lkb_resource->res_ls; ls 187 fs/dlm/user.c mutex_lock(&ls->ls_clear_proc_locks); ls 233 fs/dlm/user.c mutex_unlock(&ls->ls_clear_proc_locks); ls 239 fs/dlm/user.c struct dlm_ls *ls; ls 244 fs/dlm/user.c ls = dlm_find_lockspace_local(proc->lockspace); ls 245 fs/dlm/user.c if (!ls) ls 265 fs/dlm/user.c error = dlm_user_convert(ls, ua, ls 270 fs/dlm/user.c error = dlm_user_adopt_orphan(ls, ua, ls 278 fs/dlm/user.c error = dlm_user_request(ls, ua, ls 286 fs/dlm/user.c dlm_put_lockspace(ls); ls 293 fs/dlm/user.c struct dlm_ls *ls; ls 297 fs/dlm/user.c ls = dlm_find_lockspace_local(proc->lockspace); ls 298 fs/dlm/user.c if (!ls) ls 310 fs/dlm/user.c error = 
dlm_user_cancel(ls, ua, params->flags, params->lkid); ls 312 fs/dlm/user.c error = dlm_user_unlock(ls, ua, params->flags, params->lkid, ls 315 fs/dlm/user.c dlm_put_lockspace(ls); ls 322 fs/dlm/user.c struct dlm_ls *ls; ls 325 fs/dlm/user.c ls = dlm_find_lockspace_local(proc->lockspace); ls 326 fs/dlm/user.c if (!ls) ls 329 fs/dlm/user.c error = dlm_user_deadlock(ls, params->flags, params->lkid); ls 331 fs/dlm/user.c dlm_put_lockspace(ls); ls 335 fs/dlm/user.c static int dlm_device_register(struct dlm_ls *ls, char *name) ls 341 fs/dlm/user.c if (ls->ls_device.name) ls 346 fs/dlm/user.c ls->ls_device.name = kzalloc(len, GFP_NOFS); ls 347 fs/dlm/user.c if (!ls->ls_device.name) ls 350 fs/dlm/user.c snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix, ls 352 fs/dlm/user.c ls->ls_device.fops = &device_fops; ls 353 fs/dlm/user.c ls->ls_device.minor = MISC_DYNAMIC_MINOR; ls 355 fs/dlm/user.c error = misc_register(&ls->ls_device); ls 357 fs/dlm/user.c kfree(ls->ls_device.name); ls 361 fs/dlm/user.c ls->ls_device.name = NULL; ls 367 fs/dlm/user.c int dlm_device_deregister(struct dlm_ls *ls) ls 372 fs/dlm/user.c if (!ls->ls_device.name) ls 375 fs/dlm/user.c misc_deregister(&ls->ls_device); ls 376 fs/dlm/user.c kfree(ls->ls_device.name); ls 383 fs/dlm/user.c struct dlm_ls *ls; ls 386 fs/dlm/user.c ls = dlm_find_lockspace_local(proc->lockspace); ls 387 fs/dlm/user.c if (!ls) ls 390 fs/dlm/user.c error = dlm_user_purge(ls, proc, params->nodeid, params->pid); ls 392 fs/dlm/user.c dlm_put_lockspace(ls); ls 399 fs/dlm/user.c struct dlm_ls *ls; ls 411 fs/dlm/user.c ls = dlm_find_lockspace_local(lockspace); ls 412 fs/dlm/user.c if (!ls) ls 415 fs/dlm/user.c error = dlm_device_register(ls, params->name); ls 416 fs/dlm/user.c dlm_put_lockspace(ls); ls 421 fs/dlm/user.c error = ls->ls_device.minor; ls 429 fs/dlm/user.c struct dlm_ls *ls; ls 435 fs/dlm/user.c ls = dlm_find_lockspace_device(params->minor); ls 436 fs/dlm/user.c if (!ls) ls 442 fs/dlm/user.c lockspace = ls->ls_local_handle; ls 443 fs/dlm/user.c dlm_put_lockspace(ls); ls 633 fs/dlm/user.c struct dlm_ls *ls; ls 635 fs/dlm/user.c ls = dlm_find_lockspace_device(iminor(inode)); ls 636 fs/dlm/user.c if (!ls) ls 641 fs/dlm/user.c dlm_put_lockspace(ls); ls 645 fs/dlm/user.c proc->lockspace = ls->ls_local_handle; ls 660 fs/dlm/user.c struct dlm_ls *ls; ls 662 fs/dlm/user.c ls = dlm_find_lockspace_local(proc->lockspace); ls 663 fs/dlm/user.c if (!ls) ls 668 fs/dlm/user.c dlm_clear_proc_locks(ls, proc); ls 677 fs/dlm/user.c dlm_put_lockspace(ls); ls 678 fs/dlm/user.c dlm_put_lockspace(ls); /* for the find in device_open() */ ls 13 fs/dlm/user.h int dlm_device_deregister(struct dlm_ls *ls); ls 1188 fs/gfs2/file.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1206 fs/gfs2/file.c return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); ls 1208 fs/gfs2/file.c return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); ls 1210 fs/gfs2/file.c return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); ls 1495 fs/gfs2/glock.c struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; ls 1500 fs/gfs2/glock.c if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { ls 253 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; ls 275 fs/gfs2/lock_dlm.c return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname, ls 282 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 307 fs/gfs2/lock_dlm.c error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, ls 319 fs/gfs2/lock_dlm.c 
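Note: dlm_device_register()/dlm_device_deregister() in the fs/dlm/user.c fragments above publish each lockspace to user space as a misc character device with a dynamically assigned minor, which device_open() later looks up by minor. A minimal, hedged example of that registration pattern; the device name, fops and module wrapper below are invented for illustration and are not the dlm's actual ones.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick the minor */
	.name  = "dlm-demo",		/* hypothetical name */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	/* creates the character device node once the minor is assigned */
	return misc_register(&demo_dev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");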
struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; ls 320 fs/gfs2/lock_dlm.c dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl); ls 465 fs/gfs2/lock_dlm.c static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen, ls 469 fs/gfs2/lock_dlm.c memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE); ls 474 fs/gfs2/lock_dlm.c static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, ls 478 fs/gfs2/lock_dlm.c memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE); ls 480 fs/gfs2/lock_dlm.c memcpy(ls->ls_control_lvb, &gen, sizeof(__le32)); ls 491 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = arg; ls 492 fs/gfs2/lock_dlm.c complete(&ls->ls_sync_wait); ls 497 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 500 fs/gfs2/lock_dlm.c error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls); ls 507 fs/gfs2/lock_dlm.c wait_for_completion(&ls->ls_sync_wait); ls 520 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 527 fs/gfs2/lock_dlm.c error = dlm_lock(ls->ls_dlm, mode, lksb, flags, ls 529 fs/gfs2/lock_dlm.c 0, sync_wait_cb, ls, NULL); ls 536 fs/gfs2/lock_dlm.c wait_for_completion(&ls->ls_sync_wait); ls 550 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 551 fs/gfs2/lock_dlm.c return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock"); ls 556 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 558 fs/gfs2/lock_dlm.c &ls->ls_mounted_lksb, "mounted_lock"); ls 563 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 564 fs/gfs2/lock_dlm.c return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock"); ls 569 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 571 fs/gfs2/lock_dlm.c &ls->ls_control_lksb, "control_lock"); ls 577 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 584 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 594 fs/gfs2/lock_dlm.c if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || ls 595 fs/gfs2/lock_dlm.c test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { ls 596 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 599 fs/gfs2/lock_dlm.c block_gen = ls->ls_recover_block; ls 600 fs/gfs2/lock_dlm.c start_gen = ls->ls_recover_start; ls 601 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 631 fs/gfs2/lock_dlm.c control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); ls 633 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 634 fs/gfs2/lock_dlm.c if (block_gen != ls->ls_recover_block || ls 635 fs/gfs2/lock_dlm.c start_gen != ls->ls_recover_start) { ls 637 fs/gfs2/lock_dlm.c start_gen, block_gen, ls->ls_recover_block); ls 638 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 643 fs/gfs2/lock_dlm.c recover_size = ls->ls_recover_size; ls 656 fs/gfs2/lock_dlm.c if (ls->ls_recover_result[i] != LM_RD_SUCCESS) ls 659 fs/gfs2/lock_dlm.c ls->ls_recover_result[i] = 0; ls 661 fs/gfs2/lock_dlm.c if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) ls 664 fs/gfs2/lock_dlm.c __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); ls 674 fs/gfs2/lock_dlm.c if (!ls->ls_recover_submit[i]) ls 676 fs/gfs2/lock_dlm.c if (ls->ls_recover_submit[i] < lvb_gen) ls 677 fs/gfs2/lock_dlm.c ls->ls_recover_submit[i] = 0; ls 684 fs/gfs2/lock_dlm.c if (!ls->ls_recover_submit[i]) ls 686 fs/gfs2/lock_dlm.c if (ls->ls_recover_submit[i] < start_gen) { ls 687 fs/gfs2/lock_dlm.c ls->ls_recover_submit[i] = 0; ls 688 fs/gfs2/lock_dlm.c __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); ls 699 
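Note: sync_wait_cb(), sync_lock() and sync_unlock() above turn the asynchronous dlm_lock()/dlm_unlock() API into blocking calls by handing a completion to the AST callback. A hedged sketch of the same idea; the struct name, flag choice and error handling are simplified, and the dlm_lock() signature is the one from include/linux/dlm.h.

#include <linux/dlm.h>
#include <linux/completion.h>
#include <linux/string.h>

struct sync_req {
	struct dlm_lksb lksb;
	struct completion done;
};

/* AST callback: the DLM calls this when the request has completed */
static void sync_ast(void *arg)
{
	struct sync_req *req = arg;

	complete(&req->done);
}

/* request @name in @mode, wait for the AST, return the DLM status */
static int sync_lock(dlm_lockspace_t *ls, int mode, const char *name,
		     uint32_t flags, struct sync_req *req)
{
	int error;

	init_completion(&req->done);
	error = dlm_lock(ls, mode, &req->lksb, flags,
			 (void *)name, strlen(name), 0,
			 sync_ast, req, NULL);
	if (error)
		return error;

	wait_for_completion(&req->done);
	return req->lksb.sb_status;
}

The gfs2 callers pass flags such as DLM_LKF_VALBLK (visible above) and reuse the same lksb across calls on the control and mounted locks.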
fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 702 fs/gfs2/lock_dlm.c control_lvb_write(ls, start_gen, ls->ls_lvb_bits); ls 722 fs/gfs2/lock_dlm.c if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { ls 738 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 739 fs/gfs2/lock_dlm.c if (ls->ls_recover_block == block_gen && ls 740 fs/gfs2/lock_dlm.c ls->ls_recover_start == start_gen) { ls 741 fs/gfs2/lock_dlm.c clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 742 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 747 fs/gfs2/lock_dlm.c start_gen, block_gen, ls->ls_recover_block); ls 748 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 754 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 760 fs/gfs2/lock_dlm.c memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb)); ls 761 fs/gfs2/lock_dlm.c memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb)); ls 762 fs/gfs2/lock_dlm.c memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE); ls 763 fs/gfs2/lock_dlm.c ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb; ls 764 fs/gfs2/lock_dlm.c init_completion(&ls->ls_sync_wait); ls 766 fs/gfs2/lock_dlm.c set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 860 fs/gfs2/lock_dlm.c control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); ls 871 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 872 fs/gfs2/lock_dlm.c clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 873 fs/gfs2/lock_dlm.c set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); ls 874 fs/gfs2/lock_dlm.c set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); ls 875 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 890 fs/gfs2/lock_dlm.c if (!all_jid_bits_clear(ls->ls_lvb_bits)) { ls 896 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 897 fs/gfs2/lock_dlm.c block_gen = ls->ls_recover_block; ls 898 fs/gfs2/lock_dlm.c start_gen = ls->ls_recover_start; ls 899 fs/gfs2/lock_dlm.c mount_gen = ls->ls_recover_mount; ls 912 fs/gfs2/lock_dlm.c ls->ls_recover_flags); ls 914 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 923 fs/gfs2/lock_dlm.c lvb_gen, ls->ls_recover_flags); ls 924 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 932 fs/gfs2/lock_dlm.c lvb_gen, ls->ls_recover_flags); ls 933 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 937 fs/gfs2/lock_dlm.c clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 938 fs/gfs2/lock_dlm.c set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags); ls 939 fs/gfs2/lock_dlm.c memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); ls 940 fs/gfs2/lock_dlm.c memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); ls 941 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 952 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 957 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 958 fs/gfs2/lock_dlm.c start_gen = ls->ls_recover_start; ls 959 fs/gfs2/lock_dlm.c block_gen = ls->ls_recover_block; ls 961 fs/gfs2/lock_dlm.c if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) || ls 962 fs/gfs2/lock_dlm.c !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || ls 963 fs/gfs2/lock_dlm.c !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { ls 966 fs/gfs2/lock_dlm.c start_gen, block_gen, ls->ls_recover_flags); ls 967 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 980 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 983 fs/gfs2/lock_dlm.c wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY, ls 988 fs/gfs2/lock_dlm.c clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); ls 989 
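Note: the control-lock LVB handled above is laid out as a little-endian 32-bit recovery generation at the front, followed by a bitmap of journal IDs that still need recovery (the test_bit_le/__set_bit_le/__clear_bit_le calls at JID_BITMAP_OFFSET). A hedged sketch of read/write helpers for that layout; LVB_SIZE and JID_OFFSET below are illustrative stand-ins for GDLM_LVB_SIZE and JID_BITMAP_OFFSET.

#include <linux/string.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#define LVB_SIZE	32		/* stand-in for GDLM_LVB_SIZE */
#define JID_OFFSET	sizeof(__le32)	/* stand-in for JID_BITMAP_OFFSET */

/* copy the LVB out and decode the generation stored in its first word */
static void lvb_read(const char *lvb, u32 *gen_out, char *bits)
{
	__le32 gen;

	memcpy(bits, lvb, LVB_SIZE);
	memcpy(&gen, bits, sizeof(__le32));
	*gen_out = le32_to_cpu(gen);
}

/* write the bitmap back and stamp a new generation in front of it */
static void lvb_write(char *lvb, u32 gen_in, const char *bits)
{
	__le32 gen = cpu_to_le32(gen_in);

	memcpy(lvb, bits, LVB_SIZE);
	memcpy(lvb, &gen, sizeof(__le32));
}

/* flag one journal id as needing recovery in the bitmap portion */
static void jid_set(char *bits, int jid)
{
	__set_bit_le(jid, bits + JID_OFFSET);
}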
fs/gfs2/lock_dlm.c set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags); ls 990 fs/gfs2/lock_dlm.c memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t)); ls 991 fs/gfs2/lock_dlm.c memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); ls 992 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 994 fs/gfs2/lock_dlm.c memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); ls 995 fs/gfs2/lock_dlm.c control_lvb_write(ls, start_gen, ls->ls_lvb_bits); ls 1019 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1025 fs/gfs2/lock_dlm.c if (!ls->ls_lvb_bits) { ls 1026 fs/gfs2/lock_dlm.c ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); ls 1027 fs/gfs2/lock_dlm.c if (!ls->ls_lvb_bits) ls 1037 fs/gfs2/lock_dlm.c old_size = ls->ls_recover_size; ls 1052 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1053 fs/gfs2/lock_dlm.c memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t)); ls 1054 fs/gfs2/lock_dlm.c memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t)); ls 1055 fs/gfs2/lock_dlm.c kfree(ls->ls_recover_submit); ls 1056 fs/gfs2/lock_dlm.c kfree(ls->ls_recover_result); ls 1057 fs/gfs2/lock_dlm.c ls->ls_recover_submit = submit; ls 1058 fs/gfs2/lock_dlm.c ls->ls_recover_result = result; ls 1059 fs/gfs2/lock_dlm.c ls->ls_recover_size = new_size; ls 1060 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1064 fs/gfs2/lock_dlm.c static void free_recover_size(struct lm_lockstruct *ls) ls 1066 fs/gfs2/lock_dlm.c kfree(ls->ls_lvb_bits); ls 1067 fs/gfs2/lock_dlm.c kfree(ls->ls_recover_submit); ls 1068 fs/gfs2/lock_dlm.c kfree(ls->ls_recover_result); ls 1069 fs/gfs2/lock_dlm.c ls->ls_recover_submit = NULL; ls 1070 fs/gfs2/lock_dlm.c ls->ls_recover_result = NULL; ls 1071 fs/gfs2/lock_dlm.c ls->ls_recover_size = 0; ls 1072 fs/gfs2/lock_dlm.c ls->ls_lvb_bits = NULL; ls 1080 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1082 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1083 fs/gfs2/lock_dlm.c ls->ls_recover_block = ls->ls_recover_start; ls 1084 fs/gfs2/lock_dlm.c set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); ls 1086 fs/gfs2/lock_dlm.c if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) || ls 1087 fs/gfs2/lock_dlm.c test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { ls 1088 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1091 fs/gfs2/lock_dlm.c set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 1092 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1101 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1104 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1105 fs/gfs2/lock_dlm.c if (ls->ls_recover_size < jid + 1) { ls 1107 fs/gfs2/lock_dlm.c jid, ls->ls_recover_block, ls->ls_recover_size); ls 1108 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1112 fs/gfs2/lock_dlm.c if (ls->ls_recover_submit[jid]) { ls 1114 fs/gfs2/lock_dlm.c jid, ls->ls_recover_block, ls->ls_recover_submit[jid]); ls 1116 fs/gfs2/lock_dlm.c ls->ls_recover_submit[jid] = ls->ls_recover_block; ls 1117 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1126 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1131 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1132 fs/gfs2/lock_dlm.c ls->ls_recover_start = generation; ls 1134 fs/gfs2/lock_dlm.c if (!ls->ls_recover_mount) { ls 1135 fs/gfs2/lock_dlm.c ls->ls_recover_mount = generation; ls 1136 fs/gfs2/lock_dlm.c ls->ls_jid = our_slot - 1; ls 1139 fs/gfs2/lock_dlm.c if (!test_bit(DFL_UNMOUNT, 
&ls->ls_recover_flags)) ls 1142 fs/gfs2/lock_dlm.c clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); ls 1144 fs/gfs2/lock_dlm.c wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); ls 1145 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1153 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1155 fs/gfs2/lock_dlm.c if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) ls 1159 fs/gfs2/lock_dlm.c if (jid == ls->ls_jid) ls 1162 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1163 fs/gfs2/lock_dlm.c if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) { ls 1164 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1167 fs/gfs2/lock_dlm.c if (ls->ls_recover_size < jid + 1) { ls 1169 fs/gfs2/lock_dlm.c jid, ls->ls_recover_size); ls 1170 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1177 fs/gfs2/lock_dlm.c ls->ls_recover_result[jid] = result; ls 1183 fs/gfs2/lock_dlm.c if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) ls 1186 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1197 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1208 fs/gfs2/lock_dlm.c spin_lock_init(&ls->ls_recover_spin); ls 1209 fs/gfs2/lock_dlm.c ls->ls_recover_flags = 0; ls 1210 fs/gfs2/lock_dlm.c ls->ls_recover_mount = 0; ls 1211 fs/gfs2/lock_dlm.c ls->ls_recover_start = 0; ls 1212 fs/gfs2/lock_dlm.c ls->ls_recover_block = 0; ls 1213 fs/gfs2/lock_dlm.c ls->ls_recover_size = 0; ls 1214 fs/gfs2/lock_dlm.c ls->ls_recover_submit = NULL; ls 1215 fs/gfs2/lock_dlm.c ls->ls_recover_result = NULL; ls 1216 fs/gfs2/lock_dlm.c ls->ls_lvb_bits = NULL; ls 1244 fs/gfs2/lock_dlm.c &ls->ls_dlm); ls 1256 fs/gfs2/lock_dlm.c free_recover_size(ls); ls 1257 fs/gfs2/lock_dlm.c set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags); ls 1278 fs/gfs2/lock_dlm.c ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); ls 1285 fs/gfs2/lock_dlm.c dlm_release_lockspace(ls->ls_dlm, 2); ls 1287 fs/gfs2/lock_dlm.c free_recover_size(ls); ls 1294 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1297 fs/gfs2/lock_dlm.c if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) ls 1307 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 1309 fs/gfs2/lock_dlm.c if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) ls 1314 fs/gfs2/lock_dlm.c spin_lock(&ls->ls_recover_spin); ls 1315 fs/gfs2/lock_dlm.c set_bit(DFL_UNMOUNT, &ls->ls_recover_flags); ls 1316 fs/gfs2/lock_dlm.c spin_unlock(&ls->ls_recover_spin); ls 1321 fs/gfs2/lock_dlm.c if (ls->ls_dlm) { ls 1322 fs/gfs2/lock_dlm.c dlm_release_lockspace(ls->ls_dlm, 2); ls 1323 fs/gfs2/lock_dlm.c ls->ls_dlm = NULL; ls 1326 fs/gfs2/lock_dlm.c free_recover_size(ls); ls 936 fs/gfs2/ops_fstype.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 957 fs/gfs2/ops_fstype.c ls->ls_ops = lm; ls 958 fs/gfs2/ops_fstype.c ls->ls_first = 1; ls 974 fs/gfs2/ops_fstype.c ls->ls_jid = option; ls 984 fs/gfs2/ops_fstype.c ls->ls_first = option; ls 280 fs/gfs2/recovery.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 282 fs/gfs2/recovery.c ls->ls_recover_jid_done = jid; ls 283 fs/gfs2/recovery.c ls->ls_recover_jid_status = message; ls 327 fs/gfs2/sys.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 331 fs/gfs2/sys.c if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags)) ls 339 fs/gfs2/sys.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 347 fs/gfs2/sys.c set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 349 fs/gfs2/sys.c clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ls 383 fs/gfs2/sys.c struct lm_lockstruct 
*ls = &sdp->sd_lockstruct; ls 384 fs/gfs2/sys.c return sprintf(buf, "%d\n", ls->ls_first); ls 416 fs/gfs2/sys.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 417 fs/gfs2/sys.c return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags)); ls 472 fs/gfs2/sys.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 473 fs/gfs2/sys.c return sprintf(buf, "%d\n", ls->ls_recover_jid_done); ls 478 fs/gfs2/sys.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 479 fs/gfs2/sys.c return sprintf(buf, "%d\n", ls->ls_recover_jid_status); ls 38 fs/gfs2/util.c struct lm_lockstruct *ls = &sdp->sd_lockstruct; ls 39 fs/gfs2/util.c const struct lm_lockops *lm = ls->ls_ops; ls 405 fs/nfsd/blocklayout.c nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls) ls 407 fs/nfsd/blocklayout.c struct nfs4_client *clp = ls->ls_stid.sc_client; ls 408 fs/nfsd/blocklayout.c struct block_device *bdev = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_bdev; ls 550 fs/nfsd/nfs4callback.c const struct nfs4_layout_stateid *ls, ls 559 fs/nfsd/nfs4callback.c *p++ = cpu_to_be32(ls->ls_layout_type); ls 564 fs/nfsd/nfs4callback.c encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle); ls 570 fs/nfsd/nfs4callback.c encode_stateid4(xdr, &ls->ls_recall_sid); ls 580 fs/nfsd/nfs4callback.c const struct nfs4_layout_stateid *ls = ls 589 fs/nfsd/nfs4callback.c encode_cb_layout4args(xdr, ls, &hdr); ls 157 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls = layoutstateid(stid); ls 158 fs/nfsd/nfs4layouts.c struct nfs4_client *clp = ls->ls_stid.sc_client; ls 159 fs/nfsd/nfs4layouts.c struct nfs4_file *fp = ls->ls_stid.sc_file; ls 161 fs/nfsd/nfs4layouts.c trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid); ls 164 fs/nfsd/nfs4layouts.c list_del_init(&ls->ls_perclnt); ls 168 fs/nfsd/nfs4layouts.c list_del_init(&ls->ls_perfile); ls 171 fs/nfsd/nfs4layouts.c if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls) ls 172 fs/nfsd/nfs4layouts.c vfs_setlease(ls->ls_file->nf_file, F_UNLCK, NULL, (void **)&ls); ls 173 fs/nfsd/nfs4layouts.c nfsd_file_put(ls->ls_file); ls 175 fs/nfsd/nfs4layouts.c if (ls->ls_recalled) ls 176 fs/nfsd/nfs4layouts.c atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls); ls 178 fs/nfsd/nfs4layouts.c kmem_cache_free(nfs4_layout_stateid_cache, ls); ls 182 fs/nfsd/nfs4layouts.c nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) ls 187 fs/nfsd/nfs4layouts.c if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls) ls 198 fs/nfsd/nfs4layouts.c fl->fl_owner = ls; ls 200 fs/nfsd/nfs4layouts.c fl->fl_file = ls->ls_file->nf_file; ls 217 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls; ls 228 fs/nfsd/nfs4layouts.c ls = layoutstateid(stp); ls 229 fs/nfsd/nfs4layouts.c INIT_LIST_HEAD(&ls->ls_perclnt); ls 230 fs/nfsd/nfs4layouts.c INIT_LIST_HEAD(&ls->ls_perfile); ls 231 fs/nfsd/nfs4layouts.c spin_lock_init(&ls->ls_lock); ls 232 fs/nfsd/nfs4layouts.c INIT_LIST_HEAD(&ls->ls_layouts); ls 233 fs/nfsd/nfs4layouts.c mutex_init(&ls->ls_mutex); ls 234 fs/nfsd/nfs4layouts.c ls->ls_layout_type = layout_type; ls 235 fs/nfsd/nfs4layouts.c nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops, ls 239 fs/nfsd/nfs4layouts.c ls->ls_file = nfsd_file_get(fp->fi_deleg_file); ls 241 fs/nfsd/nfs4layouts.c ls->ls_file = find_any_file(fp); ls 242 fs/nfsd/nfs4layouts.c BUG_ON(!ls->ls_file); ls 244 fs/nfsd/nfs4layouts.c if (nfsd4_layout_setlease(ls)) { ls 245 fs/nfsd/nfs4layouts.c nfsd_file_put(ls->ls_file); ls 247 fs/nfsd/nfs4layouts.c kmem_cache_free(nfs4_layout_stateid_cache, ls); ls 253 fs/nfsd/nfs4layouts.c 
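Note: nfsd4_alloc_layout_stateid() above carves a layout stateid out of nfs4_layout_stateid_cache and initializes its per-client and per-file list heads, its spinlock and its mutex before wiring up the recall callback and the lease on the backing file. A hedged, heavily simplified sketch of that allocate-and-initialize step; struct layout_state below is a stand-in, not the real nfs4_layout_stateid, and the callback/lease setup is omitted.

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>

struct layout_state {			/* simplified stand-in */
	struct list_head perclnt;	/* link on the owning client */
	struct list_head perfile;	/* link on the backing file */
	struct list_head layouts;	/* granted layout segments */
	spinlock_t lock;
	struct mutex mutex;
	u32 layout_type;
};

/* assume this cache was created with kmem_cache_create() at module init */
static struct kmem_cache *layout_state_cache;

static struct layout_state *alloc_layout_state(u32 layout_type)
{
	struct layout_state *ls;

	ls = kmem_cache_zalloc(layout_state_cache, GFP_KERNEL);
	if (!ls)
		return NULL;

	INIT_LIST_HEAD(&ls->perclnt);
	INIT_LIST_HEAD(&ls->perfile);
	INIT_LIST_HEAD(&ls->layouts);
	spin_lock_init(&ls->lock);
	mutex_init(&ls->mutex);
	ls->layout_type = layout_type;
	return ls;
}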
list_add(&ls->ls_perclnt, &clp->cl_lo_states); ls 257 fs/nfsd/nfs4layouts.c list_add(&ls->ls_perfile, &fp->fi_lo_states); ls 260 fs/nfsd/nfs4layouts.c trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid); ls 261 fs/nfsd/nfs4layouts.c return ls; ls 269 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls; ls 289 fs/nfsd/nfs4layouts.c ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type); ls 293 fs/nfsd/nfs4layouts.c if (!ls) ls 295 fs/nfsd/nfs4layouts.c mutex_lock(&ls->ls_mutex); ls 297 fs/nfsd/nfs4layouts.c ls = container_of(stid, struct nfs4_layout_stateid, ls_stid); ls 300 fs/nfsd/nfs4layouts.c mutex_lock(&ls->ls_mutex); ls 303 fs/nfsd/nfs4layouts.c if (layout_type != ls->ls_layout_type) ls 307 fs/nfsd/nfs4layouts.c *lsp = ls; ls 311 fs/nfsd/nfs4layouts.c mutex_unlock(&ls->ls_mutex); ls 319 fs/nfsd/nfs4layouts.c nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls) ls 321 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 322 fs/nfsd/nfs4layouts.c if (ls->ls_recalled) ls 325 fs/nfsd/nfs4layouts.c ls->ls_recalled = true; ls 326 fs/nfsd/nfs4layouts.c atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls); ls 327 fs/nfsd/nfs4layouts.c if (list_empty(&ls->ls_layouts)) ls 330 fs/nfsd/nfs4layouts.c trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid); ls 332 fs/nfsd/nfs4layouts.c refcount_inc(&ls->ls_stid.sc_count); ls 333 fs/nfsd/nfs4layouts.c nfsd4_run_cb(&ls->ls_recall); ls 336 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 383 fs/nfsd/nfs4layouts.c nfsd4_recall_conflict(struct nfs4_layout_stateid *ls) ls 385 fs/nfsd/nfs4layouts.c struct nfs4_file *fp = ls->ls_stid.sc_file; ls 392 fs/nfsd/nfs4layouts.c if (l != ls) { ls 402 fs/nfsd/nfs4layouts.c nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls) ls 405 fs/nfsd/nfs4layouts.c struct nfs4_file *fp = ls->ls_stid.sc_file; ls 410 fs/nfsd/nfs4layouts.c nfserr = nfsd4_recall_conflict(ls); ls 413 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 414 fs/nfsd/nfs4layouts.c list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { ls 418 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 425 fs/nfsd/nfs4layouts.c new->lo_state = ls; ls 428 fs/nfsd/nfs4layouts.c nfserr = nfsd4_recall_conflict(ls); ls 431 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 432 fs/nfsd/nfs4layouts.c list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { ls 437 fs/nfsd/nfs4layouts.c refcount_inc(&ls->ls_stid.sc_count); ls 438 fs/nfsd/nfs4layouts.c list_add_tail(&new->lo_perstate, &ls->ls_layouts); ls 441 fs/nfsd/nfs4layouts.c nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid); ls 442 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 493 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls; ls 501 fs/nfsd/nfs4layouts.c &ls); ls 507 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 508 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) { ls 514 fs/nfsd/nfs4layouts.c if (!list_empty(&ls->ls_layouts)) { ls 516 fs/nfsd/nfs4layouts.c nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid); ls 519 fs/nfsd/nfs4layouts.c trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid); ls 520 fs/nfsd/nfs4layouts.c nfs4_unhash_stid(&ls->ls_stid); ls 523 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 525 fs/nfsd/nfs4layouts.c mutex_unlock(&ls->ls_mutex); ls 526 fs/nfsd/nfs4layouts.c nfs4_put_stid(&ls->ls_stid); ls 536 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; ls 544 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { ls 545 fs/nfsd/nfs4layouts.c if (ls->ls_layout_type != 
lrp->lr_layout_type) ls 549 fs/nfsd/nfs4layouts.c !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, ls 553 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 554 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) { ls 559 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 568 fs/nfsd/nfs4layouts.c nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls, ls 571 fs/nfsd/nfs4layouts.c spin_lock(&ls->ls_lock); ls 572 fs/nfsd/nfs4layouts.c list_splice_init(&ls->ls_layouts, reaplist); ls 573 fs/nfsd/nfs4layouts.c spin_unlock(&ls->ls_lock); ls 579 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; ls 583 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) ls 584 fs/nfsd/nfs4layouts.c nfsd4_return_all_layouts(ls, &reaplist); ls 593 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls, *n; ls 597 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) { ls 598 fs/nfsd/nfs4layouts.c if (ls->ls_stid.sc_client == clp) ls 599 fs/nfsd/nfs4layouts.c nfsd4_return_all_layouts(ls, &reaplist); ls 607 fs/nfsd/nfs4layouts.c nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) ls 609 fs/nfsd/nfs4layouts.c struct nfs4_client *clp = ls->ls_stid.sc_client; ls 629 fs/nfsd/nfs4layouts.c argv[2] = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_id; ls 643 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls = ls 646 fs/nfsd/nfs4layouts.c mutex_lock(&ls->ls_mutex); ls 647 fs/nfsd/nfs4layouts.c nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid); ls 648 fs/nfsd/nfs4layouts.c mutex_unlock(&ls->ls_mutex); ls 654 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls = ls 669 fs/nfsd/nfs4layouts.c if (list_empty(&ls->ls_layouts)) ls 674 fs/nfsd/nfs4layouts.c nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id); ls 689 fs/nfsd/nfs4layouts.c trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid); ls 691 fs/nfsd/nfs4layouts.c ops = nfsd4_layout_ops[ls->ls_layout_type]; ls 693 fs/nfsd/nfs4layouts.c ops->fence_client(ls); ls 695 fs/nfsd/nfs4layouts.c nfsd4_cb_layout_fail(ls); ls 698 fs/nfsd/nfs4layouts.c trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid); ls 707 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *ls = ls 711 fs/nfsd/nfs4layouts.c trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid); ls 713 fs/nfsd/nfs4layouts.c nfsd4_return_all_layouts(ls, &reaplist); ls 715 fs/nfsd/nfs4layouts.c nfs4_put_stid(&ls->ls_stid); ls 1604 fs/nfsd/nfs4proc.c struct nfs4_layout_stateid *ls; ls 1653 fs/nfsd/nfs4proc.c true, lgp->lg_layout_type, &ls); ls 1660 fs/nfsd/nfs4proc.c if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls)) ls 1668 fs/nfsd/nfs4proc.c nfserr = nfsd4_insert_layout(lgp, ls); ls 1671 fs/nfsd/nfs4proc.c mutex_unlock(&ls->ls_mutex); ls 1672 fs/nfsd/nfs4proc.c nfs4_put_stid(&ls->ls_stid); ls 1693 fs/nfsd/nfs4proc.c struct nfs4_layout_stateid *ls; ls 1722 fs/nfsd/nfs4proc.c &ls); ls 1732 fs/nfsd/nfs4proc.c mutex_unlock(&ls->ls_mutex); ls 1742 fs/nfsd/nfs4proc.c nfs4_put_stid(&ls->ls_stid); ls 2460 fs/nfsd/nfs4state.c struct nfs4_layout_stateid *ls; ls 2463 fs/nfsd/nfs4state.c ls = container_of(st, struct nfs4_layout_stateid, ls_stid); ls 2464 fs/nfsd/nfs4state.c file = ls->ls_file; ls 40 fs/nfsd/pnfs.h void (*fence_client)(struct nfs4_layout_stateid *ls); ls 58 fs/nfsd/pnfs.h struct nfs4_layout_stateid *ls); ls 1119 fs/xfs/libxfs/xfs_format.h #define XFS_SUMOFFS(mp,ls,bb) ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb))) ls 2327 kernel/rcu/tree_plugin.h int ls = rcu_nocb_gp_stride; ls 2335 
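Note: the kernel/rcu/tree_plugin.h fragments that start here and continue just below size the no-CBs callback groups: when rcu_nocb_gp_stride is -1, the stride ls becomes nr_cpu_ids / int_sqrt(nr_cpu_ids), so each no-CBs grace-period kthread covers on the order of sqrt(nr_cpu_ids) CPUs, and a CPU c belongs to the group ending at DIV_ROUND_UP(c + 1, ls) * ls. A small self-contained user-space check of that arithmetic; the CPU count and the isqrt() helper are illustrative, not the kernel's int_sqrt().

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned isqrt(unsigned x)	/* simple integer square root */
{
	unsigned r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	unsigned nr_cpu_ids = 64;			/* example CPU count */
	unsigned ls = nr_cpu_ids / isqrt(nr_cpu_ids);	/* stride: 64 / 8 = 8 */
	unsigned cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu += 13) {
		unsigned nl = DIV_ROUND_UP(cpu + 1, ls) * ls;

		printf("cpu %2u -> callback group ends at cpu %2u\n", cpu, nl - 1);
	}
	return 0;
}

So with 64 CPUs every 8 consecutive CPUs share one grace-period kthread; CPU 13, for example, lands in the group covering CPUs 8-15.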
kernel/rcu/tree_plugin.h if (ls == -1) { ls 2336 kernel/rcu/tree_plugin.h ls = nr_cpu_ids / int_sqrt(nr_cpu_ids); ls 2337 kernel/rcu/tree_plugin.h rcu_nocb_gp_stride = ls; ls 2350 kernel/rcu/tree_plugin.h nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls; ls 80 security/apparmor/include/label.h void aa_labelset_destroy(struct aa_labelset *ls); ls 81 security/apparmor/include/label.h void aa_labelset_init(struct aa_labelset *ls); ls 274 security/apparmor/include/label.h void aa_labelset_destroy(struct aa_labelset *ls); ls 275 security/apparmor/include/label.h void aa_labelset_init(struct aa_labelset *ls); ls 288 security/apparmor/include/label.h struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l); ls 290 security/apparmor/include/label.h bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old, ls 565 security/apparmor/label.c struct aa_labelset *ls = labels_set(label); ls 567 security/apparmor/label.c AA_BUG(!ls); ls 569 security/apparmor/label.c lockdep_assert_held_write(&ls->lock); ls 578 security/apparmor/label.c rb_erase(&label->node, &ls->root); ls 601 security/apparmor/label.c struct aa_labelset *ls = labels_set(old); ls 603 security/apparmor/label.c AA_BUG(!ls); ls 606 security/apparmor/label.c lockdep_assert_held_write(&ls->lock); ls 613 security/apparmor/label.c rb_replace_node(&old->node, &new->node, &ls->root); ls 635 security/apparmor/label.c static struct aa_label *__label_insert(struct aa_labelset *ls, ls 640 security/apparmor/label.c AA_BUG(!ls); ls 642 security/apparmor/label.c AA_BUG(labels_set(label) != ls); ls 643 security/apparmor/label.c lockdep_assert_held_write(&ls->lock); ls 647 security/apparmor/label.c new = &ls->root.rb_node; ls 674 security/apparmor/label.c rb_insert_color(&label->node, &ls->root); ls 744 security/apparmor/label.c struct aa_labelset *ls = labels_set(label); ls 748 security/apparmor/label.c AA_BUG(!ls); ls 750 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 752 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 780 security/apparmor/label.c struct aa_labelset *ls = labels_set(old); ls 782 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 785 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 786 security/apparmor/label.c ls = labels_set(new); ls 787 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 789 security/apparmor/label.c l = __label_insert(ls, new, true); ls 791 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 808 security/apparmor/label.c struct aa_labelset *ls; ls 816 security/apparmor/label.c ls = vec_labelset(vec, n); ls 817 security/apparmor/label.c read_lock_irqsave(&ls->lock, flags); ls 819 security/apparmor/label.c read_unlock_irqrestore(&ls->lock, flags); ls 829 security/apparmor/label.c struct aa_labelset *ls; ls 839 security/apparmor/label.c ls = labels_set(&vec[len - 1]->label); ls 851 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 852 security/apparmor/label.c label = __label_insert(ls, new, false); ls 853 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 898 security/apparmor/label.c struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *label) ls 903 security/apparmor/label.c AA_BUG(!ls); ls 908 security/apparmor/label.c read_lock_irqsave(&ls->lock, flags); ls 910 security/apparmor/label.c read_unlock_irqrestore(&ls->lock, flags); ls 915 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 916 
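Note: __label_insert() in the security/apparmor/label.c fragments above is a standard rb-tree insertion performed while the labelset's rwlock is held for writing (the callers use write_lock_irqsave()/write_unlock_irqrestore()). A hedged sketch of that walk-and-link pattern with simplified types and a plain string key in place of AppArmor's label comparison.

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct label {				/* stand-in for struct aa_label */
	struct rb_node node;
	const char *key;
};

struct labelset {			/* stand-in for struct aa_labelset */
	rwlock_t lock;
	struct rb_root root;
};

/* insert @l or return the existing entry; caller holds set->lock for write */
static struct label *labelset_insert(struct labelset *set, struct label *l)
{
	struct rb_node **new = &set->root.rb_node, *parent = NULL;

	while (*new) {
		struct label *this = rb_entry(*new, struct label, node);
		int cmp = strcmp(l->key, this->key);

		parent = *new;
		if (cmp < 0)
			new = &(*new)->rb_left;
		else if (cmp > 0)
			new = &(*new)->rb_right;
		else
			return this;	/* duplicate: keep what is already there */
	}

	rb_link_node(&l->node, parent, new);
	rb_insert_color(&l->node, &set->root);
	return l;
}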
security/apparmor/label.c l = __label_insert(ls, label, false); ls 917 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 1024 security/apparmor/label.c struct aa_labelset *ls; ls 1075 security/apparmor/label.c ls = labels_set(new); ls 1076 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 1078 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 1112 security/apparmor/label.c static struct aa_label *__label_find_merge(struct aa_labelset *ls, ls 1118 security/apparmor/label.c AA_BUG(!ls); ls 1125 security/apparmor/label.c node = ls->root.rb_node; ls 1155 security/apparmor/label.c struct aa_labelset *ls; ls 1166 security/apparmor/label.c ls = labelset_of_merge(a, b); ls 1167 security/apparmor/label.c read_lock_irqsave(&ls->lock, flags); ls 1168 security/apparmor/label.c label = __label_find_merge(ls, a, b); ls 1169 security/apparmor/label.c read_unlock_irqrestore(&ls->lock, flags); ls 1418 security/apparmor/label.c struct aa_labelset *ls; ls 1432 security/apparmor/label.c ls = labels_set(label); ls 1433 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 1439 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 1952 security/apparmor/label.c void aa_labelset_destroy(struct aa_labelset *ls) ls 1957 security/apparmor/label.c AA_BUG(!ls); ls 1959 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 1960 security/apparmor/label.c for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) { ls 1969 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 1975 security/apparmor/label.c void aa_labelset_init(struct aa_labelset *ls) ls 1977 security/apparmor/label.c AA_BUG(!ls); ls 1979 security/apparmor/label.c rwlock_init(&ls->lock); ls 1980 security/apparmor/label.c ls->root = RB_ROOT; ls 1983 security/apparmor/label.c static struct aa_label *labelset_next_stale(struct aa_labelset *ls) ls 1989 security/apparmor/label.c AA_BUG(!ls); ls 1991 security/apparmor/label.c read_lock_irqsave(&ls->lock, flags); ls 1993 security/apparmor/label.c __labelset_for_each(ls, node) { ls 2004 security/apparmor/label.c read_unlock_irqrestore(&ls->lock, flags); ls 2024 security/apparmor/label.c struct aa_labelset *ls; ls 2039 security/apparmor/label.c ls = labels_set(label); ls 2040 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 2062 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 2064 security/apparmor/label.c write_lock_irqsave(&ls->lock, flags); ls 2074 security/apparmor/label.c write_unlock_irqrestore(&ls->lock, flags); ls 4340 tools/lib/traceevent/event-parse.c int ls = 0; ls 4349 tools/lib/traceevent/event-parse.c ls++; ls 4352 tools/lib/traceevent/event-parse.c ls = 2; ls 4360 tools/lib/traceevent/event-parse.c ls = 1; ls 4363 tools/lib/traceevent/event-parse.c ls = 1; ls 4400 tools/lib/traceevent/event-parse.c switch (ls) { ls 4411 tools/lib/traceevent/event-parse.c vsize = ls; /* ? 
*/ ls 4970 tools/lib/traceevent/event-parse.c int ls; ls 4986 tools/lib/traceevent/event-parse.c ls = 0; ls 5021 tools/lib/traceevent/event-parse.c ls--; ls 5024 tools/lib/traceevent/event-parse.c ls++; ls 5027 tools/lib/traceevent/event-parse.c ls = 2; ls 5048 tools/lib/traceevent/event-parse.c ls = 1; ls 5050 tools/lib/traceevent/event-parse.c ls = 2; ls 5118 tools/lib/traceevent/event-parse.c if (tep->long_size == 8 && ls == 1 && ls 5123 tools/lib/traceevent/event-parse.c if (ls == 1 && (p = strchr(format, 'l'))) ls 5127 tools/lib/traceevent/event-parse.c ls = 2; ls 5129 tools/lib/traceevent/event-parse.c switch (ls) { ls 5162 tools/lib/traceevent/event-parse.c do_warning_event(event, "bad count (%d)", ls); ls 404 tools/perf/builtin-lock.c struct lock_stat *ls; ls 413 tools/perf/builtin-lock.c ls = lock_stat_findnew(addr, name); ls 414 tools/perf/builtin-lock.c if (!ls) ls 416 tools/perf/builtin-lock.c if (ls->discard) ls 434 tools/perf/builtin-lock.c ls->nr_trylock++; ls 436 tools/perf/builtin-lock.c ls->nr_readlock++; ls 439 tools/perf/builtin-lock.c ls->nr_acquired++; ls 445 tools/perf/builtin-lock.c ls->nr_acquired++; ls 456 tools/perf/builtin-lock.c ls->discard = 1; ls 466 tools/perf/builtin-lock.c ls->nr_acquire++; ls 476 tools/perf/builtin-lock.c struct lock_stat *ls; ls 485 tools/perf/builtin-lock.c ls = lock_stat_findnew(addr, name); ls 486 tools/perf/builtin-lock.c if (!ls) ls 488 tools/perf/builtin-lock.c if (ls->discard) ls 507 tools/perf/builtin-lock.c ls->wait_time_total += contended_term; ls 508 tools/perf/builtin-lock.c if (contended_term < ls->wait_time_min) ls 509 tools/perf/builtin-lock.c ls->wait_time_min = contended_term; ls 510 tools/perf/builtin-lock.c if (ls->wait_time_max < contended_term) ls 511 tools/perf/builtin-lock.c ls->wait_time_max = contended_term; ls 517 tools/perf/builtin-lock.c ls->discard = 1; ls 528 tools/perf/builtin-lock.c ls->nr_acquired++; ls 529 tools/perf/builtin-lock.c ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0; ls 539 tools/perf/builtin-lock.c struct lock_stat *ls; ls 547 tools/perf/builtin-lock.c ls = lock_stat_findnew(addr, name); ls 548 tools/perf/builtin-lock.c if (!ls) ls 550 tools/perf/builtin-lock.c if (ls->discard) ls 572 tools/perf/builtin-lock.c ls->discard = 1; ls 583 tools/perf/builtin-lock.c ls->nr_contended++; ls 584 tools/perf/builtin-lock.c ls->avg_wait_time = ls->wait_time_total/ls->nr_contended; ls 594 tools/perf/builtin-lock.c struct lock_stat *ls; ls 602 tools/perf/builtin-lock.c ls = lock_stat_findnew(addr, name); ls 603 tools/perf/builtin-lock.c if (!ls) ls 605 tools/perf/builtin-lock.c if (ls->discard) ls 625 tools/perf/builtin-lock.c ls->nr_release++; ls 633 tools/perf/builtin-lock.c ls->discard = 1; ls 641 tools/perf/builtin-lock.c ls->nr_release++;
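Note: the tools/perf/builtin-lock.c handlers above (acquire/acquired/contended/release) accumulate per-lock wait-time statistics: total, min, max and a running average recomputed from nr_contended. A small self-contained user-space sketch of that bookkeeping; the field names roughly follow struct lock_stat as seen above, the sample values are invented, and the minimum uses a zero sentinel rather than whatever perf initializes it to.

#include <stdio.h>
#include <stdint.h>

struct lock_stat {
	uint64_t nr_contended;
	uint64_t wait_time_total;
	uint64_t wait_time_min;
	uint64_t wait_time_max;
	uint64_t avg_wait_time;
};

/* fold one contended-wait interval (in ns) into the running statistics */
static void account_wait(struct lock_stat *ls, uint64_t contended_term)
{
	ls->nr_contended++;
	ls->wait_time_total += contended_term;
	if (ls->wait_time_min == 0 || contended_term < ls->wait_time_min)
		ls->wait_time_min = contended_term;
	if (contended_term > ls->wait_time_max)
		ls->wait_time_max = contended_term;
	ls->avg_wait_time = ls->wait_time_total / ls->nr_contended;
}

int main(void)
{
	struct lock_stat ls = { 0 };
	uint64_t samples[] = { 1200, 300, 4500 };	/* invented wait times */
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		account_wait(&ls, samples[i]);

	printf("contended=%llu total=%llu min=%llu max=%llu avg=%llu\n",
	       (unsigned long long)ls.nr_contended,
	       (unsigned long long)ls.wait_time_total,
	       (unsigned long long)ls.wait_time_min,
	       (unsigned long long)ls.wait_time_max,
	       (unsigned long long)ls.avg_wait_time);
	return 0;
}

The tracepoint handlers above additionally set ls->discard when the acquire/acquired/contended/release sequence looks inconsistent, which this sketch leaves out.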