md                 22 arch/arm/include/asm/efi.h int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
md                 23 arch/arm/include/asm/efi.h int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
md                 42 arch/arm/include/asm/mach/map.h extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
md                 13 arch/arm/kernel/efi.c 	efi_memory_desc_t *md = data;
md                 16 arch/arm/kernel/efi.c 	if (md->attribute & EFI_MEMORY_RO)
md                 18 arch/arm/kernel/efi.c 	if (md->attribute & EFI_MEMORY_XP)
md                 25 arch/arm/kernel/efi.c 				       efi_memory_desc_t *md)
md                 29 arch/arm/kernel/efi.c 	base = md->virt_addr;
md                 30 arch/arm/kernel/efi.c 	size = md->num_pages << EFI_PAGE_SHIFT;
md                 40 arch/arm/kernel/efi.c 		return apply_to_page_range(mm, base, size, set_permissions, md);
md                 45 arch/arm/kernel/efi.c int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
md                 48 arch/arm/kernel/efi.c 		.virtual	= md->virt_addr,
md                 49 arch/arm/kernel/efi.c 		.pfn		= __phys_to_pfn(md->phys_addr),
md                 50 arch/arm/kernel/efi.c 		.length		= md->num_pages * EFI_PAGE_SIZE,
md                 58 arch/arm/kernel/efi.c 	if (md->attribute & EFI_MEMORY_WB)
md                 60 arch/arm/kernel/efi.c 	else if (md->attribute & EFI_MEMORY_WT)
md                 62 arch/arm/kernel/efi.c 	else if (md->attribute & EFI_MEMORY_WC)
md                 72 arch/arm/kernel/efi.c 	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
md                 73 arch/arm/kernel/efi.c 		return efi_set_mapping_permissions(mm, md);
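
In the arch/arm EFI hits above, md is an EFI memory descriptor whose attribute bits are translated into page permissions: EFI_MEMORY_RO strips write access and EFI_MEMORY_XP strips execute. A minimal standalone sketch of that translation; the struct below is a toy stand-in for efi_memory_desc_t, and the attribute bit values are taken from the UEFI spec, not from the hits themselves:

/* Toy model of the set_permissions() decision in arch/arm/kernel/efi.c.
 * Illustrative only; not the kernel's types. */
#include <stdint.h>
#include <stdio.h>

#define EFI_MEMORY_XP (1ULL << 14)  /* execute-protect, per UEFI spec */
#define EFI_MEMORY_RO (1ULL << 17)  /* read-only, per UEFI spec */
#define EFI_PAGE_SHIFT 12           /* EFI pages are always 4 KiB */

struct efi_md {                     /* stand-in for efi_memory_desc_t */
    uint64_t attribute;
    uint64_t virt_addr;
    uint64_t num_pages;
};

static void describe_permissions(const struct efi_md *md)
{
    uint64_t base = md->virt_addr;
    uint64_t size = md->num_pages << EFI_PAGE_SHIFT;  /* pages -> bytes */

    printf("[%#llx, %#llx): %s, %s\n",
           (unsigned long long)base,
           (unsigned long long)(base + size),
           (md->attribute & EFI_MEMORY_RO) ? "read-only" : "read-write",
           (md->attribute & EFI_MEMORY_XP) ? "no-exec" : "exec");
}

int main(void)
{
    struct efi_md md = { EFI_MEMORY_RO | EFI_MEMORY_XP, 0xffff0000, 4 };

    describe_permissions(&md);  /* [0xffff0000, 0xffff4000): read-only, no-exec */
    return 0;
}
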
md                847 arch/arm/mm/mmu.c 					struct map_desc *md,
md                855 arch/arm/mm/mmu.c 	addr = md->virtual;
md                856 arch/arm/mm/mmu.c 	phys = __pfn_to_phys(md->pfn);
md                857 arch/arm/mm/mmu.c 	length = PAGE_ALIGN(md->length);
md                861 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
md                873 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
md                877 arch/arm/mm/mmu.c 	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
md                879 arch/arm/mm/mmu.c 		       (long long)__pfn_to_phys((u64)md->pfn), addr);
md                887 arch/arm/mm/mmu.c 	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
md                907 arch/arm/mm/mmu.c static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
md                916 arch/arm/mm/mmu.c 	type = &mem_types[md->type];
md                922 arch/arm/mm/mmu.c 	if (md->pfn >= 0x100000) {
md                923 arch/arm/mm/mmu.c 		create_36bit_mapping(mm, md, type, ng);
md                928 arch/arm/mm/mmu.c 	addr = md->virtual & PAGE_MASK;
md                929 arch/arm/mm/mmu.c 	phys = __pfn_to_phys(md->pfn);
md                930 arch/arm/mm/mmu.c 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
md                934 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys(md->pfn), addr);
md                957 arch/arm/mm/mmu.c static void __init create_mapping(struct map_desc *md)
md                959 arch/arm/mm/mmu.c 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
md                961 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
md                965 arch/arm/mm/mmu.c 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md                966 arch/arm/mm/mmu.c 	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
md                967 arch/arm/mm/mmu.c 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
md                969 arch/arm/mm/mmu.c 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
md                972 arch/arm/mm/mmu.c 	__create_mapping(&init_mm, md, early_alloc, false);
md                975 arch/arm/mm/mmu.c void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
md                979 arch/arm/mm/mmu.c 	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
md                984 arch/arm/mm/mmu.c 	__create_mapping(mm, md, late_alloc, ng);
md                992 arch/arm/mm/mmu.c 	struct map_desc *md;
md               1004 arch/arm/mm/mmu.c 	for (md = io_desc; nr; md++, nr--) {
md               1005 arch/arm/mm/mmu.c 		create_mapping(md);
md               1008 arch/arm/mm/mmu.c 		vm->addr = (void *)(md->virtual & PAGE_MASK);
md               1009 arch/arm/mm/mmu.c 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
md               1010 arch/arm/mm/mmu.c 		vm->phys_addr = __pfn_to_phys(md->pfn);
md               1012 arch/arm/mm/mmu.c 		vm->flags |= VM_ARM_MTYPE(md->type);
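
The create_mapping()/__create_mapping() hits above show how an arbitrary map_desc is normalized to whole pages: the virtual address is rounded down to a page boundary and the length is grown by the slack that rounding exposed, then rounded up. A self-contained restatement of that arithmetic, assuming the usual 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long virtual = 0xc0001234UL;  /* deliberately unaligned */
    unsigned long length  = 0x100UL;

    unsigned long addr = virtual & PAGE_MASK;
    unsigned long len  = PAGE_ALIGN(length + (virtual & ~PAGE_MASK));

    /* 0xc0001234 rounds down to 0xc0001000; 0x100 bytes plus the 0x234
     * bytes of slack still fit in one page, so len comes out as 0x1000 */
    printf("addr=%#lx len=%#lx\n", addr, len);
    return 0;
}
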
md                 21 arch/arm64/include/asm/efi.h int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
md                 22 arch/arm64/include/asm/efi.h int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
md                 20 arch/arm64/kernel/efi.c static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
md                 22 arch/arm64/kernel/efi.c 	u64 attr = md->attribute;
md                 23 arch/arm64/kernel/efi.c 	u32 type = md->type;
md                 28 arch/arm64/kernel/efi.c 	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
md                 59 arch/arm64/kernel/efi.c int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
md                 61 arch/arm64/kernel/efi.c 	pteval_t prot_val = create_mapping_protection(md);
md                 62 arch/arm64/kernel/efi.c 	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md                 63 arch/arm64/kernel/efi.c 				   md->type == EFI_RUNTIME_SERVICES_DATA);
md                 65 arch/arm64/kernel/efi.c 	if (!PAGE_ALIGNED(md->phys_addr) ||
md                 66 arch/arm64/kernel/efi.c 	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
md                 79 arch/arm64/kernel/efi.c 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md                 80 arch/arm64/kernel/efi.c 			   md->num_pages << EFI_PAGE_SHIFT,
md                 87 arch/arm64/kernel/efi.c 	efi_memory_desc_t *md = data;
md                 90 arch/arm64/kernel/efi.c 	if (md->attribute & EFI_MEMORY_RO)
md                 92 arch/arm64/kernel/efi.c 	if (md->attribute & EFI_MEMORY_XP)
md                 99 arch/arm64/kernel/efi.c 				       efi_memory_desc_t *md)
md                101 arch/arm64/kernel/efi.c 	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md                102 arch/arm64/kernel/efi.c 	       md->type != EFI_RUNTIME_SERVICES_DATA);
md                111 arch/arm64/kernel/efi.c 	return apply_to_page_range(mm, md->virt_addr,
md                112 arch/arm64/kernel/efi.c 				   md->num_pages << EFI_PAGE_SHIFT,
md                113 arch/arm64/kernel/efi.c 				   set_permissions, md);
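
The alignment check at arch/arm64/kernel/efi.c lines 65-66 exists because EFI always counts in 4 KiB pages while an arm64 kernel may run with larger pages, so a descriptor can be EFI-page aligned yet not kernel-page aligned. A sketch of the mismatch, assuming a 64 KiB kernel page size:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12               /* EFI pages are always 4 KiB */
#define PAGE_SHIFT     16               /* assumed: 64 KiB kernel pages */
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

int main(void)
{
    uint64_t phys_addr = 0x40003000;    /* 4 KiB aligned only */
    uint64_t num_pages = 4;             /* 16 KiB, counted in EFI pages */

    if (!PAGE_ALIGNED(phys_addr) ||
        !PAGE_ALIGNED(num_pages << EFI_PAGE_SHIFT))
        printf("descriptor is not aligned to the kernel page size\n");
    return 0;
}
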
md                258 arch/ia64/kernel/efi.c is_memory_available (efi_memory_desc_t *md)
md                260 arch/ia64/kernel/efi.c 	if (!(md->attribute & EFI_MEMORY_WB))
md                263 arch/ia64/kernel/efi.c 	switch (md->type) {
md                282 arch/ia64/kernel/efi.c #define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
md                291 arch/ia64/kernel/efi.c efi_md_end(efi_memory_desc_t *md)
md                293 arch/ia64/kernel/efi.c 	return (md->phys_addr + efi_md_size(md));
md                297 arch/ia64/kernel/efi.c efi_wb(efi_memory_desc_t *md)
md                299 arch/ia64/kernel/efi.c 	return (md->attribute & EFI_MEMORY_WB);
md                303 arch/ia64/kernel/efi.c efi_uc(efi_memory_desc_t *md)
md                305 arch/ia64/kernel/efi.c 	return (md->attribute & EFI_MEMORY_UC);
md                355 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md                365 arch/ia64/kernel/efi.c 		md = p;
md                366 arch/ia64/kernel/efi.c 		if (md->type != EFI_PAL_CODE)
md                371 arch/ia64/kernel/efi.c 			       "dropped @ %llx\n", md->phys_addr);
md                379 arch/ia64/kernel/efi.c 		vaddr = PAGE_OFFSET + md->phys_addr;
md                400 arch/ia64/kernel/efi.c 		if (efi_md_size(md) > IA64_GRANULE_SIZE)
md                408 arch/ia64/kernel/efi.c                        smp_processor_id(), md->phys_addr,
md                409 arch/ia64/kernel/efi.c                        md->phys_addr + efi_md_size(md),
md                412 arch/ia64/kernel/efi.c 		return __va(md->phys_addr);
md                564 arch/ia64/kernel/efi.c 		efi_memory_desc_t *md;
md                574 arch/ia64/kernel/efi.c 			md = p;
md                575 arch/ia64/kernel/efi.c 			size = md->num_pages << EFI_PAGE_SHIFT;
md                593 arch/ia64/kernel/efi.c 			       i, efi_md_typeattr_format(buf, sizeof(buf), md),
md                594 arch/ia64/kernel/efi.c 			       md->phys_addr,
md                595 arch/ia64/kernel/efi.c 			       md->phys_addr + efi_md_size(md), size, unit);
md                608 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md                617 arch/ia64/kernel/efi.c 		md = p;
md                618 arch/ia64/kernel/efi.c 		if (md->attribute & EFI_MEMORY_RUNTIME) {
md                623 arch/ia64/kernel/efi.c 			if (md->attribute & EFI_MEMORY_WB) {
md                624 arch/ia64/kernel/efi.c 				md->virt_addr = (u64) __va(md->phys_addr);
md                625 arch/ia64/kernel/efi.c 			} else if (md->attribute & EFI_MEMORY_UC) {
md                626 arch/ia64/kernel/efi.c 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
md                627 arch/ia64/kernel/efi.c 			} else if (md->attribute & EFI_MEMORY_WC) {
md                629 arch/ia64/kernel/efi.c 				md->virt_addr = ia64_remap(md->phys_addr,
md                638 arch/ia64/kernel/efi.c 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
md                640 arch/ia64/kernel/efi.c 			} else if (md->attribute & EFI_MEMORY_WT) {
md                642 arch/ia64/kernel/efi.c 				md->virt_addr = ia64_remap(md->phys_addr,
md                651 arch/ia64/kernel/efi.c 				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
md                693 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md                701 arch/ia64/kernel/efi.c 		md = p;
md                702 arch/ia64/kernel/efi.c 		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
md                703 arch/ia64/kernel/efi.c 			if (md->attribute & EFI_MEMORY_UC)
md                704 arch/ia64/kernel/efi.c 				return md->phys_addr;
md                713 arch/ia64/kernel/efi.c 	struct kern_memdesc *md;
md                715 arch/ia64/kernel/efi.c 	for (md = kern_memmap; md->start != ~0UL; md++) {
md                716 arch/ia64/kernel/efi.c 		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
md                717 arch/ia64/kernel/efi.c 			 return md;
md                726 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md                734 arch/ia64/kernel/efi.c 		md = p;
md                736 arch/ia64/kernel/efi.c 		if (phys_addr - md->phys_addr < efi_md_size(md))
md                737 arch/ia64/kernel/efi.c 			 return md;
md                746 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md                757 arch/ia64/kernel/efi.c 		md = p;
md                758 arch/ia64/kernel/efi.c 		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
md                767 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
md                769 arch/ia64/kernel/efi.c 	if (md)
md                770 arch/ia64/kernel/efi.c 		return md->type;
md                777 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
md                779 arch/ia64/kernel/efi.c 	if (md)
md                780 arch/ia64/kernel/efi.c 		return md->attribute;
md                789 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
md                792 arch/ia64/kernel/efi.c 	if (!md)
md                799 arch/ia64/kernel/efi.c 	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
md                801 arch/ia64/kernel/efi.c 		unsigned long md_end = efi_md_end(md);
md                806 arch/ia64/kernel/efi.c 		md = efi_memory_descriptor(md_end);
md                807 arch/ia64/kernel/efi.c 		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
md                809 arch/ia64/kernel/efi.c 	} while (md);
md                817 arch/ia64/kernel/efi.c 	struct kern_memdesc *md;
md                831 arch/ia64/kernel/efi.c 	md = kern_memory_descriptor(phys_addr);
md                832 arch/ia64/kernel/efi.c 	if (!md)
md                835 arch/ia64/kernel/efi.c 	attr = md->attribute;
md                837 arch/ia64/kernel/efi.c 		unsigned long md_end = kmd_end(md);
md                842 arch/ia64/kernel/efi.c 		md = kern_memory_descriptor(md_end);
md                843 arch/ia64/kernel/efi.c 		if (!md || md->attribute != attr)
md                845 arch/ia64/kernel/efi.c 	} while (md);
md                978 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md, *pmd = NULL, *check_md;
md                994 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
md                995 arch/ia64/kernel/efi.c 		md = p;
md                996 arch/ia64/kernel/efi.c 		if (!efi_wb(md)) {
md               1000 arch/ia64/kernel/efi.c 		    efi_md_end(pmd) != md->phys_addr) {
md               1001 arch/ia64/kernel/efi.c 			contig_low = GRANULEROUNDUP(md->phys_addr);
md               1002 arch/ia64/kernel/efi.c 			contig_high = efi_md_end(md);
md               1014 arch/ia64/kernel/efi.c 		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
md               1018 arch/ia64/kernel/efi.c 		as = max(contig_low, md->phys_addr);
md               1019 arch/ia64/kernel/efi.c 		ae = min(contig_high, efi_md_end(md));
md               1055 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md, *pmd = NULL, *check_md;
md               1065 arch/ia64/kernel/efi.c 	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
md               1066 arch/ia64/kernel/efi.c 		md = p;
md               1067 arch/ia64/kernel/efi.c 		if (!efi_wb(md)) {
md               1068 arch/ia64/kernel/efi.c 			if (efi_uc(md) &&
md               1069 arch/ia64/kernel/efi.c 			    (md->type == EFI_CONVENTIONAL_MEMORY ||
md               1070 arch/ia64/kernel/efi.c 			     md->type == EFI_BOOT_SERVICES_DATA)) {
md               1072 arch/ia64/kernel/efi.c 				k->start = md->phys_addr;
md               1073 arch/ia64/kernel/efi.c 				k->num_pages = md->num_pages;
md               1079 arch/ia64/kernel/efi.c 		    efi_md_end(pmd) != md->phys_addr) {
md               1080 arch/ia64/kernel/efi.c 			contig_low = GRANULEROUNDUP(md->phys_addr);
md               1081 arch/ia64/kernel/efi.c 			contig_high = efi_md_end(md);
md               1093 arch/ia64/kernel/efi.c 		if (!is_memory_available(md))
md               1100 arch/ia64/kernel/efi.c 		if (md->phys_addr < contig_low) {
md               1101 arch/ia64/kernel/efi.c 			lim = min(efi_md_end(md), contig_low);
md               1102 arch/ia64/kernel/efi.c 			if (efi_uc(md)) {
md               1105 arch/ia64/kernel/efi.c 				    kmd_end(k-1) == md->phys_addr) {
md               1107 arch/ia64/kernel/efi.c 						(lim - md->phys_addr)
md               1111 arch/ia64/kernel/efi.c 					k->start = md->phys_addr;
md               1112 arch/ia64/kernel/efi.c 					k->num_pages = (lim - md->phys_addr)
md               1119 arch/ia64/kernel/efi.c 			as = md->phys_addr;
md               1121 arch/ia64/kernel/efi.c 		if (efi_md_end(md) > contig_high) {
md               1122 arch/ia64/kernel/efi.c 			lim = max(md->phys_addr, contig_high);
md               1123 arch/ia64/kernel/efi.c 			if (efi_uc(md)) {
md               1124 arch/ia64/kernel/efi.c 				if (lim == md->phys_addr && k > kern_memmap &&
md               1126 arch/ia64/kernel/efi.c 				    kmd_end(k-1) == md->phys_addr) {
md               1127 arch/ia64/kernel/efi.c 					(k-1)->num_pages += md->num_pages;
md               1131 arch/ia64/kernel/efi.c 					k->num_pages = (efi_md_end(md) - lim)
md               1138 arch/ia64/kernel/efi.c 			ae = efi_md_end(md);
md               1152 arch/ia64/kernel/efi.c 		if (prev && kmd_end(prev) == md->phys_addr) {
md               1179 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md               1191 arch/ia64/kernel/efi.c 		md = p;
md               1193 arch/ia64/kernel/efi.c 		if (md->num_pages == 0) /* should not happen */
md               1199 arch/ia64/kernel/efi.c 		switch (md->type) {
md               1210 arch/ia64/kernel/efi.c 				if (md->attribute & EFI_MEMORY_WP) {
md               1213 arch/ia64/kernel/efi.c 				} else if (md->attribute == EFI_MEMORY_UC) {
md               1253 arch/ia64/kernel/efi.c 		res->start = md->phys_addr;
md               1254 arch/ia64/kernel/efi.c 		res->end = md->phys_addr + efi_md_size(md) - 1;
md               1290 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md               1298 arch/ia64/kernel/efi.c 		md = p;
md               1299 arch/ia64/kernel/efi.c 		if (!efi_wb(md))
md               1301 arch/ia64/kernel/efi.c 		start = ALIGN(md->phys_addr, alignment);
md               1302 arch/ia64/kernel/efi.c 		end = efi_md_end(md);
md               1331 arch/ia64/kernel/efi.c 	efi_memory_desc_t *md;
md               1340 arch/ia64/kernel/efi.c 		md = p;
md               1341 arch/ia64/kernel/efi.c 		if (efi_wb(md) && md->type == EFI_LOADER_DATA
md               1342 arch/ia64/kernel/efi.c 		    && md->phys_addr == address) {
md               1343 arch/ia64/kernel/efi.c 			ret = efi_md_size(md);
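
The ia64 hits define small span helpers (efi_md_size, efi_md_end, efi_wb) and, at file line 758, use the standard half-open interval test to decide whether a descriptor intersects a physical range. A self-contained restatement with a toy descriptor type:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12
#define EFI_MEMORY_WB  0x8ULL           /* write-back bit, per UEFI spec */

struct efi_md {                         /* stand-in for efi_memory_desc_t */
    uint64_t phys_addr;
    uint64_t num_pages;
    uint64_t attribute;
};

static uint64_t efi_md_size(const struct efi_md *md)
{
    return md->num_pages << EFI_PAGE_SHIFT;
}

static uint64_t efi_md_end(const struct efi_md *md)
{
    return md->phys_addr + efi_md_size(md);
}

static int efi_wb(const struct efi_md *md)
{
    return (md->attribute & EFI_MEMORY_WB) != 0;
}

/* does [phys_addr, end) intersect the descriptor's [phys_addr, efi_md_end)? */
static int md_intersects(const struct efi_md *md, uint64_t phys_addr, uint64_t end)
{
    return md->phys_addr < end && efi_md_end(md) > phys_addr;
}

int main(void)
{
    struct efi_md md = { 0x100000, 16, EFI_MEMORY_WB };  /* 64 KiB at 1 MiB */

    printf("wb=%d end=%#llx overlaps=%d\n", efi_wb(&md),
           (unsigned long long)efi_md_end(&md),
           md_intersects(&md, 0x10f000, 0x110000));      /* wb=1 ... overlaps=1 */
    return 0;
}
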
md                134 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
md                139 arch/mips/pci/msi-xlp.c 	spin_lock_irqsave(&md->msi_lock, flags);
md                140 arch/mips/pci/msi-xlp.c 	md->msi_enabled_mask |= 1u << vec;
md                142 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
md                143 arch/mips/pci/msi-xlp.c 				md->msi_enabled_mask);
md                145 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
md                146 arch/mips/pci/msi-xlp.c 	spin_unlock_irqrestore(&md->msi_lock, flags);
md                151 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
md                156 arch/mips/pci/msi-xlp.c 	spin_lock_irqsave(&md->msi_lock, flags);
md                157 arch/mips/pci/msi-xlp.c 	md->msi_enabled_mask &= ~(1u << vec);
md                159 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
md                160 arch/mips/pci/msi-xlp.c 				md->msi_enabled_mask);
md                162 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
md                163 arch/mips/pci/msi-xlp.c 	spin_unlock_irqrestore(&md->msi_lock, flags);
md                168 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
md                177 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
md                179 arch/mips/pci/msi-xlp.c 		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
md                207 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                214 arch/mips/pci/msi-xlp.c 	md = irq_data_get_irq_chip_data(d);
md                224 arch/mips/pci/msi-xlp.c 	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);
md                227 arch/mips/pci/msi-xlp.c 		nlm_pic_ack(md->node->picbase,
md                296 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                305 arch/mips/pci/msi-xlp.c 	md = irq_get_chip_data(xirq);
md                308 arch/mips/pci/msi-xlp.c 	spin_lock_irqsave(&md->msi_lock, flags);
md                309 arch/mips/pci/msi-xlp.c 	if (md->msi_alloc_mask == 0) {
md                322 arch/mips/pci/msi-xlp.c 	msivec = fls(md->msi_alloc_mask);
md                324 arch/mips/pci/msi-xlp.c 		spin_unlock_irqrestore(&md->msi_lock, flags);
md                327 arch/mips/pci/msi-xlp.c 	md->msi_alloc_mask |= (1u << msivec);
md                328 arch/mips/pci/msi-xlp.c 	spin_unlock_irqrestore(&md->msi_lock, flags);
md                403 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                412 arch/mips/pci/msi-xlp.c 	md = irq_get_chip_data(xirq);
md                415 arch/mips/pci/msi-xlp.c 	spin_lock_irqsave(&md->msi_lock, flags);
md                417 arch/mips/pci/msi-xlp.c 	if (md->msix_alloc_mask == 0)
md                421 arch/mips/pci/msi-xlp.c 	t = fls(md->msix_alloc_mask);
md                423 arch/mips/pci/msi-xlp.c 		spin_unlock_irqrestore(&md->msi_lock, flags);
md                426 arch/mips/pci/msi-xlp.c 	md->msix_alloc_mask |= (1u << t);
md                427 arch/mips/pci/msi-xlp.c 	spin_unlock_irqrestore(&md->msi_lock, flags);
md                469 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                476 arch/mips/pci/msi-xlp.c 	md = kzalloc(sizeof(*md), GFP_KERNEL);
md                477 arch/mips/pci/msi-xlp.c 	spin_lock_init(&md->msi_lock);
md                478 arch/mips/pci/msi-xlp.c 	md->msi_enabled_mask = 0;
md                479 arch/mips/pci/msi-xlp.c 	md->msi_alloc_mask = 0;
md                480 arch/mips/pci/msi-xlp.c 	md->msix_alloc_mask = 0;
md                481 arch/mips/pci/msi-xlp.c 	md->node = nodep;
md                482 arch/mips/pci/msi-xlp.c 	md->lnkbase = nlm_get_pcie_base(node, link);
md                488 arch/mips/pci/msi-xlp.c 		irq_set_chip_data(i, md);
md                495 arch/mips/pci/msi-xlp.c 			nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i +
md                511 arch/mips/pci/msi-xlp.c 		irq_set_chip_data(irq, md);
md                517 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                523 arch/mips/pci/msi-xlp.c 	md = irq_get_chip_data(irqbase);
md                525 arch/mips/pci/msi-xlp.c 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
md                526 arch/mips/pci/msi-xlp.c 						md->msi_enabled_mask;
md                528 arch/mips/pci/msi-xlp.c 		status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) &
md                529 arch/mips/pci/msi-xlp.c 						md->msi_enabled_mask;
md                539 arch/mips/pci/msi-xlp.c 		nlm_pic_ack(md->node->picbase,
md                542 arch/mips/pci/msi-xlp.c 		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
md                547 arch/mips/pci/msi-xlp.c 	struct xlp_msi_data *md;
md                553 arch/mips/pci/msi-xlp.c 	md = irq_get_chip_data(irqbase);
md                555 arch/mips/pci/msi-xlp.c 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
md                557 arch/mips/pci/msi-xlp.c 		status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS);
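
In the msi-xlp hits, md is per-PCIe-link MSI state: one 32-bit mask of enabled vectors and one of allocated vectors, updated under md->msi_lock. Since vectors are handed out bottom-up, fls() of the allocation mask (1-based index of the highest set bit, 0 for an empty mask) yields the next free slot. A sketch of just the mask bookkeeping, with the locking omitted and a portable fls substitute:

#include <stdio.h>

struct xlp_msi_state {                 /* toy stand-in for struct xlp_msi_data */
    unsigned int msi_enabled_mask;
    unsigned int msi_alloc_mask;
};

static int fls32(unsigned int x)       /* 1-based highest set bit, 0 if none */
{
    int r = 0;

    while (x) {
        r++;
        x >>= 1;
    }
    return r;
}

static int msi_alloc_vec(struct xlp_msi_state *md)
{
    int vec = fls32(md->msi_alloc_mask);  /* next free, assuming bottom-up fill */

    if (vec >= 32)
        return -1;                        /* all 32 vectors taken on this link */
    md->msi_alloc_mask |= 1u << vec;
    return vec;
}

int main(void)
{
    struct xlp_msi_state md = { 0, 0 };
    int vec = msi_alloc_vec(&md);

    md.msi_enabled_mask |= 1u << vec;     /* mirrors the enable path above */
    printf("vec=%d enabled=%#x alloc=%#x\n",
           vec, md.msi_enabled_mask, md.msi_alloc_mask);
    return 0;
}
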
md                 92 arch/sparc/kernel/mdesc.c static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
md                 98 arch/sparc/kernel/mdesc.c static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
md                337 arch/sparc/kernel/mdesc.c static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
md                350 arch/sparc/kernel/mdesc.c 	idp = mdesc_get_property(md, node, "id", NULL);
md                351 arch/sparc/kernel/mdesc.c 	name = mdesc_get_property(md, node, "name", NULL);
md                352 arch/sparc/kernel/mdesc.c 	parent_cfg_hdlp = parent_cfg_handle(md, node);
md                391 arch/sparc/kernel/mdesc.c static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
md                397 arch/sparc/kernel/mdesc.c 	idp = mdesc_get_property(md, node, "id", NULL);
md               1137 arch/sparc/mm/init_64.c static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
md               1142 arch/sparc/mm/init_64.c 	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
md               1143 arch/sparc/mm/init_64.c 		u64 target = mdesc_arc_target(md, arc);
md               1146 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, target,
md               1154 arch/sparc/mm/init_64.c static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
md               1160 arch/sparc/mm/init_64.c 	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
md               1161 arch/sparc/mm/init_64.c 		u64 target = mdesc_arc_target(md, arc);
md               1162 arch/sparc/mm/init_64.c 		const char *name = mdesc_node_name(md, target);
md               1168 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, target, "latency", NULL);
md               1181 arch/sparc/mm/init_64.c 	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
md               1187 arch/sparc/mm/init_64.c 	struct mdesc_handle *md;
md               1205 arch/sparc/mm/init_64.c 	md = mdesc_grab();
md               1209 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, grp, "group") {
md               1210 arch/sparc/mm/init_64.c 		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
md               1217 arch/sparc/mm/init_64.c 	mdesc_release(md);
md               1255 arch/sparc/mm/init_64.c static int __init grab_mlgroups(struct mdesc_handle *md)
md               1261 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
md               1275 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
md               1281 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node, "latency", NULL);
md               1283 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node, "address-match", NULL);
md               1285 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node, "address-mask", NULL);
md               1296 arch/sparc/mm/init_64.c static int __init grab_mblocks(struct mdesc_handle *md)
md               1302 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "mblock")
md               1316 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "mblock") {
md               1320 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node, "base", NULL);
md               1322 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node, "size", NULL);
md               1324 arch/sparc/mm/init_64.c 		val = mdesc_get_property(md, node,
md               1342 arch/sparc/mm/init_64.c static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
md               1349 arch/sparc/mm/init_64.c 	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
md               1350 arch/sparc/mm/init_64.c 		u64 target = mdesc_arc_target(md, arc);
md               1351 arch/sparc/mm/init_64.c 		const char *name = mdesc_node_name(md, target);
md               1356 arch/sparc/mm/init_64.c 		id = mdesc_get_property(md, target, "id", NULL);
md               1398 arch/sparc/mm/init_64.c static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
md               1403 arch/sparc/mm/init_64.c 	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
md               1405 arch/sparc/mm/init_64.c 		u64 target = mdesc_arc_target(md, arc);
md               1417 arch/sparc/mm/init_64.c static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
md               1424 arch/sparc/mm/init_64.c 	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
md               1425 arch/sparc/mm/init_64.c 		u64 target = mdesc_arc_target(md, arc);
md               1455 arch/sparc/mm/init_64.c static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
md               1461 arch/sparc/mm/init_64.c 	numa_parse_mdesc_group_cpus(md, grp, &mask);
md               1474 arch/sparc/mm/init_64.c 	return numa_attach_mlgroup(md, grp, index);
md               1479 arch/sparc/mm/init_64.c 	struct mdesc_handle *md = mdesc_grab();
md               1483 arch/sparc/mm/init_64.c 	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
md               1485 arch/sparc/mm/init_64.c 		mdesc_release(md);
md               1489 arch/sparc/mm/init_64.c 	err = grab_mblocks(md);
md               1493 arch/sparc/mm/init_64.c 	err = grab_mlgroups(md);
md               1498 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "group") {
md               1499 arch/sparc/mm/init_64.c 		err = numa_parse_mdesc_group(md, node, count);
md               1506 arch/sparc/mm/init_64.c 	mdesc_for_each_node_by_name(md, node, "group") {
md               1507 arch/sparc/mm/init_64.c 		find_numa_latencies_for_group(md, node, count);
md               1531 arch/sparc/mm/init_64.c 	mdesc_release(md);
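
The sparc hits treat md as a handle to the machine description, a firmware-provided graph walked with mdesc_for_each_arc()/mdesc_arc_target() and queried with mdesc_get_property(). A toy in-memory model of the pattern used by scan_arcs_for_cfg_handle() above, which follows forward arcs and keeps the lowest-latency matching target; the node and arc types here are invented for the sketch:

#include <stdio.h>
#include <string.h>

struct toy_node {
    const char *name;
    unsigned long latency;             /* one property, for the sketch */
};

struct toy_arc {                       /* forward arc: from -> to */
    int from, to;
};

static struct toy_node nodes[] = {
    { "group", 0 }, { "mlgroup", 120 }, { "mlgroup", 40 },
};
static struct toy_arc arcs[] = { { 0, 1 }, { 0, 2 } };

/* lowest-latency "mlgroup" reachable from node grp via forward arcs */
static int best_mlgroup(int grp)
{
    unsigned long best = ~0UL;
    int candidate = -1;

    for (size_t i = 0; i < sizeof(arcs) / sizeof(arcs[0]); i++) {
        int target;

        if (arcs[i].from != grp)
            continue;
        target = arcs[i].to;
        if (strcmp(nodes[target].name, "mlgroup"))
            continue;
        if (nodes[target].latency < best) {
            best = nodes[target].latency;
            candidate = target;
        }
    }
    return candidate;
}

int main(void)
{
    printf("best mlgroup: node %d\n", best_mlgroup(0));  /* node 2, latency 40 */
    return 0;
}
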
md                203 arch/unicore32/mm/mmu.c static void __init create_mapping(struct map_desc *md)
md                209 arch/unicore32/mm/mmu.c 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
md                212 arch/unicore32/mm/mmu.c 		       __pfn_to_phys((u64)md->pfn), md->virtual);
md                216 arch/unicore32/mm/mmu.c 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md                217 arch/unicore32/mm/mmu.c 	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
md                220 arch/unicore32/mm/mmu.c 		       __pfn_to_phys((u64)md->pfn), md->virtual);
md                223 arch/unicore32/mm/mmu.c 	type = &mem_types[md->type];
md                225 arch/unicore32/mm/mmu.c 	addr = md->virtual & PAGE_MASK;
md                226 arch/unicore32/mm/mmu.c 	phys = (unsigned long)__pfn_to_phys(md->pfn);
md                227 arch/unicore32/mm/mmu.c 	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
md                232 arch/unicore32/mm/mmu.c 		       __pfn_to_phys(md->pfn), addr);
md                715 arch/x86/boot/compressed/kaslr.c 	efi_memory_desc_t *md;
md                739 arch/x86/boot/compressed/kaslr.c 		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
md                740 arch/x86/boot/compressed/kaslr.c 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
md                747 arch/x86/boot/compressed/kaslr.c 		md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
md                760 arch/x86/boot/compressed/kaslr.c 		if (md->type != EFI_CONVENTIONAL_MEMORY)
md                764 arch/x86/boot/compressed/kaslr.c 		    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
md                767 arch/x86/boot/compressed/kaslr.c 		region.start = md->phys_addr;
md                768 arch/x86/boot/compressed/kaslr.c 		region.size = md->num_pages << EFI_PAGE_SHIFT;
md                122 arch/x86/include/asm/efi.h extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
md                128 arch/x86/include/asm/efi.h extern void __init efi_map_region(efi_memory_desc_t *md);
md                129 arch/x86/include/asm/efi.h extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
md                133 arch/x86/include/asm/efi.h extern void __init old_map_region(efi_memory_desc_t *md);
md                519 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	union mon_data_bits md;
md                530 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	md.priv = of->kn->priv;
md                531 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	resid = md.u.rid;
md                532 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	domid = md.u.domid;
md                533 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 	evtid = md.u.evtid;
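
In the resctrl hits, md is a union that packs a resource id, domain id, and event id into the kernfs private pointer and unpacks them when the monitoring file is read. A sketch of the pattern; the bit-field widths are assumptions, and little-endian x86 layout is assumed:

#include <stdio.h>

union mon_data {                       /* modeled on union mon_data_bits */
    void *priv;                        /* travels as the file's private pointer */
    struct {
        unsigned int rid   : 10;       /* field widths are assumptions */
        unsigned int domid : 14;
        unsigned int evtid : 8;
    } u;
};

int main(void)
{
    static union mon_data md;          /* static: fully zero-initialized */

    md.u.rid = 2;
    md.u.domid = 5;
    md.u.evtid = 1;

    void *stored = md.priv;            /* what would sit in of->kn->priv */

    union mon_data back;
    back.priv = stored;                /* unpack on the read side */
    printf("rid=%u domid=%u evtid=%u\n",
           back.u.rid, back.u.domid, back.u.evtid);
    return 0;
}
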
md                165 arch/x86/kernel/tsc_msr.c 	const struct muldiv *md;
md                185 arch/x86/kernel/tsc_msr.c 	md = &freq_desc->muldiv[index];
md                191 arch/x86/kernel/tsc_msr.c 	if (md->divider) {
md                192 arch/x86/kernel/tsc_msr.c 		tscref = TSC_REFERENCE_KHZ * md->multiplier;
md                193 arch/x86/kernel/tsc_msr.c 		freq = DIV_ROUND_CLOSEST(tscref, md->divider);
md                198 arch/x86/kernel/tsc_msr.c 		res = DIV_ROUND_CLOSEST(tscref * ratio, md->divider);
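
The tsc_msr hits derive a CPU frequency from a per-model multiplier/divider pair applied to a reference clock, using round-to-nearest division. A worked restatement; the 19200 kHz reference and the 5/6 ratio are assumed values for the sketch:

#include <stdio.h>

#define TSC_REFERENCE_KHZ 19200U       /* assumed 19.2 MHz reference crystal */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

struct muldiv {
    unsigned int multiplier;
    unsigned int divider;
};

int main(void)
{
    const struct muldiv md = { 5, 6 };  /* illustrative ratio */
    unsigned int tscref = TSC_REFERENCE_KHZ * md.multiplier;
    unsigned int freq = DIV_ROUND_CLOSEST(tscref, md.divider);

    printf("freq = %u kHz\n", freq);    /* 19200 * 5 / 6 = 16000 */
    return 0;
}
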
md                130 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                133 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                134 arch/x86/platform/efi/efi.c 		unsigned long long start = md->phys_addr;
md                135 arch/x86/platform/efi/efi.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
md                138 arch/x86/platform/efi/efi.c 		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
md                156 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                158 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                159 arch/x86/platform/efi/efi.c 		unsigned long long start = md->phys_addr;
md                160 arch/x86/platform/efi/efi.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
md                163 arch/x86/platform/efi/efi.c 		switch (md->type) {
md                169 arch/x86/platform/efi/efi.c 			if (md->attribute & EFI_MEMORY_WB)
md                245 arch/x86/platform/efi/efi.c static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
md                247 arch/x86/platform/efi/efi.c 	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
md                251 arch/x86/platform/efi/efi.c 	if (md->num_pages == 0) {
md                253 arch/x86/platform/efi/efi.c 	} else if (md->num_pages > EFI_PAGES_MAX ||
md                254 arch/x86/platform/efi/efi.c 		   EFI_PAGES_MAX - md->num_pages <
md                255 arch/x86/platform/efi/efi.c 		   (md->phys_addr >> EFI_PAGE_SHIFT)) {
md                256 arch/x86/platform/efi/efi.c 		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
md                259 arch/x86/platform/efi/efi.c 		if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
md                269 arch/x86/platform/efi/efi.c 			i, efi_md_typeattr_format(buf, sizeof(buf), md),
md                270 arch/x86/platform/efi/efi.c 			md->phys_addr, end_hi, end);
md                273 arch/x86/platform/efi/efi.c 			i, efi_md_typeattr_format(buf, sizeof(buf), md),
md                274 arch/x86/platform/efi/efi.c 			md->phys_addr, end);
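
efi_memmap_entry_valid() above rejects descriptors whose page count would run past the top of the 64-bit address space, without ever computing an end address that could silently wrap. A sketch of the overflow test, taking EFI_PAGES_MAX as UINT64_MAX >> EFI_PAGE_SHIFT (an assumed definition for the sketch):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12
#define EFI_PAGES_MAX (UINT64_MAX >> EFI_PAGE_SHIFT)  /* assumed definition */

static int md_overflows(uint64_t phys_addr, uint64_t num_pages)
{
    return num_pages == 0 ||
           num_pages > EFI_PAGES_MAX ||
           EFI_PAGES_MAX - num_pages < (phys_addr >> EFI_PAGE_SHIFT);
}

int main(void)
{
    uint64_t phys = UINT64_MAX - 0x1fff;   /* second-to-last 4 KiB page */

    printf("%d\n", md_overflows(phys, 1)); /* 0: fits below the top */
    printf("%d\n", md_overflows(phys, 3)); /* 1: would run past 2^64 */
    return 0;
}
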
md                307 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                310 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                314 arch/x86/platform/efi/efi.c 			i++, efi_md_typeattr_format(buf, sizeof(buf), md),
md                315 arch/x86/platform/efi/efi.c 			md->phys_addr,
md                316 arch/x86/platform/efi/efi.c 			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
md                317 arch/x86/platform/efi/efi.c 			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
md                574 arch/x86/platform/efi/efi.c void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
md                578 arch/x86/platform/efi/efi.c 	addr = md->virt_addr;
md                579 arch/x86/platform/efi/efi.c 	npages = md->num_pages;
md                591 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                594 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                595 arch/x86/platform/efi/efi.c 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
md                598 arch/x86/platform/efi/efi.c 		efi_set_executable(md, true);
md                612 arch/x86/platform/efi/efi.c void __init old_map_region(efi_memory_desc_t *md)
md                618 arch/x86/platform/efi/efi.c 	start_pfn = PFN_DOWN(md->phys_addr);
md                619 arch/x86/platform/efi/efi.c 	size	  = md->num_pages << PAGE_SHIFT;
md                620 arch/x86/platform/efi/efi.c 	end	  = md->phys_addr + size;
md                624 arch/x86/platform/efi/efi.c 		va = __va(md->phys_addr);
md                626 arch/x86/platform/efi/efi.c 		if (!(md->attribute & EFI_MEMORY_WB))
md                629 arch/x86/platform/efi/efi.c 		va = efi_ioremap(md->phys_addr, size,
md                630 arch/x86/platform/efi/efi.c 				 md->type, md->attribute);
md                632 arch/x86/platform/efi/efi.c 	md->virt_addr = (u64) (unsigned long) va;
md                635 arch/x86/platform/efi/efi.c 		       (unsigned long long)md->phys_addr);
md                641 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md, *prev_md = NULL;
md                643 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                647 arch/x86/platform/efi/efi.c 			prev_md = md;
md                651 arch/x86/platform/efi/efi.c 		if (prev_md->type != md->type ||
md                652 arch/x86/platform/efi/efi.c 		    prev_md->attribute != md->attribute) {
md                653 arch/x86/platform/efi/efi.c 			prev_md = md;
md                659 arch/x86/platform/efi/efi.c 		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
md                660 arch/x86/platform/efi/efi.c 			prev_md->num_pages += md->num_pages;
md                661 arch/x86/platform/efi/efi.c 			md->type = EFI_RESERVED_TYPE;
md                662 arch/x86/platform/efi/efi.c 			md->attribute = 0;
md                665 arch/x86/platform/efi/efi.c 		prev_md = md;
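
efi_merge_regions() above coalesces physically adjacent descriptors with identical type and attributes by growing the previous entry and turning the absorbed one into a reserved placeholder. A standalone restatement (the continue after the merge is inferred; only the lines shown above are verbatim):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12
#define EFI_RESERVED_TYPE 0

struct efi_md {                        /* toy stand-in for efi_memory_desc_t */
    uint32_t type;
    uint64_t phys_addr;
    uint64_t num_pages;
    uint64_t attribute;
};

static void merge_regions(struct efi_md *map, int n)
{
    struct efi_md *prev = NULL;

    for (int i = 0; i < n; i++) {
        struct efi_md *md = &map[i];
        uint64_t prev_size;

        if (!prev || prev->type != md->type ||
            prev->attribute != md->attribute) {
            prev = md;
            continue;
        }
        prev_size = prev->num_pages << EFI_PAGE_SHIFT;
        if (md->phys_addr == prev->phys_addr + prev_size) {
            prev->num_pages += md->num_pages;  /* absorb into prev */
            md->type = EFI_RESERVED_TYPE;
            md->attribute = 0;
            continue;                          /* prev may keep growing */
        }
        prev = md;
    }
}

int main(void)
{
    struct efi_md map[] = {
        { 7, 0x1000, 1, 0x8 },  /* conventional, WB */
        { 7, 0x2000, 2, 0x8 },  /* adjacent, same type/attr: gets merged */
    };

    merge_regions(map, 2);
    printf("first entry now %llu pages\n",
           (unsigned long long)map[0].num_pages);  /* 3 */
    return 0;
}
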
md                669 arch/x86/platform/efi/efi.c static void __init get_systab_virt_addr(efi_memory_desc_t *md)
md                674 arch/x86/platform/efi/efi.c 	size = md->num_pages << EFI_PAGE_SHIFT;
md                675 arch/x86/platform/efi/efi.c 	end = md->phys_addr + size;
md                677 arch/x86/platform/efi/efi.c 	if (md->phys_addr <= systab && systab < end) {
md                678 arch/x86/platform/efi/efi.c 		systab += md->virt_addr - md->phys_addr;
md                768 arch/x86/platform/efi/efi.c static bool should_map_region(efi_memory_desc_t *md)
md                773 arch/x86/platform/efi/efi.c 	if (md->attribute & EFI_MEMORY_RUNTIME)
md                789 arch/x86/platform/efi/efi.c 		if (md->type == EFI_CONVENTIONAL_MEMORY ||
md                790 arch/x86/platform/efi/efi.c 		    md->type == EFI_LOADER_DATA ||
md                791 arch/x86/platform/efi/efi.c 		    md->type == EFI_LOADER_CODE)
md                801 arch/x86/platform/efi/efi.c 	if (md->type == EFI_BOOT_SERVICES_CODE ||
md                802 arch/x86/platform/efi/efi.c 	    md->type == EFI_BOOT_SERVICES_DATA)
md                817 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                823 arch/x86/platform/efi/efi.c 		md = p;
md                825 arch/x86/platform/efi/efi.c 		if (!should_map_region(md))
md                828 arch/x86/platform/efi/efi.c 		efi_map_region(md);
md                829 arch/x86/platform/efi/efi.c 		get_systab_virt_addr(md);
md                840 arch/x86/platform/efi/efi.c 		memcpy(new_memmap + (*count * desc_size), md, desc_size);
md                852 arch/x86/platform/efi/efi.c 	efi_memory_desc_t *md;
md                879 arch/x86/platform/efi/efi.c 	for_each_efi_memory_desc(md) {
md                880 arch/x86/platform/efi/efi.c 		efi_map_region_fixed(md); /* FIXME: add error handling */
md                881 arch/x86/platform/efi/efi.c 		get_systab_virt_addr(md);
md                 61 arch/x86/platform/efi/efi_32.c void __init efi_map_region(efi_memory_desc_t *md)
md                 63 arch/x86/platform/efi/efi_32.c 	old_map_region(md);
md                 66 arch/x86/platform/efi/efi_32.c void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
md                 62 arch/x86/platform/efi/efi_64.c 	efi_memory_desc_t *md;
md                 68 arch/x86/platform/efi/efi_64.c 	for_each_efi_memory_desc(md) {
md                 69 arch/x86/platform/efi/efi_64.c 		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md                 70 arch/x86/platform/efi/efi_64.c 		    md->type == EFI_BOOT_SERVICES_CODE)
md                 71 arch/x86/platform/efi/efi_64.c 			efi_set_executable(md, executable);
md                412 arch/x86/platform/efi/efi_64.c static void __init __map_region(efi_memory_desc_t *md, u64 va)
md                418 arch/x86/platform/efi/efi_64.c 	if (!(md->attribute & EFI_MEMORY_WB))
md                421 arch/x86/platform/efi/efi_64.c 	if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
md                424 arch/x86/platform/efi/efi_64.c 	pfn = md->phys_addr >> PAGE_SHIFT;
md                425 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
md                427 arch/x86/platform/efi/efi_64.c 			   md->phys_addr, va);
md                430 arch/x86/platform/efi/efi_64.c void __init efi_map_region(efi_memory_desc_t *md)
md                432 arch/x86/platform/efi/efi_64.c 	unsigned long size = md->num_pages << PAGE_SHIFT;
md                433 arch/x86/platform/efi/efi_64.c 	u64 pa = md->phys_addr;
md                436 arch/x86/platform/efi/efi_64.c 		return old_map_region(md);
md                443 arch/x86/platform/efi/efi_64.c 	__map_region(md, md->phys_addr);
md                451 arch/x86/platform/efi/efi_64.c 		md->virt_addr = md->phys_addr;
md                477 arch/x86/platform/efi/efi_64.c 	__map_region(md, efi_va);
md                478 arch/x86/platform/efi/efi_64.c 	md->virt_addr = efi_va;
md                486 arch/x86/platform/efi/efi_64.c void __init efi_map_region_fixed(efi_memory_desc_t *md)
md                488 arch/x86/platform/efi/efi_64.c 	__map_region(md, md->phys_addr);
md                489 arch/x86/platform/efi/efi_64.c 	__map_region(md, md->virt_addr);
md                517 arch/x86/platform/efi/efi_64.c static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
md                524 arch/x86/platform/efi/efi_64.c 	pfn = md->phys_addr >> PAGE_SHIFT;
md                525 arch/x86/platform/efi/efi_64.c 	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
md                528 arch/x86/platform/efi/efi_64.c 			   md->phys_addr, md->virt_addr);
md                531 arch/x86/platform/efi/efi_64.c 	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
md                534 arch/x86/platform/efi/efi_64.c 			   md->phys_addr, md->virt_addr);
md                540 arch/x86/platform/efi/efi_64.c static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
md                544 arch/x86/platform/efi/efi_64.c 	if (md->attribute & EFI_MEMORY_XP)
md                547 arch/x86/platform/efi/efi_64.c 	if (!(md->attribute & EFI_MEMORY_RO))
md                553 arch/x86/platform/efi/efi_64.c 	return efi_update_mappings(md, pf);
md                558 arch/x86/platform/efi/efi_64.c 	efi_memory_desc_t *md;
md                587 arch/x86/platform/efi/efi_64.c 	for_each_efi_memory_desc(md) {
md                590 arch/x86/platform/efi/efi_64.c 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
md                593 arch/x86/platform/efi/efi_64.c 		if (!(md->attribute & EFI_MEMORY_WB))
md                596 arch/x86/platform/efi/efi_64.c 		if ((md->attribute & EFI_MEMORY_XP) ||
md                597 arch/x86/platform/efi/efi_64.c 			(md->type == EFI_RUNTIME_SERVICES_DATA))
md                600 arch/x86/platform/efi/efi_64.c 		if (!(md->attribute & EFI_MEMORY_RO) &&
md                601 arch/x86/platform/efi/efi_64.c 			(md->type != EFI_RUNTIME_SERVICES_CODE))
md                607 arch/x86/platform/efi/efi_64.c 		efi_update_mappings(md, pf);
md                248 arch/x86/platform/efi/quirks.c 	efi_memory_desc_t md;
md                252 arch/x86/platform/efi/quirks.c 	if (efi_mem_desc_lookup(addr, &md) ||
md                253 arch/x86/platform/efi/quirks.c 	    md.type != EFI_BOOT_SERVICES_DATA) {
md                258 arch/x86/platform/efi/quirks.c 	if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
md                269 arch/x86/platform/efi/quirks.c 	mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;
md                271 arch/x86/platform/efi/quirks.c 	num_entries = efi_memmap_split_count(&md, &mr.range);
md                319 arch/x86/platform/efi/quirks.c 	efi_memory_desc_t *md;
md                321 arch/x86/platform/efi/quirks.c 	for_each_efi_memory_desc(md) {
md                322 arch/x86/platform/efi/quirks.c 		u64 start = md->phys_addr;
md                323 arch/x86/platform/efi/quirks.c 		u64 size = md->num_pages << EFI_PAGE_SHIFT;
md                326 arch/x86/platform/efi/quirks.c 		if (md->type != EFI_BOOT_SERVICES_CODE &&
md                327 arch/x86/platform/efi/quirks.c 		    md->type != EFI_BOOT_SERVICES_DATA)
md                367 arch/x86/platform/efi/quirks.c 		md->attribute |= EFI_MEMORY_RUNTIME;
md                376 arch/x86/platform/efi/quirks.c static void __init efi_unmap_pages(efi_memory_desc_t *md)
md                379 arch/x86/platform/efi/quirks.c 	u64 pa = md->phys_addr;
md                380 arch/x86/platform/efi/quirks.c 	u64 va = md->virt_addr;
md                398 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
md                401 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
md                408 arch/x86/platform/efi/quirks.c 	efi_memory_desc_t *md;
md                412 arch/x86/platform/efi/quirks.c 	for_each_efi_memory_desc(md) {
md                413 arch/x86/platform/efi/quirks.c 		unsigned long long start = md->phys_addr;
md                414 arch/x86/platform/efi/quirks.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
md                417 arch/x86/platform/efi/quirks.c 		if (md->type != EFI_BOOT_SERVICES_CODE &&
md                418 arch/x86/platform/efi/quirks.c 		    md->type != EFI_BOOT_SERVICES_DATA) {
md                424 arch/x86/platform/efi/quirks.c 		if (md->attribute & EFI_MEMORY_RUNTIME) {
md                434 arch/x86/platform/efi/quirks.c 		efi_unmap_pages(md);
md                481 arch/x86/platform/efi/quirks.c 	for_each_efi_memory_desc(md) {
md                482 arch/x86/platform/efi/quirks.c 		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
md                483 arch/x86/platform/efi/quirks.c 		    (md->type == EFI_BOOT_SERVICES_CODE ||
md                484 arch/x86/platform/efi/quirks.c 		     md->type == EFI_BOOT_SERVICES_DATA))
md                487 arch/x86/platform/efi/quirks.c 		memcpy(new_md, md, efi.memmap.desc_size);
md                 42 block/partitions/mac.c 	struct mac_driver_desc *md;
md                 45 block/partitions/mac.c 	md = read_part_sector(state, 0, &sect);
md                 46 block/partitions/mac.c 	if (!md)
md                 48 block/partitions/mac.c 	if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
md                 52 block/partitions/mac.c 	secsize = be16_to_cpu(md->block_size);
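
The mac partition hits read sector 0 into a mac_driver_desc and validate a big-endian on-disk signature; MAC_DRIVER_MAGIC is 0x4552 ("ER"). A sketch of the endian handling, with the struct abbreviated to the two fields used above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAC_DRIVER_MAGIC 0x4552        /* "ER" */

struct mac_driver_desc {               /* abbreviated: just the fields used */
    uint16_t signature;                /* big-endian on disk */
    uint16_t block_size;               /* big-endian on disk */
};

static uint16_t be16_to_cpu(uint16_t be)
{
    uint8_t b[2];

    memcpy(b, &be, 2);                 /* reinterpret in on-disk byte order */
    return (uint16_t)((b[0] << 8) | b[1]);
}

int main(void)
{
    /* bytes as they appear on disk: 'E' 'R', then 512 in big-endian */
    uint8_t sector0[4] = { 0x45, 0x52, 0x02, 0x00 };
    struct mac_driver_desc md;

    memcpy(&md, sector0, sizeof(md));
    if (be16_to_cpu(md.signature) != MAC_DRIVER_MAGIC)
        return 1;
    printf("block size: %u\n", be16_to_cpu(md.block_size));  /* 512 */
    return 0;
}
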
md                300 drivers/block/drbd/drbd_actlog.c 	const unsigned int stripes = device->ldev->md.al_stripes;
md                301 drivers/block/drbd/drbd_actlog.c 	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;
md                304 drivers/block/drbd/drbd_actlog.c 	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);
md                313 drivers/block/drbd/drbd_actlog.c 	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
md                611 drivers/block/drbd/drbd_actlog.c 	struct drbd_md *md = &device->ldev->md;
md                612 drivers/block/drbd/drbd_actlog.c 	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
md                617 drivers/block/drbd/drbd_bitmap.c 	if (ldev->md.al_offset == 8)
md                618 drivers/block/drbd/drbd_bitmap.c 		bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset;
md                620 drivers/block/drbd/drbd_bitmap.c 		bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset;
md                989 drivers/block/drbd/drbd_bitmap.c 		device->ldev->md.md_offset + device->ldev->md.bm_offset;
md                721 drivers/block/drbd/drbd_debugfs.c 	struct drbd_md *md;
md                727 drivers/block/drbd/drbd_debugfs.c 	md = &device->ldev->md;
md                728 drivers/block/drbd/drbd_debugfs.c 	spin_lock_irq(&md->uuid_lock);
md                730 drivers/block/drbd/drbd_debugfs.c 		seq_printf(m, "0x%016llX\n", md->uuid[idx]);
md                732 drivers/block/drbd/drbd_debugfs.c 	spin_unlock_irq(&md->uuid_lock);
md                596 drivers/block/drbd/drbd_int.h 	struct drbd_md md;
md               1831 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
md               1834 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + bdev->md.bm_offset;
md               1837 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset;
md               1847 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
md               1850 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + MD_4kB_SECT -1;
md               1853 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + bdev->md.md_size_sect -1;
md               1876 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
md               1889 drivers/block/drbd/drbd_int.h 			BM_EXT_TO_SECT(bdev->md.md_size_sect
md               1890 drivers/block/drbd/drbd_int.h 				     - bdev->md.bm_offset));
md               1905 drivers/block/drbd/drbd_int.h 	const int meta_dev_idx = bdev->md.meta_dev_idx;
md               1917 drivers/block/drbd/drbd_int.h 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
md                843 drivers/block/drbd/drbd_main.c 	spin_lock_irq(&device->ldev->md.uuid_lock);
md                845 drivers/block/drbd/drbd_main.c 		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
md                846 drivers/block/drbd/drbd_main.c 	spin_unlock_irq(&device->ldev->md.uuid_lock);
md                874 drivers/block/drbd/drbd_main.c 		u64 *uuid = device->ldev->md.uuid;
md                898 drivers/block/drbd/drbd_main.c 	uuid = device->ldev->md.uuid[UI_BITMAP];
md               3109 drivers/block/drbd/drbd_main.c 		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
md               3110 drivers/block/drbd/drbd_main.c 	buffer->flags = cpu_to_be32(device->ldev->md.flags);
md               3113 drivers/block/drbd/drbd_main.c 	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
md               3114 drivers/block/drbd/drbd_main.c 	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
md               3117 drivers/block/drbd/drbd_main.c 	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
md               3119 drivers/block/drbd/drbd_main.c 	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
md               3122 drivers/block/drbd/drbd_main.c 	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
md               3123 drivers/block/drbd/drbd_main.c 	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
md               3125 drivers/block/drbd/drbd_main.c 	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
md               3126 drivers/block/drbd/drbd_main.c 	sector = device->ldev->md.md_offset;
md               3165 drivers/block/drbd/drbd_main.c 	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
md               3220 drivers/block/drbd/drbd_main.c 	struct drbd_md *in_core = &bdev->md;
md               3323 drivers/block/drbd/drbd_main.c 	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
md               3324 drivers/block/drbd/drbd_main.c 	bdev->md.md_offset = drbd_md_ss(bdev);
md               3328 drivers/block/drbd/drbd_main.c 	bdev->md.md_size_sect = 8;
md               3330 drivers/block/drbd/drbd_main.c 	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
md               3366 drivers/block/drbd/drbd_main.c 	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
md               3368 drivers/block/drbd/drbd_main.c 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
md               3369 drivers/block/drbd/drbd_main.c 	bdev->md.flags = be32_to_cpu(buffer->flags);
md               3370 drivers/block/drbd/drbd_main.c 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
md               3372 drivers/block/drbd/drbd_main.c 	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
md               3373 drivers/block/drbd/drbd_main.c 	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
md               3374 drivers/block/drbd/drbd_main.c 	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
md               3376 drivers/block/drbd/drbd_main.c 	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
md               3381 drivers/block/drbd/drbd_main.c 	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
md               3383 drivers/block/drbd/drbd_main.c 		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
md               3386 drivers/block/drbd/drbd_main.c 	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
md               3388 drivers/block/drbd/drbd_main.c 		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
md               3439 drivers/block/drbd/drbd_main.c 		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
md               3453 drivers/block/drbd/drbd_main.c 	device->ldev->md.uuid[idx] = val;
md               3460 drivers/block/drbd/drbd_main.c 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
md               3462 drivers/block/drbd/drbd_main.c 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
md               3468 drivers/block/drbd/drbd_main.c 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
md               3469 drivers/block/drbd/drbd_main.c 	if (device->ldev->md.uuid[idx]) {
md               3471 drivers/block/drbd/drbd_main.c 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
md               3474 drivers/block/drbd/drbd_main.c 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
md               3491 drivers/block/drbd/drbd_main.c 	spin_lock_irq(&device->ldev->md.uuid_lock);
md               3492 drivers/block/drbd/drbd_main.c 	bm_uuid = device->ldev->md.uuid[UI_BITMAP];
md               3497 drivers/block/drbd/drbd_main.c 	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
md               3499 drivers/block/drbd/drbd_main.c 	spin_unlock_irq(&device->ldev->md.uuid_lock);
md               3509 drivers/block/drbd/drbd_main.c 	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
md               3512 drivers/block/drbd/drbd_main.c 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
md               3515 drivers/block/drbd/drbd_main.c 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
md               3516 drivers/block/drbd/drbd_main.c 		device->ldev->md.uuid[UI_BITMAP] = 0;
md               3518 drivers/block/drbd/drbd_main.c 		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
md               3522 drivers/block/drbd/drbd_main.c 		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
md               3524 drivers/block/drbd/drbd_main.c 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
md               3679 drivers/block/drbd/drbd_main.c 	if ((device->ldev->md.flags & flag) != flag) {
md               3681 drivers/block/drbd/drbd_main.c 		device->ldev->md.flags |= flag;
md               3687 drivers/block/drbd/drbd_main.c 	if ((device->ldev->md.flags & flag) != 0) {
md               3689 drivers/block/drbd/drbd_main.c 		device->ldev->md.flags &= ~flag;
md               3694 drivers/block/drbd/drbd_main.c 	return (bdev->md.flags & flag) != 0;
md                721 drivers/block/drbd/drbd_nl.c 			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
md                734 drivers/block/drbd/drbd_nl.c 			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
md                737 drivers/block/drbd/drbd_nl.c 			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
md                830 drivers/block/drbd/drbd_nl.c 	unsigned int al_size_sect = bdev->md.al_size_4k * 8;
md                832 drivers/block/drbd/drbd_nl.c 	bdev->md.md_offset = drbd_md_ss(bdev);
md                834 drivers/block/drbd/drbd_nl.c 	switch (bdev->md.meta_dev_idx) {
md                837 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = MD_128MB_SECT;
md                838 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = MD_4kB_SECT;
md                839 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
md                843 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
md                844 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = MD_4kB_SECT;
md                845 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
md                850 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = -al_size_sect;
md                861 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = md_size_sect;
md                863 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
md                941 drivers/block/drbd/drbd_nl.c 	struct drbd_md *md = &device->ldev->md;
md                963 drivers/block/drbd/drbd_nl.c 	prev.last_agreed_sect = md->la_size_sect;
md                964 drivers/block/drbd/drbd_nl.c 	prev.md_offset = md->md_offset;
md                965 drivers/block/drbd/drbd_nl.c 	prev.al_offset = md->al_offset;
md                966 drivers/block/drbd/drbd_nl.c 	prev.bm_offset = md->bm_offset;
md                967 drivers/block/drbd/drbd_nl.c 	prev.md_size_sect = md->md_size_sect;
md                968 drivers/block/drbd/drbd_nl.c 	prev.al_stripes = md->al_stripes;
md                969 drivers/block/drbd/drbd_nl.c 	prev.al_stripe_size_4k = md->al_stripe_size_4k;
md                973 drivers/block/drbd/drbd_nl.c 		md->al_stripes = rs->al_stripes;
md                974 drivers/block/drbd/drbd_nl.c 		md->al_stripe_size_4k = rs->al_stripe_size / 4;
md                975 drivers/block/drbd/drbd_nl.c 		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
md               1018 drivers/block/drbd/drbd_nl.c 		md->la_size_sect = size;
md               1023 drivers/block/drbd/drbd_nl.c 	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
md               1025 drivers/block/drbd/drbd_nl.c 	md_moved = prev.md_offset    != md->md_offset
md               1026 drivers/block/drbd/drbd_nl.c 		|| prev.md_size_sect != md->md_size_sect;
md               1043 drivers/block/drbd/drbd_nl.c 		prev_flags = md->flags;
md               1044 drivers/block/drbd/drbd_nl.c 		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
md               1058 drivers/block/drbd/drbd_nl.c 		md->flags = prev_flags;
md               1063 drivers/block/drbd/drbd_nl.c 				  md->al_stripes, md->al_stripe_size_4k * 4);
md               1074 drivers/block/drbd/drbd_nl.c 		md->la_size_sect = prev.last_agreed_sect;
md               1075 drivers/block/drbd/drbd_nl.c 		md->md_offset = prev.md_offset;
md               1076 drivers/block/drbd/drbd_nl.c 		md->al_offset = prev.al_offset;
md               1077 drivers/block/drbd/drbd_nl.c 		md->bm_offset = prev.bm_offset;
md               1078 drivers/block/drbd/drbd_nl.c 		md->md_size_sect = prev.md_size_sect;
md               1079 drivers/block/drbd/drbd_nl.c 		md->al_stripes = prev.al_stripes;
md               1080 drivers/block/drbd/drbd_nl.c 		md->al_stripe_size_4k = prev.al_stripe_size_4k;
md               1081 drivers/block/drbd/drbd_nl.c 		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
md               1096 drivers/block/drbd/drbd_nl.c 	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
md               1497 drivers/block/drbd/drbd_nl.c 	unsigned int al_size_4k = bdev->md.al_size_4k;
md               1663 drivers/block/drbd/drbd_nl.c 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
md               1665 drivers/block/drbd/drbd_nl.c 		device->ldev->md.flags |= MDF_AL_DISABLED;
md               1848 drivers/block/drbd/drbd_nl.c 	spin_lock_init(&nbc->md.uuid_lock);
md               1991 drivers/block/drbd/drbd_nl.c             (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
md               2007 drivers/block/drbd/drbd_nl.c 	unsigned long long eff = nbc->md.la_size_sect;
md               2148 drivers/block/drbd/drbd_nl.c 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
md               2150 drivers/block/drbd/drbd_nl.c 		device->ldev->md.flags |= MDF_AL_DISABLED;
md               2177 drivers/block/drbd/drbd_nl.c 		device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
md               2179 drivers/block/drbd/drbd_nl.c 		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
md               2864 drivers/block/drbd/drbd_nl.c 	rs.al_stripes = device->ldev->md.al_stripes;
md               2865 drivers/block/drbd/drbd_nl.c 	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
md               2902 drivers/block/drbd/drbd_nl.c 	if (device->ldev->md.al_stripes != rs.al_stripes ||
md               2903 drivers/block/drbd/drbd_nl.c 	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
md               3371 drivers/block/drbd/drbd_nl.c 		struct drbd_md *md = &device->ldev->md;
md               3376 drivers/block/drbd/drbd_nl.c 		spin_lock_irq(&md->uuid_lock);
md               3377 drivers/block/drbd/drbd_nl.c 		s->dev_current_uuid = md->uuid[UI_CURRENT];
md               3380 drivers/block/drbd/drbd_nl.c 			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
md               3384 drivers/block/drbd/drbd_nl.c 		spin_unlock_irq(&md->uuid_lock);
md               3386 drivers/block/drbd/drbd_nl.c 		s->dev_disk_flags = md->flags;
md               3650 drivers/block/drbd/drbd_nl.c 		struct drbd_md *md = &device->ldev->md;
md               3652 drivers/block/drbd/drbd_nl.c 		spin_lock_irq(&md->uuid_lock);
md               3653 drivers/block/drbd/drbd_nl.c 		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
md               3654 drivers/block/drbd/drbd_nl.c 		spin_unlock_irq(&md->uuid_lock);
md               3850 drivers/block/drbd/drbd_nl.c 		spin_lock_irq(&device->ldev->md.uuid_lock);
md               3851 drivers/block/drbd/drbd_nl.c 		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
md               3852 drivers/block/drbd/drbd_nl.c 		spin_unlock_irq(&device->ldev->md.uuid_lock);
md               3857 drivers/block/drbd/drbd_nl.c 		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
md               4217 drivers/block/drbd/drbd_nl.c 	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
md               3058 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_BITMAP] & 1;
md               3275 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
md               3295 drivers/block/drbd/drbd_receiver.c 		if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
md               3300 drivers/block/drbd/drbd_receiver.c 			if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
md               3301 drivers/block/drbd/drbd_receiver.c 			    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
md               3304 drivers/block/drbd/drbd_receiver.c 				device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
md               3305 drivers/block/drbd/drbd_receiver.c 				device->ldev->md.uuid[UI_BITMAP] = 0;
md               3307 drivers/block/drbd/drbd_receiver.c 				drbd_uuid_dump(device, "self", device->ldev->md.uuid,
md               3318 drivers/block/drbd/drbd_receiver.c 		if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
md               3323 drivers/block/drbd/drbd_receiver.c 			if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
md               3324 drivers/block/drbd/drbd_receiver.c 			    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
md               3398 drivers/block/drbd/drbd_receiver.c 		    (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
md               3418 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
md               3426 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
md               3432 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
md               3435 drivers/block/drbd/drbd_receiver.c 		    (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
md               3437 drivers/block/drbd/drbd_receiver.c 		    self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
md               3444 drivers/block/drbd/drbd_receiver.c 			__drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
md               3445 drivers/block/drbd/drbd_receiver.c 			__drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
md               3448 drivers/block/drbd/drbd_receiver.c 			drbd_uuid_dump(device, "self", device->ldev->md.uuid,
md               3459 drivers/block/drbd/drbd_receiver.c 		self = device->ldev->md.uuid[i] & ~((u64)1);
md               3465 drivers/block/drbd/drbd_receiver.c 	self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
md               3472 drivers/block/drbd/drbd_receiver.c 		self = device->ldev->md.uuid[i] & ~((u64)1);
md               3502 drivers/block/drbd/drbd_receiver.c 	spin_lock_irq(&device->ldev->md.uuid_lock);
md               3503 drivers/block/drbd/drbd_receiver.c 	drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
md               3508 drivers/block/drbd/drbd_receiver.c 	spin_unlock_irq(&device->ldev->md.uuid_lock);
md               4315 drivers/block/drbd/drbd_receiver.c 			device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
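
Nearly every drbd_receiver.c comparison above masks off bit 0 of a UUID with & ~((u64)1) before comparing: drbd keeps a role hint in the lowest bit of each generation UUID (set while the node is Primary), so two UUIDs name the same data generation exactly when they agree with that bit cleared. A tiny standalone illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool same_generation(uint64_t a, uint64_t b)
{
	return (a & ~(uint64_t)1) == (b & ~(uint64_t)1);
}

int main(void)
{
	uint64_t self = 0xABCDEF01ULL | 1;	/* bit 0 set: this node was primary */
	uint64_t peer = 0xABCDEF01ULL;		/* same generation, peer was secondary */

	printf("same generation: %d\n", same_generation(self, peer));
	return 0;
}
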
md               1093 drivers/block/drbd/drbd_state.c 		if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
md               1411 drivers/block/drbd/drbd_state.c 		u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
md               1429 drivers/block/drbd/drbd_state.c 		if (mdf != device->ldev->md.flags) {
md               1430 drivers/block/drbd/drbd_state.c 			device->ldev->md.flags = mdf;
md               1434 drivers/block/drbd/drbd_state.c 			drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
md               1817 drivers/block/drbd/drbd_state.c 			    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
md               1831 drivers/block/drbd/drbd_state.c 		    device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
md                965 drivers/block/drbd/drbd_worker.c 				drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]);
md                982 drivers/block/drbd/drbd_worker.c 					device->p_uuid[i] = device->ldev->md.uuid[i];
md                 82 drivers/clk/davinci/psc.c 	u32 md;
md                114 drivers/clk/davinci/psc.c 	regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDSTAT_STATE_MASK,
md                118 drivers/clk/davinci/psc.c 		regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_FORCE,
md                140 drivers/clk/davinci/psc.c 	regmap_read_poll_timeout(lpsc->regmap, MDSTAT(lpsc->md), mdstat,
md                166 drivers/clk/davinci/psc.c 	regmap_read(lpsc->regmap, MDSTAT(lpsc->md), &mdstat);
md                236 drivers/clk/davinci/psc.c 			  u32 md, u32 pd, u32 flags)
md                262 drivers/clk/davinci/psc.c 	lpsc->md = md;
md                301 drivers/clk/davinci/psc.c 	regmap_write_bits(lpsc->regmap, MDCTL(lpsc->md), MDCTL_LRESET, mdctl);
md                350 drivers/clk/davinci/psc.c 	return lpsc->md;
md                410 drivers/clk/davinci/psc.c 						 regmap, info->md, info->pd,
md                418 drivers/clk/davinci/psc.c 		clks[info->md] = lpsc->hw.clk;
md                419 drivers/clk/davinci/psc.c 		pm_domains[info->md] = &lpsc->pm_domain;
md                466 drivers/clk/davinci/psc.c 		struct clk *clk = psc->clk_data.clks[info->md];
md                 64 drivers/clk/davinci/psc.h 	u32 md;
md                 74 drivers/clk/davinci/psc.h 	.md	= (m),		\
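
In the davinci PSC driver above, lpsc->md is simply the module-domain number: it addresses that domain's MDCTL/MDSTAT words and indexes the clock and PM-domain arrays. A user-space model of the per-domain register addressing and the enable read-modify-write; the offsets, masks and enable value below are assumptions for illustration, not values from psc.c:

#include <stdint.h>
#include <stdio.h>

#define MDCTL(md)		(0xa00 + 4 * (md))	/* assumed register layout */
#define MDSTAT(md)		(0x800 + 4 * (md))
#define MDSTAT_STATE_MASK	0x3f
#define MDCTL_STATE_ENABLE	0x3

static uint32_t regs[0x1000 / 4];			/* fake register file */

static void write_bits(uint32_t off, uint32_t mask, uint32_t val)
{
	/* regmap_write_bits() analogue: read-modify-write of one field */
	regs[off / 4] = (regs[off / 4] & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int md = 7;				/* module domain number */

	write_bits(MDCTL(md), MDSTAT_STATE_MASK, MDCTL_STATE_ENABLE);
	/* a real driver would now poll MDSTAT(md) until the state latches */
	regs[MDSTAT(md) / 4] = MDCTL_STATE_ENABLE;	/* pretend the hardware responded */
	printf("MDSTAT[%u] state = 0x%x\n", md,
	       regs[MDSTAT(md) / 4] & MDSTAT_STATE_MASK);
	return 0;
}
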
md                 35 drivers/clk/qcom/apcs-msm8916.c 	struct clk_regmap_mux_div *md = container_of(nb,
md                 40 drivers/clk/qcom/apcs-msm8916.c 		ret = mux_div_set_src_div(md, 4, 3);
md                106 drivers/clk/qcom/clk-rcg.c static u32 md_to_m(struct mn *mn, u32 md)
md                108 drivers/clk/qcom/clk-rcg.c 	md >>= mn->m_val_shift;
md                109 drivers/clk/qcom/clk-rcg.c 	md &= BIT(mn->width) - 1;
md                110 drivers/clk/qcom/clk-rcg.c 	return md;
md                132 drivers/clk/qcom/clk-rcg.c static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
md                138 drivers/clk/qcom/clk-rcg.c 	md &= ~mask;
md                142 drivers/clk/qcom/clk-rcg.c 		md |= m;
md                143 drivers/clk/qcom/clk-rcg.c 		md |= ~n & mask_w;
md                146 drivers/clk/qcom/clk-rcg.c 	return md;
md                200 drivers/clk/qcom/clk-rcg.c 	u32 ns, md, reg;
md                233 drivers/clk/qcom/clk-rcg.c 		ret = regmap_read(rcg->clkr.regmap, md_reg, &md);
md                236 drivers/clk/qcom/clk-rcg.c 		md = mn_to_md(mn, f->m, f->n, md);
md                237 drivers/clk/qcom/clk-rcg.c 		ret = regmap_write(rcg->clkr.regmap, md_reg, md);
md                294 drivers/clk/qcom/clk-rcg.c 	u32 ns, md, reg;
md                306 drivers/clk/qcom/clk-rcg.c 		regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
md                307 drivers/clk/qcom/clk-rcg.c 		f.m = md_to_m(&rcg->mn[bank], md);
md                345 drivers/clk/qcom/clk-rcg.c 	u32 pre_div, m = 0, n = 0, ns, md, mode = 0;
md                352 drivers/clk/qcom/clk-rcg.c 		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
md                353 drivers/clk/qcom/clk-rcg.c 		m = md_to_m(mn, md);
md                370 drivers/clk/qcom/clk-rcg.c 	u32 m, n, pre_div, ns, md, mode, reg;
md                384 drivers/clk/qcom/clk-rcg.c 		regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
md                385 drivers/clk/qcom/clk-rcg.c 		m = md_to_m(mn, md);
md                476 drivers/clk/qcom/clk-rcg.c 	u32 ns, md, ctl;
md                490 drivers/clk/qcom/clk-rcg.c 		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
md                491 drivers/clk/qcom/clk-rcg.c 		md = mn_to_md(mn, f->m, f->n, md);
md                492 drivers/clk/qcom/clk-rcg.c 		regmap_write(rcg->clkr.regmap, rcg->md_reg, md);
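
The clk-rcg.c helpers above pack and unpack the RCG's MD register: M sits in a field at m_val_shift and N is stored one's-complemented in the low field, so md_to_m() is a shift-and-mask and mn_to_md() clears both fields before or-ing in the new values. A standalone sketch of the same logic; the field width and shift are example values:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

struct mn { uint8_t width; uint8_t m_val_shift; };

static uint32_t md_to_m(const struct mn *mn, uint32_t md)
{
	md >>= mn->m_val_shift;
	md &= BIT(mn->width) - 1;
	return md;
}

static uint32_t mn_to_md(const struct mn *mn, uint32_t m, uint32_t n, uint32_t md)
{
	uint32_t mask_w = BIT(mn->width) - 1;
	uint32_t mask = (mask_w << mn->m_val_shift) | mask_w;

	md &= ~mask;				/* clear both the M and not-N fields */
	if (n) {
		md |= m << mn->m_val_shift;
		md |= ~n & mask_w;		/* hardware wants N one's-complemented */
	}
	return md;
}

int main(void)
{
	struct mn mn = { .width = 8, .m_val_shift = 16 };
	uint32_t md = mn_to_md(&mn, 3, 8, 0);

	printf("md = 0x%08x, m back out = %u\n", md, md_to_m(&mn, md));
	return 0;
}
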
md                 23 drivers/clk/qcom/clk-regmap-mux-div.c int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
md                 27 drivers/clk/qcom/clk-regmap-mux-div.c 	const char *name = clk_hw_get_name(&md->clkr.hw);
md                 29 drivers/clk/qcom/clk-regmap-mux-div.c 	val = (div << md->hid_shift) | (src << md->src_shift);
md                 30 drivers/clk/qcom/clk-regmap-mux-div.c 	mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
md                 31 drivers/clk/qcom/clk-regmap-mux-div.c 	       ((BIT(md->src_width) - 1) << md->src_shift);
md                 33 drivers/clk/qcom/clk-regmap-mux-div.c 	ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
md                 38 drivers/clk/qcom/clk-regmap-mux-div.c 	ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
md                 45 drivers/clk/qcom/clk-regmap-mux-div.c 		ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
md                 59 drivers/clk/qcom/clk-regmap-mux-div.c static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
md                 63 drivers/clk/qcom/clk-regmap-mux-div.c 	const char *name = clk_hw_get_name(&md->clkr.hw);
md                 65 drivers/clk/qcom/clk-regmap-mux-div.c 	regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
md                 72 drivers/clk/qcom/clk-regmap-mux-div.c 	regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
md                 73 drivers/clk/qcom/clk-regmap-mux-div.c 	s = (val >> md->src_shift);
md                 74 drivers/clk/qcom/clk-regmap-mux-div.c 	s &= BIT(md->src_width) - 1;
md                 77 drivers/clk/qcom/clk-regmap-mux-div.c 	d = (val >> md->hid_shift);
md                 78 drivers/clk/qcom/clk-regmap-mux-div.c 	d &= BIT(md->hid_width) - 1;
md                 91 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                100 drivers/clk/qcom/clk-regmap-mux-div.c 		max_div = BIT(md->hid_width) - 1;
md                127 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                137 drivers/clk/qcom/clk-regmap-mux-div.c 		max_div = BIT(md->hid_width) - 1;
md                145 drivers/clk/qcom/clk-regmap-mux-div.c 				best_src = md->parent_map[i];
md                154 drivers/clk/qcom/clk-regmap-mux-div.c 	ret = mux_div_set_src_div(md, best_src, best_div);
md                156 drivers/clk/qcom/clk-regmap-mux-div.c 		md->div = best_div;
md                157 drivers/clk/qcom/clk-regmap-mux-div.c 		md->src = best_src;
md                165 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                169 drivers/clk/qcom/clk-regmap-mux-div.c 	mux_div_get_src_div(md, &src, &div);
md                172 drivers/clk/qcom/clk-regmap-mux-div.c 		if (src == md->parent_map[i])
md                181 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                183 drivers/clk/qcom/clk-regmap-mux-div.c 	return mux_div_set_src_div(md, md->parent_map[index], md->div);
md                189 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                191 drivers/clk/qcom/clk-regmap-mux-div.c 	return __mux_div_set_rate_and_parent(hw, rate, prate, md->src);
md                197 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                200 drivers/clk/qcom/clk-regmap-mux-div.c 					     md->parent_map[index]);
md                205 drivers/clk/qcom/clk-regmap-mux-div.c 	struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
md                210 drivers/clk/qcom/clk-regmap-mux-div.c 	mux_div_get_src_div(md, &src, &div);
md                212 drivers/clk/qcom/clk-regmap-mux-div.c 		if (src == md->parent_map[i]) {
md                 42 drivers/clk/qcom/clk-regmap-mux-div.h extern int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div);
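
mux_div_set_src_div() above composes the divider and source-select fields into one value/mask pair for the CFG register, then pokes an update bit in the CMD register and polls until the hardware latches the change. A sketch of the pack-and-update-bits half; the CMD-register trigger step is left as a comment since it is hardware-specific:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

struct mux_div {
	uint8_t hid_shift, hid_width;	/* divider field position/size */
	uint8_t src_shift, src_width;	/* source-mux field position/size */
	uint32_t cfg;			/* stands in for the CFG register */
};

static void set_src_div(struct mux_div *md, uint32_t src, uint32_t div)
{
	uint32_t val = (div << md->hid_shift) | (src << md->src_shift);
	uint32_t mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
			((BIT(md->src_width) - 1) << md->src_shift);

	md->cfg = (md->cfg & ~mask) | (val & mask);
	/* a real driver would now set the update bit in the CMD register
	 * and poll until the hardware clears it */
}

int main(void)
{
	struct mux_div md = { .hid_shift = 0, .hid_width = 5,
			      .src_shift = 8, .src_width = 3 };

	set_src_div(&md, 4, 3);
	printf("CFG = 0x%08x\n", md.cfg);
	return 0;
}
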
md                 53 drivers/clk/renesas/clk-r8a7779.c #define CPG_CLK_CONFIG_INDEX(md)	(((md) & (BIT(2)|BIT(1))) >> 1)
md                 81 drivers/clk/renesas/clk-r8a7779.c #define CPG_PLLA_MULT_INDEX(md)	(((md) & (BIT(12)|BIT(11))) >> 11)
md                265 drivers/clk/renesas/clk-rcar-gen2.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                266 drivers/clk/renesas/clk-rcar-gen2.c 					 (((md) & BIT(13)) >> 12) | \
md                267 drivers/clk/renesas/clk-rcar-gen2.c 					 (((md) & BIT(19)) >> 19))
md                220 drivers/clk/renesas/r8a7743-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                221 drivers/clk/renesas/r8a7743-cpg-mssr.c 					 (((md) & BIT(13)) >> 12) | \
md                222 drivers/clk/renesas/r8a7743-cpg-mssr.c 					 (((md) & BIT(19)) >> 19))
md                201 drivers/clk/renesas/r8a7745-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 13) | \
md                202 drivers/clk/renesas/r8a7745-cpg-mssr.c 					 (((md) & BIT(13)) >> 13))
md                184 drivers/clk/renesas/r8a77470-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 13) | \
md                185 drivers/clk/renesas/r8a77470-cpg-mssr.c 					 (((md) & BIT(13)) >> 13))
md                268 drivers/clk/renesas/r8a774a1-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
md                269 drivers/clk/renesas/r8a774a1-cpg-mssr.c 					 (((md) & BIT(13)) >> 11) | \
md                270 drivers/clk/renesas/r8a774a1-cpg-mssr.c 					 (((md) & BIT(19)) >> 18) | \
md                271 drivers/clk/renesas/r8a774a1-cpg-mssr.c 					 (((md) & BIT(17)) >> 17))
md                254 drivers/clk/renesas/r8a774c0-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	(((md) & BIT(19)) >> 19)
md                235 drivers/clk/renesas/r8a7790-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                236 drivers/clk/renesas/r8a7790-cpg-mssr.c 					 (((md) & BIT(13)) >> 12) | \
md                237 drivers/clk/renesas/r8a7790-cpg-mssr.c 					 (((md) & BIT(19)) >> 19))
md                233 drivers/clk/renesas/r8a7791-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                234 drivers/clk/renesas/r8a7791-cpg-mssr.c 					 (((md) & BIT(13)) >> 12) | \
md                235 drivers/clk/renesas/r8a7791-cpg-mssr.c 					 (((md) & BIT(19)) >> 19))
md                179 drivers/clk/renesas/r8a7792-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                180 drivers/clk/renesas/r8a7792-cpg-mssr.c 					 (((md) & BIT(13)) >> 12) | \
md                181 drivers/clk/renesas/r8a7792-cpg-mssr.c 					 (((md) & BIT(19)) >> 19))
md                211 drivers/clk/renesas/r8a7794-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 13) | \
md                212 drivers/clk/renesas/r8a7794-cpg-mssr.c 					 (((md) & BIT(13)) >> 13))
md                311 drivers/clk/renesas/r8a7795-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
md                312 drivers/clk/renesas/r8a7795-cpg-mssr.c 					 (((md) & BIT(13)) >> 11) | \
md                313 drivers/clk/renesas/r8a7795-cpg-mssr.c 					 (((md) & BIT(19)) >> 18) | \
md                314 drivers/clk/renesas/r8a7795-cpg-mssr.c 					 (((md) & BIT(17)) >> 17))
md                282 drivers/clk/renesas/r8a7796-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
md                283 drivers/clk/renesas/r8a7796-cpg-mssr.c 					 (((md) & BIT(13)) >> 11) | \
md                284 drivers/clk/renesas/r8a7796-cpg-mssr.c 					 (((md) & BIT(19)) >> 18) | \
md                285 drivers/clk/renesas/r8a7796-cpg-mssr.c 					 (((md) & BIT(17)) >> 17))
md                284 drivers/clk/renesas/r8a77965-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
md                285 drivers/clk/renesas/r8a77965-cpg-mssr.c 					 (((md) & BIT(13)) >> 11) | \
md                286 drivers/clk/renesas/r8a77965-cpg-mssr.c 					 (((md) & BIT(19)) >> 18) | \
md                287 drivers/clk/renesas/r8a77965-cpg-mssr.c 					 (((md) & BIT(17)) >> 17))
md                189 drivers/clk/renesas/r8a77970-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
md                190 drivers/clk/renesas/r8a77970-cpg-mssr.c 					 (((md) & BIT(13)) >> 12) | \
md                191 drivers/clk/renesas/r8a77970-cpg-mssr.c 					 (((md) & BIT(19)) >> 19))
md                200 drivers/clk/renesas/r8a77980-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 13) | \
md                201 drivers/clk/renesas/r8a77980-cpg-mssr.c 					 (((md) & BIT(13)) >> 13))
md                259 drivers/clk/renesas/r8a77990-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	(((md) & BIT(19)) >> 19)
md                198 drivers/clk/renesas/r8a77995-cpg-mssr.c #define CPG_PLL_CONFIG_INDEX(md)	(((md) & BIT(19)) >> 19)
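
Every CPG_PLL_CONFIG_INDEX() variant above does the same job: gather a handful of scattered mode-pin bits from the MD register into a dense index for a per-SoC PLL configuration table; only which MD bits matter, and where they land, differs between SoCs. A worked example using the 4-bit r8a7795/r8a7796-style encoding, where MD14, MD13, MD19 and MD17 become index bits 3..0:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
					 (((md) & BIT(13)) >> 11) | \
					 (((md) & BIT(19)) >> 18) | \
					 (((md) & BIT(17)) >> 17))

int main(void)
{
	uint32_t md = BIT(14) | BIT(17);	/* MD14 and MD17 pins tied high */

	/* MD14 -> index bit 3, MD17 -> index bit 0, so index = 0b1001 = 9 */
	printf("table index = %u\n", CPG_PLL_CONFIG_INDEX(md));
	return 0;
}
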
md                462 drivers/clk/st/clkgen-fsyn.c 	u32 md;
md                492 drivers/clk/st/clkgen-fsyn.c 	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
md                705 drivers/clk/st/clkgen-fsyn.c 	fs->md = params->mdiv;
md                774 drivers/clk/st/clkgen-fsyn.c 	fs->md = params->mdiv;
md               1142 drivers/crypto/hifn_795x.c 		u16 md = 0;
md               1145 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_NEW_KEY;
md               1147 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_NEW_IV;
md               1151 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_MODE_ECB;
md               1154 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_MODE_CBC;
md               1157 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_MODE_CFB;
md               1160 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_MODE_OFB;
md               1170 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_KSZ_128 |
md               1176 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_KSZ_192 |
md               1182 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_KSZ_256 |
md               1188 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_ALG_3DES;
md               1193 drivers/crypto/hifn_795x.c 			md |= HIFN_CRYPT_CMD_ALG_DES;
md               1201 drivers/crypto/hifn_795x.c 				rctx->iv, rctx->ivsize, md);
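
The hifn_795x.c fragment above accumulates a 16-bit crypt command word: new-key/new-IV flags, then a cipher-mode field, then a key-size field, each or-ed into md before the command is emitted. A toy version of that construction; the flag values below are invented for the sketch, and only the or-accumulation pattern mirrors the driver:

#include <stdint.h>
#include <stdio.h>

#define CMD_NEW_KEY	0x0800	/* illustrative values, not the HIFN_* constants */
#define CMD_NEW_IV	0x0400
#define CMD_MODE_CBC	0x0002
#define CMD_KSZ_128	0x0008

int main(void)
{
	uint16_t md = 0;
	int have_key = 1, have_iv = 1;

	if (have_key)
		md |= CMD_NEW_KEY;
	if (have_iv)
		md |= CMD_NEW_IV;
	md |= CMD_MODE_CBC;	/* cipher mode field */
	md |= CMD_KSZ_128;	/* key size field */

	printf("crypt command word = 0x%04x\n", md);
	return 0;
}
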
md                280 drivers/dma/imx-sdma.c 	u32  md;
md                 62 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_desc *md;
md                 94 drivers/dma/uniphier-mdmac.c 		mc->md = NULL;
md                100 drivers/dma/uniphier-mdmac.c 	mc->md = to_uniphier_mdmac_desc(vd);
md                102 drivers/dma/uniphier-mdmac.c 	return mc->md;
md                107 drivers/dma/uniphier-mdmac.c 				  struct uniphier_mdmac_desc *md)
md                114 drivers/dma/uniphier-mdmac.c 	sg = &md->sgl[md->sg_cur];
md                116 drivers/dma/uniphier-mdmac.c 	if (md->dir == DMA_MEM_TO_DEV) {
md                147 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_desc *md;
md                149 drivers/dma/uniphier-mdmac.c 	md = uniphier_mdmac_next_desc(mc);
md                150 drivers/dma/uniphier-mdmac.c 	if (md)
md                151 drivers/dma/uniphier-mdmac.c 		uniphier_mdmac_handle(mc, md);
md                178 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_desc *md;
md                203 drivers/dma/uniphier-mdmac.c 	md = mc->md;
md                204 drivers/dma/uniphier-mdmac.c 	if (!md)
md                207 drivers/dma/uniphier-mdmac.c 	md->sg_cur++;
md                209 drivers/dma/uniphier-mdmac.c 	if (md->sg_cur >= md->sg_len) {
md                210 drivers/dma/uniphier-mdmac.c 		vchan_cookie_complete(&md->vd);
md                211 drivers/dma/uniphier-mdmac.c 		md = uniphier_mdmac_next_desc(mc);
md                212 drivers/dma/uniphier-mdmac.c 		if (!md)
md                216 drivers/dma/uniphier-mdmac.c 	uniphier_mdmac_handle(mc, md);
md                236 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_desc *md;
md                241 drivers/dma/uniphier-mdmac.c 	md = kzalloc(sizeof(*md), GFP_NOWAIT);
md                242 drivers/dma/uniphier-mdmac.c 	if (!md)
md                245 drivers/dma/uniphier-mdmac.c 	md->sgl = sgl;
md                246 drivers/dma/uniphier-mdmac.c 	md->sg_len = sg_len;
md                247 drivers/dma/uniphier-mdmac.c 	md->dir = direction;
md                249 drivers/dma/uniphier-mdmac.c 	return vchan_tx_prep(vc, &md->vd, flags);
md                262 drivers/dma/uniphier-mdmac.c 	if (mc->md) {
md                263 drivers/dma/uniphier-mdmac.c 		vchan_terminate_vdesc(&mc->md->vd);
md                264 drivers/dma/uniphier-mdmac.c 		mc->md = NULL;
md                288 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_desc *md = NULL;
md                304 drivers/dma/uniphier-mdmac.c 	if (mc->md && mc->md->vd.tx.cookie == cookie) {
md                308 drivers/dma/uniphier-mdmac.c 		md = mc->md;
md                311 drivers/dma/uniphier-mdmac.c 	if (!md) {
md                314 drivers/dma/uniphier-mdmac.c 			md = to_uniphier_mdmac_desc(vd);
md                317 drivers/dma/uniphier-mdmac.c 	if (md) {
md                319 drivers/dma/uniphier-mdmac.c 		for (i = md->sg_cur; i < md->sg_len; i++)
md                320 drivers/dma/uniphier-mdmac.c 			txstate->residue += sg_dma_len(&md->sgl[i]);
md                336 drivers/dma/uniphier-mdmac.c 	if (vchan_issue_pending(vc) && !mc->md)
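
In uniphier-mdmac.c above, a descriptor records how far the hardware has advanced through its scatterlist (sg_cur), and tx_status reports as residue the summed length of the segments not yet completed. A standalone model with plain arrays standing in for the scatterlist:

#include <stdint.h>
#include <stdio.h>

struct desc {
	const uint32_t *len;	/* per-segment lengths, sg_dma_len() analogue */
	unsigned int sg_len;	/* number of segments */
	unsigned int sg_cur;	/* first segment not yet finished */
};

static uint32_t residue(const struct desc *md)
{
	uint32_t res = 0;
	unsigned int i;

	for (i = md->sg_cur; i < md->sg_len; i++)
		res += md->len[i];
	return res;
}

int main(void)
{
	const uint32_t len[] = { 4096, 4096, 2048 };
	struct desc md = { .len = len, .sg_len = 3, .sg_cur = 1 };

	printf("bytes still pending: %u\n", residue(&md));	/* 6144 */
	return 0;
}
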
md                 25 drivers/firmware/efi/arm-init.c static int __init is_memory(efi_memory_desc_t *md)
md                 27 drivers/firmware/efi/arm-init.c 	if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC))
md                 39 drivers/firmware/efi/arm-init.c 	efi_memory_desc_t *md;
md                 41 drivers/firmware/efi/arm-init.c 	for_each_efi_memory_desc(md) {
md                 42 drivers/firmware/efi/arm-init.c 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
md                 44 drivers/firmware/efi/arm-init.c 		if (md->virt_addr == 0)
md                 47 drivers/firmware/efi/arm-init.c 		if (md->virt_addr <= addr &&
md                 48 drivers/firmware/efi/arm-init.c 		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
md                 49 drivers/firmware/efi/arm-init.c 			return md->phys_addr + addr - md->virt_addr;
md                156 drivers/firmware/efi/arm-init.c static __init int is_usable_memory(efi_memory_desc_t *md)
md                158 drivers/firmware/efi/arm-init.c 	switch (md->type) {
md                171 drivers/firmware/efi/arm-init.c 		return (md->attribute & EFI_MEMORY_WB);
md                180 drivers/firmware/efi/arm-init.c 	efi_memory_desc_t *md;
md                194 drivers/firmware/efi/arm-init.c 	for_each_efi_memory_desc(md) {
md                195 drivers/firmware/efi/arm-init.c 		paddr = md->phys_addr;
md                196 drivers/firmware/efi/arm-init.c 		npages = md->num_pages;
md                203 drivers/firmware/efi/arm-init.c 				efi_md_typeattr_format(buf, sizeof(buf), md));
md                209 drivers/firmware/efi/arm-init.c 		if (is_memory(md)) {
md                212 drivers/firmware/efi/arm-init.c 			if (!is_usable_memory(md))
md                216 drivers/firmware/efi/arm-init.c 			if (md->type == EFI_ACPI_RECLAIM_MEMORY)
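
The arm-init.c hits above include the virtual-to-physical translation used before the kernel's own mappings exist: scan the memory map for an EFI_MEMORY_RUNTIME descriptor whose virtual window covers the address, then translate by that descriptor's phys/virt delta. A compact user-space model, with the descriptor trimmed to the fields the walk actually uses:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT		12
#define EFI_MEMORY_RUNTIME	(1ULL << 63)

struct efi_md {
	uint64_t attribute, phys_addr, virt_addr, num_pages;
};

static uint64_t efi_to_phys(const struct efi_md *map, int n, uint64_t addr)
{
	int i;

	for (i = 0; i < n; i++) {
		const struct efi_md *md = &map[i];

		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;
		if (md->virt_addr == 0)		/* no virtual mapping installed */
			continue;
		if (md->virt_addr <= addr &&
		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
			return md->phys_addr + addr - md->virt_addr;
	}
	return addr;	/* no mapping found: fall back to identity, as the walk above does */
}

int main(void)
{
	struct efi_md map[] = {
		{ EFI_MEMORY_RUNTIME, 0x80000000, 0xffff0000, 16 },
	};

	printf("phys = 0x%llx\n",
	       (unsigned long long)efi_to_phys(map, 1, 0xffff1234));
	return 0;
}
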
md                 56 drivers/firmware/efi/arm-runtime.c 	efi_memory_desc_t *md;
md                 64 drivers/firmware/efi/arm-runtime.c 	for_each_efi_memory_desc(md) {
md                 65 drivers/firmware/efi/arm-runtime.c 		phys_addr_t phys = md->phys_addr;
md                 68 drivers/firmware/efi/arm-runtime.c 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
md                 70 drivers/firmware/efi/arm-runtime.c 		if (md->virt_addr == 0)
md                 73 drivers/firmware/efi/arm-runtime.c 		ret = efi_create_mapping(&efi_mm, md);
md                 84 drivers/firmware/efi/arm-runtime.c 		    efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
md                 86 drivers/firmware/efi/arm-runtime.c 							     phys + md->virt_addr);
md                397 drivers/firmware/efi/efi.c 	efi_memory_desc_t *md;
md                409 drivers/firmware/efi/efi.c 	for_each_efi_memory_desc(md) {
md                413 drivers/firmware/efi/efi.c 		size = md->num_pages << EFI_PAGE_SHIFT;
md                414 drivers/firmware/efi/efi.c 		end = md->phys_addr + size;
md                415 drivers/firmware/efi/efi.c 		if (phys_addr >= md->phys_addr && phys_addr < end) {
md                416 drivers/firmware/efi/efi.c 			memcpy(out_md, md, sizeof(*out_md));
md                426 drivers/firmware/efi/efi.c u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
md                428 drivers/firmware/efi/efi.c 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
md                429 drivers/firmware/efi/efi.c 	u64 end = md->phys_addr + size;
md                822 drivers/firmware/efi/efi.c 				     const efi_memory_desc_t *md)
md                829 drivers/firmware/efi/efi.c 	if (md->type >= ARRAY_SIZE(memory_type_name))
md                830 drivers/firmware/efi/efi.c 		type_len = snprintf(pos, size, "[type=%u", md->type);
md                834 drivers/firmware/efi/efi.c 				    memory_type_name[md->type]);
md                841 drivers/firmware/efi/efi.c 	attr = md->attribute;
md                882 drivers/firmware/efi/efi.c 	efi_memory_desc_t *md;
md                887 drivers/firmware/efi/efi.c 	for_each_efi_memory_desc(md) {
md                888 drivers/firmware/efi/efi.c 		if ((md->phys_addr <= phys_addr) &&
md                889 drivers/firmware/efi/efi.c 		    (phys_addr < (md->phys_addr +
md                890 drivers/firmware/efi/efi.c 		    (md->num_pages << EFI_PAGE_SHIFT))))
md                891 drivers/firmware/efi/efi.c 			return md->attribute;
md                906 drivers/firmware/efi/efi.c 	const efi_memory_desc_t *md;
md                911 drivers/firmware/efi/efi.c 	for_each_efi_memory_desc(md) {
md                912 drivers/firmware/efi/efi.c 		if ((md->phys_addr <= phys_addr) &&
md                913 drivers/firmware/efi/efi.c 		    (phys_addr < (md->phys_addr +
md                914 drivers/firmware/efi/efi.c 				  (md->num_pages << EFI_PAGE_SHIFT))))
md                915 drivers/firmware/efi/efi.c 			return md->type;
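
The efi.c lookups above (efi_mem_desc_lookup, efi_mem_attributes, efi_mem_type) all reduce to one containment test: a physical address belongs to a descriptor when phys_addr <= addr < phys_addr + (num_pages << EFI_PAGE_SHIFT). A minimal helper expressing just that test, plus the exclusive end used by efi_mem_desc_end():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT	12

struct efi_md { uint64_t phys_addr, num_pages; };

static uint64_t md_end(const struct efi_md *md)
{
	/* exclusive end, as in efi_mem_desc_end() */
	return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
}

static bool md_contains(const struct efi_md *md, uint64_t phys)
{
	return md->phys_addr <= phys && phys < md_end(md);
}

int main(void)
{
	struct efi_md md = { .phys_addr = 0x40000000, .num_pages = 256 };

	printf("end = 0x%llx, contains 0x40080000: %d\n",
	       (unsigned long long)md_end(&md), md_contains(&md, 0x40080000));
	return 0;
}
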
md                245 drivers/firmware/efi/esrt.c 	efi_memory_desc_t md;
md                253 drivers/firmware/efi/esrt.c 	rc = efi_mem_desc_lookup(efi.esrt, &md);
md                255 drivers/firmware/efi/esrt.c 	    (!(md.attribute & EFI_MEMORY_RUNTIME) &&
md                256 drivers/firmware/efi/esrt.c 	     md.type != EFI_BOOT_SERVICES_DATA &&
md                257 drivers/firmware/efi/esrt.c 	     md.type != EFI_RUNTIME_SERVICES_DATA)) {
md                262 drivers/firmware/efi/esrt.c 	max = efi_mem_desc_end(&md);
md                333 drivers/firmware/efi/esrt.c 	if (md.type == EFI_BOOT_SERVICES_DATA)
md                 42 drivers/firmware/efi/fake_mem.c 	efi_memory_desc_t *md;
md                 52 drivers/firmware/efi/fake_mem.c 		for_each_efi_memory_desc(md) {
md                 55 drivers/firmware/efi/fake_mem.c 			new_nr_map += efi_memmap_split_count(md, r);
md                139 drivers/firmware/efi/libstub/efi-stub-helper.c 	efi_memory_desc_t *md;
md                155 drivers/firmware/efi/libstub/efi-stub-helper.c 	for_each_efi_memory_desc_in_map(&map, md) {
md                156 drivers/firmware/efi/libstub/efi-stub-helper.c 		if (md->attribute & EFI_MEMORY_WB) {
md                157 drivers/firmware/efi/libstub/efi-stub-helper.c 			if (membase > md->phys_addr)
md                158 drivers/firmware/efi/libstub/efi-stub-helper.c 				membase = md->phys_addr;
md                 39 drivers/firmware/efi/libstub/random.c static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
md                 46 drivers/firmware/efi/libstub/random.c 	if (md->type != EFI_CONVENTIONAL_MEMORY)
md                 49 drivers/firmware/efi/libstub/random.c 	region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
md                 51 drivers/firmware/efi/libstub/random.c 	first_slot = round_up(md->phys_addr, align);
md                 66 drivers/firmware/efi/libstub/random.c #define MD_NUM_SLOTS(md)	((md)->virt_addr)
md                 97 drivers/firmware/efi/libstub/random.c 		efi_memory_desc_t *md = (void *)memory_map + map_offset;
md                100 drivers/firmware/efi/libstub/random.c 		slots = get_entry_num_slots(md, size, ilog2(align));
md                101 drivers/firmware/efi/libstub/random.c 		MD_NUM_SLOTS(md) = slots;
md                120 drivers/firmware/efi/libstub/random.c 		efi_memory_desc_t *md = (void *)memory_map + map_offset;
md                124 drivers/firmware/efi/libstub/random.c 		if (target_slot >= MD_NUM_SLOTS(md)) {
md                125 drivers/firmware/efi/libstub/random.c 			target_slot -= MD_NUM_SLOTS(md);
md                129 drivers/firmware/efi/libstub/random.c 		target = round_up(md->phys_addr, align) + target_slot * align;
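
get_entry_num_slots() above weights each EFI_CONVENTIONAL_MEMORY region for the KASLR draw by counting how many align-spaced load addresses fit an image of the given size. A sketch of that arithmetic; the ULONG_MAX clamp on region_end shown in random.c is omitted for brevity, and the round helpers are local stand-ins:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SIZE	4096ULL

static uint64_t round_up_to(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
static uint64_t round_down_to(uint64_t x, uint64_t a) { return x / a * a; }

static uint64_t num_slots(uint64_t phys, uint64_t num_pages,
			  uint64_t size, uint64_t align)
{
	uint64_t region_end = phys + num_pages * EFI_PAGE_SIZE - 1;
	uint64_t first = round_up_to(phys, align);
	uint64_t last;

	if (region_end < first + size - 1)
		return 0;			/* region too small for the image */
	last = round_down_to(region_end - size + 1, align);
	return (last - first) / align + 1;
}

int main(void)
{
	/* 2 MiB free region, 1 MiB image, 64 KiB alignment -> 17 slots */
	printf("slots = %llu\n",
	       (unsigned long long)num_slots(0x40000000, 512, 1 << 20, 1 << 16));
	return 0;
}
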
md                 59 drivers/firmware/efi/memattr.c 	efi_memory_desc_t *md;
md                 88 drivers/firmware/efi/memattr.c 	for_each_efi_memory_desc(md) {
md                 89 drivers/firmware/efi/memattr.c 		u64 md_paddr = md->phys_addr;
md                 90 drivers/firmware/efi/memattr.c 		u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
md                 92 drivers/firmware/efi/memattr.c 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
md                 94 drivers/firmware/efi/memattr.c 		if (md->virt_addr == 0 && md->phys_addr != 0) {
md                111 drivers/firmware/efi/memattr.c 		if (md->type != in->type) {
md                116 drivers/firmware/efi/memattr.c 		out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
md                161 drivers/firmware/efi/memattr.c 		efi_memory_desc_t md;
md                167 drivers/firmware/efi/memattr.c 				       &md);
md                168 drivers/firmware/efi/memattr.c 		size = md.num_pages << EFI_PAGE_SHIFT;
md                171 drivers/firmware/efi/memattr.c 				valid ? "" : "!", md.phys_addr,
md                172 drivers/firmware/efi/memattr.c 				md.phys_addr + size - 1,
md                173 drivers/firmware/efi/memattr.c 				efi_md_typeattr_format(buf, sizeof(buf), &md));
md                176 drivers/firmware/efi/memattr.c 			ret = fn(mm, &md);
md                217 drivers/firmware/efi/memmap.c int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
md                223 drivers/firmware/efi/memmap.c 	start = md->phys_addr;
md                224 drivers/firmware/efi/memmap.c 	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
md                261 drivers/firmware/efi/memmap.c 	efi_memory_desc_t *md;
md                287 drivers/firmware/efi/memmap.c 		md = new;
md                288 drivers/firmware/efi/memmap.c 		start = md->phys_addr;
md                289 drivers/firmware/efi/memmap.c 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
md                292 drivers/firmware/efi/memmap.c 			md->attribute |= m_attr;
md                297 drivers/firmware/efi/memmap.c 			md->attribute |= m_attr;
md                298 drivers/firmware/efi/memmap.c 			md->num_pages = (m_end - md->phys_addr + 1) >>
md                303 drivers/firmware/efi/memmap.c 			md = new;
md                304 drivers/firmware/efi/memmap.c 			md->phys_addr = m_end + 1;
md                305 drivers/firmware/efi/memmap.c 			md->num_pages = (end - md->phys_addr + 1) >>
md                311 drivers/firmware/efi/memmap.c 			md->num_pages = (m_start - md->phys_addr) >>
md                316 drivers/firmware/efi/memmap.c 			md = new;
md                317 drivers/firmware/efi/memmap.c 			md->attribute |= m_attr;
md                318 drivers/firmware/efi/memmap.c 			md->phys_addr = m_start;
md                319 drivers/firmware/efi/memmap.c 			md->num_pages = (m_end - m_start + 1) >>
md                324 drivers/firmware/efi/memmap.c 			md = new;
md                325 drivers/firmware/efi/memmap.c 			md->phys_addr = m_end + 1;
md                326 drivers/firmware/efi/memmap.c 			md->num_pages = (end - m_end) >>
md                333 drivers/firmware/efi/memmap.c 			md->num_pages = (m_start - md->phys_addr) >>
md                338 drivers/firmware/efi/memmap.c 			md = new;
md                339 drivers/firmware/efi/memmap.c 			md->phys_addr = m_start;
md                340 drivers/firmware/efi/memmap.c 			md->num_pages = (end - md->phys_addr + 1) >>
md                342 drivers/firmware/efi/memmap.c 			md->attribute |= m_attr;
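
efi_memmap_split_count() above predicts how many extra map entries overlaying a range on a descriptor will create, which is exactly the set of overlap cases the insert code that follows it handles: head covered (one cut), range strictly inside (two cuts), tail covered (one cut). A plausible standalone reconstruction of that counting, consistent with those cases:

#include <stdint.h>
#include <stdio.h>

static int split_count(uint64_t start, uint64_t end,	/* descriptor span, inclusive */
		       uint64_t m_start, uint64_t m_end)/* overlay range, inclusive */
{
	int count = 0;

	if (m_start <= start && start < m_end && m_end < end)
		count++;		/* range covers the head: one cut */
	if (start < m_start && m_start < end) {
		if (m_end < end)
			count += 2;	/* range strictly inside: two cuts */
		else
			count++;	/* range covers the tail: one cut */
	}
	return count;
}

int main(void)
{
	/* overlay strictly inside the descriptor: both edges cut -> 2 */
	printf("splits = %d\n", split_count(0x1000, 0x8fff, 0x2000, 0x3fff));
	return 0;
}
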
md                 17 drivers/firmware/efi/runtime-map.c 	efi_memory_desc_t md;
md                 35 drivers/firmware/efi/runtime-map.c 	return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
md                 38 drivers/firmware/efi/runtime-map.c #define EFI_RUNTIME_FIELD(var) entry->md.var
md                105 drivers/firmware/efi/runtime-map.c 			    efi_memory_desc_t *md)
md                123 drivers/firmware/efi/runtime-map.c 	memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
md                163 drivers/firmware/efi/runtime-map.c 	efi_memory_desc_t *md;
md                175 drivers/firmware/efi/runtime-map.c 	for_each_efi_memory_desc(md) {
md                176 drivers/firmware/efi/runtime-map.c 		entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
md                714 drivers/hwmon/npcm750-pwm-fan.c 	int md;
md                719 drivers/hwmon/npcm750-pwm-fan.c 	for (md = 0; md < NPCM7XX_FAN_MAX_MODULE; md++) {
md                722 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TCKC(data->fan_base, md));
md                725 drivers/hwmon/npcm750-pwm-fan.c 		iowrite8(0x00, NPCM7XX_FAN_REG_TIEN(data->fan_base, md));
md                729 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TICLR(data->fan_base, md));
md                733 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TPRSC(data->fan_base, md));
md                738 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TMCTRL(data->fan_base, md));
md                742 drivers/hwmon/npcm750-pwm-fan.c 			  NPCM7XX_FAN_REG_TCNT1(data->fan_base, md));
md                744 drivers/hwmon/npcm750-pwm-fan.c 			  NPCM7XX_FAN_REG_TCNT2(data->fan_base, md));
md                748 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TCPCFG(data->fan_base, md));
md                752 drivers/hwmon/npcm750-pwm-fan.c 			  NPCM7XX_FAN_REG_TCPA(data->fan_base, md));
md                754 drivers/hwmon/npcm750-pwm-fan.c 			  NPCM7XX_FAN_REG_TCPB(data->fan_base, md));
md                758 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TINASEL(data->fan_base, md));
md                760 drivers/hwmon/npcm750-pwm-fan.c 			 NPCM7XX_FAN_REG_TINBSEL(data->fan_base, md));
md                763 drivers/hwmon/npcm750-pwm-fan.c 			ch = md * NPCM7XX_FAN_MAX_CHN_NUM_IN_A_MODULE + i;
md                415 drivers/hwmon/pmbus/ibm-cffps.c 	const void *md = of_device_get_match_data(&client->dev);
md                417 drivers/hwmon/pmbus/ibm-cffps.c 	if (md)
md                418 drivers/hwmon/pmbus/ibm-cffps.c 		vs = (enum versions)md;
md                203 drivers/iio/imu/adis16480.c 	u16 md, year;
md                212 drivers/iio/imu/adis16480.c 	ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
md                217 drivers/iio/imu/adis16480.c 			md >> 8, md & 0xff, year);
md                737 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
md                742 drivers/input/touchscreen/cyttsp4_core.c 	if (md->num_prv_tch == 0)
md                746 drivers/input/touchscreen/cyttsp4_core.c 		input_mt_slot(md->input, t);
md                747 drivers/input/touchscreen/cyttsp4_core.c 		input_mt_report_slot_state(md->input,
md                752 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_lift_all(struct cyttsp4_mt_data *md)
md                754 drivers/input/touchscreen/cyttsp4_core.c 	if (!md->si)
md                757 drivers/input/touchscreen/cyttsp4_core.c 	if (md->num_prv_tch != 0) {
md                758 drivers/input/touchscreen/cyttsp4_core.c 		cyttsp4_report_slot_liftoff(md,
md                759 drivers/input/touchscreen/cyttsp4_core.c 				md->si->si_ofs.tch_abs[CY_TCH_T].max);
md                760 drivers/input/touchscreen/cyttsp4_core.c 		input_sync(md->input);
md                761 drivers/input/touchscreen/cyttsp4_core.c 		md->num_prv_tch = 0;
md                765 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_get_touch_axis(struct cyttsp4_mt_data *md,
md                772 drivers/input/touchscreen/cyttsp4_core.c 		dev_vdbg(&md->input->dev,
md                783 drivers/input/touchscreen/cyttsp4_core.c 	dev_vdbg(&md->input->dev,
md                790 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_get_touch(struct cyttsp4_mt_data *md,
md                793 drivers/input/touchscreen/cyttsp4_core.c 	struct device *dev = &md->input->dev;
md                794 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_sysinfo *si = md->si;
md                799 drivers/input/touchscreen/cyttsp4_core.c 		cyttsp4_get_touch_axis(md, &touch->abs[abs],
md                809 drivers/input/touchscreen/cyttsp4_core.c 	if (md->pdata->flags & CY_FLAG_FLIP) {
md                815 drivers/input/touchscreen/cyttsp4_core.c 	if (md->pdata->flags & CY_FLAG_INV_X) {
md                817 drivers/input/touchscreen/cyttsp4_core.c 			touch->abs[CY_TCH_X] = md->si->si_ofs.max_y -
md                820 drivers/input/touchscreen/cyttsp4_core.c 			touch->abs[CY_TCH_X] = md->si->si_ofs.max_x -
md                823 drivers/input/touchscreen/cyttsp4_core.c 	if (md->pdata->flags & CY_FLAG_INV_Y) {
md                825 drivers/input/touchscreen/cyttsp4_core.c 			touch->abs[CY_TCH_Y] = md->si->si_ofs.max_x -
md                828 drivers/input/touchscreen/cyttsp4_core.c 			touch->abs[CY_TCH_Y] = md->si->si_ofs.max_y -
md                834 drivers/input/touchscreen/cyttsp4_core.c 		md->pdata->flags & CY_FLAG_INV_X ? "true" : "false",
md                835 drivers/input/touchscreen/cyttsp4_core.c 		md->pdata->flags & CY_FLAG_INV_Y ? "true" : "false",
md                854 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch)
md                856 drivers/input/touchscreen/cyttsp4_core.c 	struct device *dev = &md->input->dev;
md                857 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_sysinfo *si = md->si;
md                865 drivers/input/touchscreen/cyttsp4_core.c 		cyttsp4_get_touch(md, &tch, si->xy_data +
md                867 drivers/input/touchscreen/cyttsp4_core.c 		if ((tch.abs[CY_TCH_T] < md->pdata->frmwrk->abs
md                869 drivers/input/touchscreen/cyttsp4_core.c 			(tch.abs[CY_TCH_T] > md->pdata->frmwrk->abs
md                873 drivers/input/touchscreen/cyttsp4_core.c 				md->pdata->frmwrk->abs[(CY_ABS_ID_OST *
md                879 drivers/input/touchscreen/cyttsp4_core.c 		sig = md->pdata->frmwrk->abs
md                882 drivers/input/touchscreen/cyttsp4_core.c 			t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs
md                889 drivers/input/touchscreen/cyttsp4_core.c 			input_mt_slot(md->input, t);
md                890 drivers/input/touchscreen/cyttsp4_core.c 			input_mt_report_slot_state(md->input, MT_TOOL_FINGER,
md                897 drivers/input/touchscreen/cyttsp4_core.c 			sig = md->pdata->frmwrk->abs[((CY_ABS_X_OST + j) *
md                900 drivers/input/touchscreen/cyttsp4_core.c 				input_report_abs(md->input, sig,
md                915 drivers/input/touchscreen/cyttsp4_core.c 				sig = md->pdata->frmwrk->abs
md                919 drivers/input/touchscreen/cyttsp4_core.c 					input_report_abs(md->input, sig,
md                946 drivers/input/touchscreen/cyttsp4_core.c 	cyttsp4_final_sync(md->input, si->si_ofs.tch_abs[CY_TCH_T].max, ids);
md                948 drivers/input/touchscreen/cyttsp4_core.c 	md->num_prv_tch = num_cur_tch;
md                956 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md                957 drivers/input/touchscreen/cyttsp4_core.c 	struct device *dev = &md->input->dev;
md                958 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_sysinfo *si = md->si;
md               1030 drivers/input/touchscreen/cyttsp4_core.c 		cyttsp4_get_mt_touches(md, num_cur_tch);
md               1032 drivers/input/touchscreen/cyttsp4_core.c 		cyttsp4_lift_all(md);
md               1043 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md               1046 drivers/input/touchscreen/cyttsp4_core.c 	if (!md->si)
md               1049 drivers/input/touchscreen/cyttsp4_core.c 	mutex_lock(&md->report_lock);
md               1050 drivers/input/touchscreen/cyttsp4_core.c 	if (!md->is_suspended) {
md               1057 drivers/input/touchscreen/cyttsp4_core.c 	mutex_unlock(&md->report_lock);
md               1651 drivers/input/touchscreen/cyttsp4_core.c 	cyttsp4_lift_all(&cd->md);
md               1838 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md               1841 drivers/input/touchscreen/cyttsp4_core.c 	md->is_suspended = true;
md               1854 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md               1857 drivers/input/touchscreen/cyttsp4_core.c 	md->is_suspended = false;
md               1883 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = input_get_drvdata(input);
md               1884 drivers/input/touchscreen/cyttsp4_core.c 	mutex_lock(&md->report_lock);
md               1885 drivers/input/touchscreen/cyttsp4_core.c 	if (!md->is_suspended)
md               1887 drivers/input/touchscreen/cyttsp4_core.c 	mutex_unlock(&md->report_lock);
md               1894 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md               1902 drivers/input/touchscreen/cyttsp4_core.c 	__set_bit(EV_ABS, md->input->evbit);
md               1903 drivers/input/touchscreen/cyttsp4_core.c 	__set_bit(EV_REL, md->input->evbit);
md               1904 drivers/input/touchscreen/cyttsp4_core.c 	__set_bit(EV_KEY, md->input->evbit);
md               1906 drivers/input/touchscreen/cyttsp4_core.c 	max_x_tmp = md->si->si_ofs.max_x;
md               1907 drivers/input/touchscreen/cyttsp4_core.c 	max_y_tmp = md->si->si_ofs.max_y;
md               1910 drivers/input/touchscreen/cyttsp4_core.c 	if (md->pdata->flags & CY_FLAG_FLIP) {
md               1917 drivers/input/touchscreen/cyttsp4_core.c 	max_p = md->si->si_ofs.max_p;
md               1920 drivers/input/touchscreen/cyttsp4_core.c 	for (i = 0; i < (md->pdata->frmwrk->size / CY_NUM_ABS_SET); i++) {
md               1921 drivers/input/touchscreen/cyttsp4_core.c 		signal = md->pdata->frmwrk->abs
md               1924 drivers/input/touchscreen/cyttsp4_core.c 			__set_bit(signal, md->input->absbit);
md               1925 drivers/input/touchscreen/cyttsp4_core.c 			min = md->pdata->frmwrk->abs
md               1927 drivers/input/touchscreen/cyttsp4_core.c 			max = md->pdata->frmwrk->abs
md               1939 drivers/input/touchscreen/cyttsp4_core.c 			input_set_abs_params(md->input, signal, min, max,
md               1940 drivers/input/touchscreen/cyttsp4_core.c 				md->pdata->frmwrk->abs
md               1942 drivers/input/touchscreen/cyttsp4_core.c 				md->pdata->frmwrk->abs
md               1947 drivers/input/touchscreen/cyttsp4_core.c 				(md->si->si_ofs.tch_rec_size <
md               1953 drivers/input/touchscreen/cyttsp4_core.c 	input_mt_init_slots(md->input, md->si->si_ofs.tch_abs[CY_TCH_T].max,
md               1955 drivers/input/touchscreen/cyttsp4_core.c 	rc = input_register_device(md->input);
md               1965 drivers/input/touchscreen/cyttsp4_core.c 	struct cyttsp4_mt_data *md = &cd->md;
md               1969 drivers/input/touchscreen/cyttsp4_core.c 	mutex_init(&md->report_lock);
md               1970 drivers/input/touchscreen/cyttsp4_core.c 	md->pdata = pdata;
md               1974 drivers/input/touchscreen/cyttsp4_core.c 	md->input = input_allocate_device();
md               1975 drivers/input/touchscreen/cyttsp4_core.c 	if (md->input == NULL) {
md               1982 drivers/input/touchscreen/cyttsp4_core.c 	md->input->name = pdata->inp_dev_name;
md               1983 drivers/input/touchscreen/cyttsp4_core.c 	scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev));
md               1984 drivers/input/touchscreen/cyttsp4_core.c 	md->input->phys = md->phys;
md               1985 drivers/input/touchscreen/cyttsp4_core.c 	md->input->id.bustype = cd->bus_ops->bustype;
md               1986 drivers/input/touchscreen/cyttsp4_core.c 	md->input->dev.parent = dev;
md               1987 drivers/input/touchscreen/cyttsp4_core.c 	md->input->open = cyttsp4_mt_open;
md               1988 drivers/input/touchscreen/cyttsp4_core.c 	md->input->close = cyttsp4_mt_close;
md               1989 drivers/input/touchscreen/cyttsp4_core.c 	input_set_drvdata(md->input, md);
md               1992 drivers/input/touchscreen/cyttsp4_core.c 	md->si = &cd->sysinfo;
md               2001 drivers/input/touchscreen/cyttsp4_core.c 	input_free_device(md->input);
md               2133 drivers/input/touchscreen/cyttsp4_core.c static void cyttsp4_mt_release(struct cyttsp4_mt_data *md)
md               2135 drivers/input/touchscreen/cyttsp4_core.c 	input_unregister_device(md->input);
md               2136 drivers/input/touchscreen/cyttsp4_core.c 	input_set_drvdata(md->input, NULL);
md               2143 drivers/input/touchscreen/cyttsp4_core.c 	cyttsp4_mt_release(&cd->md);
md                350 drivers/input/touchscreen/cyttsp4_core.h 	struct cyttsp4_mt_data md;
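
The cyttsp4 hits above include the touch coordinate fix-ups: CY_FLAG_FLIP swaps the axes, and CY_FLAG_INV_X/CY_FLAG_INV_Y mirror an axis, with the subtlety that once the axes are swapped, inverting X must use the Y maximum (and vice versa), which is why the driver picks max_y for the X inversion under the flip flag. A standalone version of that transform; the flag values are illustrative:

#include <stdio.h>

#define FLAG_FLIP	0x1
#define FLAG_INV_X	0x2
#define FLAG_INV_Y	0x4

static void fixup(unsigned int flags, int max_x, int max_y, int *x, int *y)
{
	if (flags & FLAG_FLIP) {
		int tmp = *x;
		*x = *y;
		*y = tmp;
	}
	if (flags & FLAG_INV_X)
		*x = ((flags & FLAG_FLIP) ? max_y : max_x) - *x;
	if (flags & FLAG_INV_Y)
		*y = ((flags & FLAG_FLIP) ? max_x : max_y) - *y;
}

int main(void)
{
	int x = 100, y = 30;

	fixup(FLAG_FLIP | FLAG_INV_X, 800, 480, &x, &y);
	printf("reported (%d, %d)\n", x, y);	/* (480 - 30, 100) = (450, 100) */
	return 0;
}
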
md                118 drivers/md/dm-core.h void disable_discard(struct mapped_device *md);
md                119 drivers/md/dm-core.h void disable_write_same(struct mapped_device *md);
md                120 drivers/md/dm-core.h void disable_write_zeroes(struct mapped_device *md);
md                849 drivers/md/dm-crypt.c 	struct mapped_device *md = dm_table_get_md(ti->table);
md                869 drivers/md/dm-crypt.c 		DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
md                877 drivers/md/dm-crypt.c 		DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
md                 34 drivers/md/dm-era-target.c 	struct writeset_metadata md;
md                 74 drivers/md/dm-era-target.c 	ws->md.nr_bits = nr_blocks;
md                 75 drivers/md/dm-era-target.c 	ws->md.root = INVALID_WRITESET_ROOT;
md                 92 drivers/md/dm-era-target.c 	memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
md                 94 drivers/md/dm-era-target.c 	r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
md                138 drivers/md/dm-era-target.c 		r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
md                300 drivers/md/dm-era-target.c static int superblock_read_lock(struct era_metadata *md,
md                303 drivers/md/dm-era-target.c 	return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
md                307 drivers/md/dm-era-target.c static int superblock_lock_zero(struct era_metadata *md,
md                310 drivers/md/dm-era-target.c 	return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
md                314 drivers/md/dm-era-target.c static int superblock_lock(struct era_metadata *md,
md                317 drivers/md/dm-era-target.c 	return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
md                367 drivers/md/dm-era-target.c 	struct era_metadata *md = context;
md                374 drivers/md/dm-era-target.c 	dm_tm_inc(md->tm, b);
md                379 drivers/md/dm-era-target.c 	struct era_metadata *md = context;
md                386 drivers/md/dm-era-target.c 	dm_bitset_del(&md->bitset_info, b);
md                396 drivers/md/dm-era-target.c static void setup_writeset_tree_info(struct era_metadata *md)
md                398 drivers/md/dm-era-target.c 	struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
md                399 drivers/md/dm-era-target.c 	md->writeset_tree_info.tm = md->tm;
md                400 drivers/md/dm-era-target.c 	md->writeset_tree_info.levels = 1;
md                401 drivers/md/dm-era-target.c 	vt->context = md;
md                408 drivers/md/dm-era-target.c static void setup_era_array_info(struct era_metadata *md)
md                418 drivers/md/dm-era-target.c 	dm_array_info_init(&md->era_array_info, md->tm, &vt);
md                421 drivers/md/dm-era-target.c static void setup_infos(struct era_metadata *md)
md                423 drivers/md/dm-era-target.c 	dm_disk_bitset_init(md->tm, &md->bitset_info);
md                424 drivers/md/dm-era-target.c 	setup_writeset_tree_info(md);
md                425 drivers/md/dm-era-target.c 	setup_era_array_info(md);
md                430 drivers/md/dm-era-target.c static int create_fresh_metadata(struct era_metadata *md)
md                434 drivers/md/dm-era-target.c 	r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
md                435 drivers/md/dm-era-target.c 				 &md->tm, &md->sm);
md                441 drivers/md/dm-era-target.c 	setup_infos(md);
md                443 drivers/md/dm-era-target.c 	r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
md                449 drivers/md/dm-era-target.c 	r = dm_array_empty(&md->era_array_info, &md->era_array_root);
md                458 drivers/md/dm-era-target.c 	dm_sm_destroy(md->sm);
md                459 drivers/md/dm-era-target.c 	dm_tm_destroy(md->tm);
md                464 drivers/md/dm-era-target.c static int save_sm_root(struct era_metadata *md)
md                469 drivers/md/dm-era-target.c 	r = dm_sm_root_size(md->sm, &metadata_len);
md                473 drivers/md/dm-era-target.c 	return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
md                477 drivers/md/dm-era-target.c static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
md                480 drivers/md/dm-era-target.c 	       &md->metadata_space_map_root,
md                481 drivers/md/dm-era-target.c 	       sizeof(md->metadata_space_map_root));
md                489 drivers/md/dm-era-target.c static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
md                498 drivers/md/dm-era-target.c 	copy_sm_root(md, disk);
md                500 drivers/md/dm-era-target.c 	disk->data_block_size = cpu_to_le32(md->block_size);
md                502 drivers/md/dm-era-target.c 	disk->nr_blocks = cpu_to_le32(md->nr_blocks);
md                503 drivers/md/dm-era-target.c 	disk->current_era = cpu_to_le32(md->current_era);
md                505 drivers/md/dm-era-target.c 	ws_pack(&md->current_writeset->md, &disk->current_writeset);
md                506 drivers/md/dm-era-target.c 	disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
md                507 drivers/md/dm-era-target.c 	disk->era_array_root = cpu_to_le64(md->era_array_root);
md                508 drivers/md/dm-era-target.c 	disk->metadata_snap = cpu_to_le64(md->metadata_snap);
md                511 drivers/md/dm-era-target.c static int write_superblock(struct era_metadata *md)
md                517 drivers/md/dm-era-target.c 	r = save_sm_root(md);
md                523 drivers/md/dm-era-target.c 	r = superblock_lock_zero(md, &sblock);
md                528 drivers/md/dm-era-target.c 	prepare_superblock(md, disk);
md                530 drivers/md/dm-era-target.c 	return dm_tm_commit(md->tm, sblock);
md                536 drivers/md/dm-era-target.c static int format_metadata(struct era_metadata *md)
md                540 drivers/md/dm-era-target.c 	r = create_fresh_metadata(md);
md                544 drivers/md/dm-era-target.c 	r = write_superblock(md);
md                546 drivers/md/dm-era-target.c 		dm_sm_destroy(md->sm);
md                547 drivers/md/dm-era-target.c 		dm_tm_destroy(md->tm);
md                554 drivers/md/dm-era-target.c static int open_metadata(struct era_metadata *md)
md                560 drivers/md/dm-era-target.c 	r = superblock_read_lock(md, &sblock);
md                567 drivers/md/dm-era-target.c 	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
md                570 drivers/md/dm-era-target.c 			       &md->tm, &md->sm);
md                576 drivers/md/dm-era-target.c 	setup_infos(md);
md                578 drivers/md/dm-era-target.c 	md->block_size = le32_to_cpu(disk->data_block_size);
md                579 drivers/md/dm-era-target.c 	md->nr_blocks = le32_to_cpu(disk->nr_blocks);
md                580 drivers/md/dm-era-target.c 	md->current_era = le32_to_cpu(disk->current_era);
md                582 drivers/md/dm-era-target.c 	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
md                583 drivers/md/dm-era-target.c 	md->era_array_root = le64_to_cpu(disk->era_array_root);
md                584 drivers/md/dm-era-target.c 	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
md                585 drivers/md/dm-era-target.c 	md->archived_writesets = true;
md                596 drivers/md/dm-era-target.c static int open_or_format_metadata(struct era_metadata *md,
md                602 drivers/md/dm-era-target.c 	r = superblock_all_zeroes(md->bm, &unformatted);
md                607 drivers/md/dm-era-target.c 		return may_format ? format_metadata(md) : -EPERM;
md                609 drivers/md/dm-era-target.c 	return open_metadata(md);
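The open_or_format_metadata() hits above encode a common persistent-data bootstrap: probe whether the superblock area is all zeroes, format a fresh store if the caller allows it, otherwise open what is already there. A minimal userspace sketch of that decision — hypothetical names standing in for the dm-era helpers, not the kernel code itself:

static int open_or_format(const unsigned char *sb, int len, int may_format,
			  int (*format)(void), int (*open)(void))
{
	int unformatted = 1;

	/* superblock_all_zeroes() analogue: any set byte means "already formatted" */
	for (int i = 0; i < len; i++) {
		if (sb[i]) {
			unformatted = 0;
			break;
		}
	}

	if (unformatted)
		return may_format ? format() : -1;   /* -EPERM when formatting is not allowed */

	return open();
}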
md                612 drivers/md/dm-era-target.c static int create_persistent_data_objects(struct era_metadata *md,
md                617 drivers/md/dm-era-target.c 	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
md                619 drivers/md/dm-era-target.c 	if (IS_ERR(md->bm)) {
md                621 drivers/md/dm-era-target.c 		return PTR_ERR(md->bm);
md                624 drivers/md/dm-era-target.c 	r = open_or_format_metadata(md, may_format);
md                626 drivers/md/dm-era-target.c 		dm_block_manager_destroy(md->bm);
md                631 drivers/md/dm-era-target.c static void destroy_persistent_data_objects(struct era_metadata *md)
md                633 drivers/md/dm-era-target.c 	dm_sm_destroy(md->sm);
md                634 drivers/md/dm-era-target.c 	dm_tm_destroy(md->tm);
md                635 drivers/md/dm-era-target.c 	dm_block_manager_destroy(md->bm);
md                641 drivers/md/dm-era-target.c static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
md                643 drivers/md/dm-era-target.c 	rcu_assign_pointer(md->current_writeset, new_writeset);
md                664 drivers/md/dm-era-target.c static int metadata_digest_lookup_writeset(struct era_metadata *md,
md                667 drivers/md/dm-era-target.c static int metadata_digest_remove_writeset(struct era_metadata *md,
md                673 drivers/md/dm-era-target.c 	r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
md                674 drivers/md/dm-era-target.c 			    &key, &md->writeset_tree_root);
md                686 drivers/md/dm-era-target.c static int metadata_digest_transcribe_writeset(struct era_metadata *md,
md                704 drivers/md/dm-era-target.c 		r = dm_array_set_value(&md->era_array_info, md->era_array_root,
md                705 drivers/md/dm-era-target.c 				       b, &d->value, &md->era_array_root);
md                720 drivers/md/dm-era-target.c static int metadata_digest_lookup_writeset(struct era_metadata *md,
md                727 drivers/md/dm-era-target.c 	r = dm_btree_find_lowest_key(&md->writeset_tree_info,
md                728 drivers/md/dm-era-target.c 				     md->writeset_tree_root, &key);
md                734 drivers/md/dm-era-target.c 	r = dm_btree_lookup(&md->writeset_tree_info,
md                735 drivers/md/dm-era-target.c 			    md->writeset_tree_root, &key, &disk);
md                749 drivers/md/dm-era-target.c 	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
md                756 drivers/md/dm-era-target.c static int metadata_digest_start(struct era_metadata *md, struct digest *d)
md                767 drivers/md/dm-era-target.c 	dm_disk_bitset_init(md->tm, &d->info);
md                782 drivers/md/dm-era-target.c 	struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);
md                784 drivers/md/dm-era-target.c 	if (!md)
md                787 drivers/md/dm-era-target.c 	md->bdev = bdev;
md                788 drivers/md/dm-era-target.c 	md->block_size = block_size;
md                790 drivers/md/dm-era-target.c 	md->writesets[0].md.root = INVALID_WRITESET_ROOT;
md                791 drivers/md/dm-era-target.c 	md->writesets[1].md.root = INVALID_WRITESET_ROOT;
md                792 drivers/md/dm-era-target.c 	md->current_writeset = &md->writesets[0];
md                794 drivers/md/dm-era-target.c 	r = create_persistent_data_objects(md, may_format);
md                796 drivers/md/dm-era-target.c 		kfree(md);
md                800 drivers/md/dm-era-target.c 	return md;
md                803 drivers/md/dm-era-target.c static void metadata_close(struct era_metadata *md)
md                805 drivers/md/dm-era-target.c 	destroy_persistent_data_objects(md);
md                806 drivers/md/dm-era-target.c 	kfree(md);
md                818 drivers/md/dm-era-target.c static int metadata_resize(struct era_metadata *md, void *arg)
md                830 drivers/md/dm-era-target.c 	writeset_free(&md->writesets[0]);
md                831 drivers/md/dm-era-target.c 	writeset_free(&md->writesets[1]);
md                833 drivers/md/dm-era-target.c 	r = writeset_alloc(&md->writesets[0], *new_size);
md                839 drivers/md/dm-era-target.c 	r = writeset_alloc(&md->writesets[1], *new_size);
md                847 drivers/md/dm-era-target.c 	r = dm_array_resize(&md->era_array_info, md->era_array_root,
md                848 drivers/md/dm-era-target.c 			    md->nr_blocks, *new_size,
md                849 drivers/md/dm-era-target.c 			    &value, &md->era_array_root);
md                855 drivers/md/dm-era-target.c 	md->nr_blocks = *new_size;
md                859 drivers/md/dm-era-target.c static int metadata_era_archive(struct era_metadata *md)
md                865 drivers/md/dm-era-target.c 	r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
md                866 drivers/md/dm-era-target.c 			    &md->current_writeset->md.root);
md                872 drivers/md/dm-era-target.c 	ws_pack(&md->current_writeset->md, &value);
md                873 drivers/md/dm-era-target.c 	md->current_writeset->md.root = INVALID_WRITESET_ROOT;
md                875 drivers/md/dm-era-target.c 	keys[0] = md->current_era;
md                877 drivers/md/dm-era-target.c 	r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
md                878 drivers/md/dm-era-target.c 			    keys, &value, &md->writeset_tree_root);
md                885 drivers/md/dm-era-target.c 	md->archived_writesets = true;
md                890 drivers/md/dm-era-target.c static struct writeset *next_writeset(struct era_metadata *md)
md                892 drivers/md/dm-era-target.c 	return (md->current_writeset == &md->writesets[0]) ?
md                893 drivers/md/dm-era-target.c 		&md->writesets[1] : &md->writesets[0];
md                896 drivers/md/dm-era-target.c static int metadata_new_era(struct era_metadata *md)
md                899 drivers/md/dm-era-target.c 	struct writeset *new_writeset = next_writeset(md);
md                901 drivers/md/dm-era-target.c 	r = writeset_init(&md->bitset_info, new_writeset);
md                907 drivers/md/dm-era-target.c 	swap_writeset(md, new_writeset);
md                908 drivers/md/dm-era-target.c 	md->current_era++;
md                913 drivers/md/dm-era-target.c static int metadata_era_rollover(struct era_metadata *md)
md                917 drivers/md/dm-era-target.c 	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
md                918 drivers/md/dm-era-target.c 		r = metadata_era_archive(md);
md                926 drivers/md/dm-era-target.c 	r = metadata_new_era(md);
md                936 drivers/md/dm-era-target.c static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
md                942 drivers/md/dm-era-target.c 	ws = rcu_dereference(md->current_writeset);
md                949 drivers/md/dm-era-target.c static int metadata_commit(struct era_metadata *md)
md                954 drivers/md/dm-era-target.c 	if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
md                955 drivers/md/dm-era-target.c 		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
md                956 drivers/md/dm-era-target.c 				    &md->current_writeset->md.root);
md                963 drivers/md/dm-era-target.c 	r = dm_tm_pre_commit(md->tm);
md                969 drivers/md/dm-era-target.c 	r = save_sm_root(md);
md                975 drivers/md/dm-era-target.c 	r = superblock_lock(md, &sblock);
md                981 drivers/md/dm-era-target.c 	prepare_superblock(md, dm_block_data(sblock));
md                983 drivers/md/dm-era-target.c 	return dm_tm_commit(md->tm, sblock);
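metadata_commit() above follows the classic copy-on-write publication order: flush the dirty writeset bitset, run the transaction manager's pre-commit, save the space-map root, and only then rewrite the superblock, so the new tree roots become visible atomically. A compressed sketch of that ordering with hypothetical types — only the sequencing is the point:

struct tm { unsigned long staged_root, committed_root; };

static int pre_commit(struct tm *tm) { (void)tm; return 0; }   /* flush bitsets/caches */
static int save_roots(struct tm *tm) { (void)tm; return 0; }   /* save_sm_root() analogue */

static int commit(struct tm *tm)
{
	if (pre_commit(tm) || save_roots(tm))
		return -1;
	/* the superblock write is the single publication point */
	tm->committed_root = tm->staged_root;
	return 0;
}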
md                986 drivers/md/dm-era-target.c static int metadata_checkpoint(struct era_metadata *md)
md                992 drivers/md/dm-era-target.c 	return metadata_era_rollover(md);
md                998 drivers/md/dm-era-target.c static int metadata_take_snap(struct era_metadata *md)
md               1003 drivers/md/dm-era-target.c 	if (md->metadata_snap != SUPERBLOCK_LOCATION) {
md               1008 drivers/md/dm-era-target.c 	r = metadata_era_rollover(md);
md               1014 drivers/md/dm-era-target.c 	r = metadata_commit(md);
md               1020 drivers/md/dm-era-target.c 	r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
md               1026 drivers/md/dm-era-target.c 	r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
md               1030 drivers/md/dm-era-target.c 		dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
md               1035 drivers/md/dm-era-target.c 	r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
md               1038 drivers/md/dm-era-target.c 		dm_tm_unlock(md->tm, clone);
md               1042 drivers/md/dm-era-target.c 	r = dm_sm_inc_block(md->sm, md->era_array_root);
md               1045 drivers/md/dm-era-target.c 		dm_sm_dec_block(md->sm, md->writeset_tree_root);
md               1046 drivers/md/dm-era-target.c 		dm_tm_unlock(md->tm, clone);
md               1050 drivers/md/dm-era-target.c 	md->metadata_snap = dm_block_location(clone);
md               1052 drivers/md/dm-era-target.c 	dm_tm_unlock(md->tm, clone);
md               1057 drivers/md/dm-era-target.c static int metadata_drop_snap(struct era_metadata *md)
md               1064 drivers/md/dm-era-target.c 	if (md->metadata_snap == SUPERBLOCK_LOCATION) {
md               1069 drivers/md/dm-era-target.c 	r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
md               1079 drivers/md/dm-era-target.c 	md->metadata_snap = SUPERBLOCK_LOCATION;
md               1082 drivers/md/dm-era-target.c 	r = dm_btree_del(&md->writeset_tree_info,
md               1086 drivers/md/dm-era-target.c 		dm_tm_unlock(md->tm, clone);
md               1090 drivers/md/dm-era-target.c 	r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
md               1093 drivers/md/dm-era-target.c 		dm_tm_unlock(md->tm, clone);
md               1098 drivers/md/dm-era-target.c 	dm_tm_unlock(md->tm, clone);
md               1100 drivers/md/dm-era-target.c 	return dm_sm_dec_block(md->sm, location);
md               1110 drivers/md/dm-era-target.c static int metadata_get_stats(struct era_metadata *md, void *ptr)
md               1116 drivers/md/dm-era-target.c 	r = dm_sm_get_nr_free(md->sm, &nr_free);
md               1122 drivers/md/dm-era-target.c 	r = dm_sm_get_nr_blocks(md->sm, &nr_total);
md               1130 drivers/md/dm-era-target.c 	s->snap = md->metadata_snap;
md               1131 drivers/md/dm-era-target.c 	s->era = md->current_era;
md               1148 drivers/md/dm-era-target.c 	struct era_metadata *md;
md               1215 drivers/md/dm-era-target.c 	r = era->digest.step(era->md, &era->digest);
md               1241 drivers/md/dm-era-target.c 		r = writeset_test_and_set(&era->md->bitset_info,
md               1242 drivers/md/dm-era-target.c 					  era->md->current_writeset,
md               1258 drivers/md/dm-era-target.c 		r = metadata_commit(era->md);
md               1284 drivers/md/dm-era-target.c 		rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
md               1289 drivers/md/dm-era-target.c 		r = metadata_commit(era->md);
md               1301 drivers/md/dm-era-target.c 	if (era->md->archived_writesets) {
md               1302 drivers/md/dm-era-target.c 		era->md->archived_writesets = false;
md               1303 drivers/md/dm-era-target.c 		metadata_digest_start(era->md, &era->digest);
md               1392 drivers/md/dm-era-target.c 	if (era->md)
md               1393 drivers/md/dm-era-target.c 		metadata_close(era->md);
md               1428 drivers/md/dm-era-target.c 	struct era_metadata *md;
md               1481 drivers/md/dm-era-target.c 	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
md               1482 drivers/md/dm-era-target.c 	if (IS_ERR(md)) {
md               1485 drivers/md/dm-era-target.c 		return PTR_ERR(md);
md               1487 drivers/md/dm-era-target.c 	era->md = md;
md               1491 drivers/md/dm-era-target.c 	r = metadata_resize(era->md, &era->nr_blocks);
md               1545 drivers/md/dm-era-target.c 	    !metadata_current_marked(era->md, block)) {
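Most of the remaining dm-era hits revolve around one structure: era_metadata keeps two writesets, and era rollover flips between them, publishing the new one (rcu_assign_pointer in the kernel) before bumping current_era. A self-contained userspace model of that double buffer, using simplified, hypothetical types:

#include <stdio.h>

struct writeset { unsigned long bits; };   /* stand-in for the on-disk bitset */

struct era_md {
	struct writeset  writesets[2];      /* ping-pong buffers */
	struct writeset *current_writeset;  /* published via RCU in the kernel */
	unsigned         current_era;
};

/* next_writeset(): pick whichever writeset is not currently live */
static struct writeset *next_writeset(struct era_md *md)
{
	return (md->current_writeset == &md->writesets[0]) ?
		&md->writesets[1] : &md->writesets[0];
}

static void era_rollover(struct era_md *md)
{
	struct writeset *nw = next_writeset(md);

	nw->bits = 0;                 /* writeset_init(): start the new era empty */
	md->current_writeset = nw;    /* swap_writeset(): rcu_assign_pointer() in-kernel */
	md->current_era++;
}

int main(void)
{
	struct era_md md = { .current_writeset = &md.writesets[0] };

	era_rollover(&md);
	printf("era %u uses writeset %ld\n", md.current_era,
	       (long)(md.current_writeset - md.writesets));
	return 0;
}

The RCU publication matters because metadata_current_marked() (line 936 above) reads current_writeset from the I/O path via rcu_dereference() without taking the rollover lock.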
md                 44 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                119 drivers/md/dm-ioctl.c 			dm_get(hc->md);
md                133 drivers/md/dm-ioctl.c 			dm_get(hc->md);
md                142 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                145 drivers/md/dm-ioctl.c 	md = dm_get_md(huge_decode_dev(dev));
md                146 drivers/md/dm-ioctl.c 	if (!md)
md                149 drivers/md/dm-ioctl.c 	hc = dm_get_mdptr(md);
md                151 drivers/md/dm-ioctl.c 		dm_put(md);
md                162 drivers/md/dm-ioctl.c 				    struct mapped_device *md)
md                190 drivers/md/dm-ioctl.c 	hc->md = md;
md                208 drivers/md/dm-ioctl.c static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
md                215 drivers/md/dm-ioctl.c 	cell = alloc_cell(name, uuid, md);
md                225 drivers/md/dm-ioctl.c 		dm_put(hc->md);
md                235 drivers/md/dm-ioctl.c 			dm_put(hc->md);
md                240 drivers/md/dm-ioctl.c 	dm_get(md);
md                242 drivers/md/dm-ioctl.c 	dm_set_mdptr(md, cell);
md                263 drivers/md/dm-ioctl.c 	dm_set_mdptr(hc->md, NULL);
md                266 drivers/md/dm-ioctl.c 	table = dm_get_live_table(hc->md, &srcu_idx);
md                269 drivers/md/dm-ioctl.c 	dm_put_live_table(hc->md, srcu_idx);
md                274 drivers/md/dm-ioctl.c 	dm_put(hc->md);
md                284 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                294 drivers/md/dm-ioctl.c 			md = hc->md;
md                295 drivers/md/dm-ioctl.c 			dm_get(md);
md                298 drivers/md/dm-ioctl.c 			    dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
md                299 drivers/md/dm-ioctl.c 				dm_put(md);
md                309 drivers/md/dm-ioctl.c 				dm_sync_table(md);
md                312 drivers/md/dm-ioctl.c 			dm_put(md);
md                314 drivers/md/dm-ioctl.c 				dm_destroy(md);
md                316 drivers/md/dm-ioctl.c 				dm_destroy_immediate(md);
md                375 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                401 drivers/md/dm-ioctl.c 		dm_put(hc->md);
md                426 drivers/md/dm-ioctl.c 		dm_put(hc->md);
md                440 drivers/md/dm-ioctl.c 	table = dm_get_live_table(hc->md, &srcu_idx);
md                443 drivers/md/dm-ioctl.c 	dm_put_live_table(hc->md, srcu_idx);
md                445 drivers/md/dm-ioctl.c 	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
md                448 drivers/md/dm-ioctl.c 	md = hc->md;
md                452 drivers/md/dm-ioctl.c 	return md;
md                548 drivers/md/dm-ioctl.c 			disk = dm_disk(hc->md);
md                555 drivers/md/dm-ioctl.c 			*event_nr = dm_get_event_nr(hc->md);
md                682 drivers/md/dm-ioctl.c static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
md                688 drivers/md/dm-ioctl.c 	dm_get_live_table(md, srcu_idx);
md                691 drivers/md/dm-ioctl.c 	hc = dm_get_mdptr(md);
md                692 drivers/md/dm-ioctl.c 	if (!hc || hc->md != md) {
md                705 drivers/md/dm-ioctl.c static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
md                710 drivers/md/dm-ioctl.c 		dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
md                717 drivers/md/dm-ioctl.c static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
md                719 drivers/md/dm-ioctl.c 	struct gendisk *disk = dm_disk(md);
md                726 drivers/md/dm-ioctl.c 	if (dm_suspended_md(md))
md                729 drivers/md/dm-ioctl.c 	if (dm_suspended_internally_md(md))
md                732 drivers/md/dm-ioctl.c 	if (dm_test_deferred_remove_flag(md))
md                742 drivers/md/dm-ioctl.c 	param->open_count = dm_open_count(md);
md                744 drivers/md/dm-ioctl.c 	param->event_nr = dm_get_event_nr(md);
md                747 drivers/md/dm-ioctl.c 	table = dm_get_live_table(md, &srcu_idx);
md                757 drivers/md/dm-ioctl.c 	dm_put_live_table(md, srcu_idx);
md                761 drivers/md/dm-ioctl.c 		table = dm_get_inactive_table(md, &srcu_idx);
md                767 drivers/md/dm-ioctl.c 		dm_put_live_table(md, srcu_idx);
md                774 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                783 drivers/md/dm-ioctl.c 	r = dm_create(m, &md);
md                787 drivers/md/dm-ioctl.c 	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
md                789 drivers/md/dm-ioctl.c 		dm_put(md);
md                790 drivers/md/dm-ioctl.c 		dm_destroy(md);
md                796 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md                798 drivers/md/dm-ioctl.c 	dm_put(md);
md                852 drivers/md/dm-ioctl.c 	struct mapped_device *md = NULL;
md                857 drivers/md/dm-ioctl.c 		md = hc->md;
md                860 drivers/md/dm-ioctl.c 	return md;
md                866 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                879 drivers/md/dm-ioctl.c 	md = hc->md;
md                884 drivers/md/dm-ioctl.c 	r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
md                888 drivers/md/dm-ioctl.c 			dm_put(md);
md                893 drivers/md/dm-ioctl.c 		dm_put(md);
md                901 drivers/md/dm-ioctl.c 		dm_sync_table(md);
md                907 drivers/md/dm-ioctl.c 	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
md                910 drivers/md/dm-ioctl.c 	dm_put(md);
md                911 drivers/md/dm-ioctl.c 	dm_destroy(md);
md                932 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                948 drivers/md/dm-ioctl.c 	md = dm_hash_rename(param, new_data);
md                949 drivers/md/dm-ioctl.c 	if (IS_ERR(md))
md                950 drivers/md/dm-ioctl.c 		return PTR_ERR(md);
md                952 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md                953 drivers/md/dm-ioctl.c 	dm_put(md);
md                961 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md                967 drivers/md/dm-ioctl.c 	md = find_device(param);
md                968 drivers/md/dm-ioctl.c 	if (!md)
md                996 drivers/md/dm-ioctl.c 	r = dm_set_geometry(md, &geometry);
md               1001 drivers/md/dm-ioctl.c 	dm_put(md);
md               1009 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1011 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1012 drivers/md/dm-ioctl.c 	if (!md)
md               1020 drivers/md/dm-ioctl.c 	if (!dm_suspended_md(md)) {
md               1021 drivers/md/dm-ioctl.c 		r = dm_suspend(md, suspend_flags);
md               1026 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1029 drivers/md/dm-ioctl.c 	dm_put(md);
md               1039 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1051 drivers/md/dm-ioctl.c 	md = hc->md;
md               1066 drivers/md/dm-ioctl.c 		if (!dm_suspended_md(md))
md               1067 drivers/md/dm-ioctl.c 			dm_suspend(md, suspend_flags);
md               1069 drivers/md/dm-ioctl.c 		old_map = dm_swap_table(md, new_map);
md               1071 drivers/md/dm-ioctl.c 			dm_sync_table(md);
md               1073 drivers/md/dm-ioctl.c 			dm_put(md);
md               1078 drivers/md/dm-ioctl.c 			set_disk_ro(dm_disk(md), 0);
md               1080 drivers/md/dm-ioctl.c 			set_disk_ro(dm_disk(md), 1);
md               1083 drivers/md/dm-ioctl.c 	if (dm_suspended_md(md)) {
md               1084 drivers/md/dm-ioctl.c 		r = dm_resume(md);
md               1085 drivers/md/dm-ioctl.c 		if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
md               1097 drivers/md/dm-ioctl.c 		__dev_status(md, param);
md               1099 drivers/md/dm-ioctl.c 	dm_put(md);
md               1121 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1123 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1124 drivers/md/dm-ioctl.c 	if (!md)
md               1127 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1128 drivers/md/dm-ioctl.c 	dm_put(md);
md               1213 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1217 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1218 drivers/md/dm-ioctl.c 	if (!md)
md               1224 drivers/md/dm-ioctl.c 	if (dm_wait_event(md, param->event_nr)) {
md               1234 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1236 drivers/md/dm-ioctl.c 	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
md               1239 drivers/md/dm-ioctl.c 	dm_put_live_table(md, srcu_idx);
md               1242 drivers/md/dm-ioctl.c 	dm_put(md);
md               1334 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1337 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1338 drivers/md/dm-ioctl.c 	if (!md)
md               1341 drivers/md/dm-ioctl.c 	r = dm_table_create(&t, get_mode(param), param->target_count, md);
md               1346 drivers/md/dm-ioctl.c 	dm_lock_md_type(md);
md               1351 drivers/md/dm-ioctl.c 	immutable_target_type = dm_get_immutable_target_type(md);
md               1361 drivers/md/dm-ioctl.c 	if (dm_get_md_type(md) == DM_TYPE_NONE) {
md               1363 drivers/md/dm-ioctl.c 		dm_set_md_type(md, dm_table_get_type(t));
md               1366 drivers/md/dm-ioctl.c 		r = dm_setup_md_queue(md, t);
md               1371 drivers/md/dm-ioctl.c 	} else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
md               1373 drivers/md/dm-ioctl.c 		       dm_get_md_type(md), dm_table_get_type(t));
md               1378 drivers/md/dm-ioctl.c 	dm_unlock_md_type(md);
md               1382 drivers/md/dm-ioctl.c 	hc = dm_get_mdptr(md);
md               1383 drivers/md/dm-ioctl.c 	if (!hc || hc->md != md) {
md               1396 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1399 drivers/md/dm-ioctl.c 		dm_sync_table(md);
md               1403 drivers/md/dm-ioctl.c 	dm_put(md);
md               1408 drivers/md/dm-ioctl.c 	dm_unlock_md_type(md);
md               1412 drivers/md/dm-ioctl.c 	dm_put(md);
md               1420 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1439 drivers/md/dm-ioctl.c 	__dev_status(hc->md, param);
md               1440 drivers/md/dm-ioctl.c 	md = hc->md;
md               1443 drivers/md/dm-ioctl.c 		dm_sync_table(md);
md               1446 drivers/md/dm-ioctl.c 	dm_put(md);
md               1493 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1497 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1498 drivers/md/dm-ioctl.c 	if (!md)
md               1501 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1503 drivers/md/dm-ioctl.c 	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
md               1506 drivers/md/dm-ioctl.c 	dm_put_live_table(md, srcu_idx);
md               1508 drivers/md/dm-ioctl.c 	dm_put(md);
md               1519 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1523 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1524 drivers/md/dm-ioctl.c 	if (!md)
md               1527 drivers/md/dm-ioctl.c 	__dev_status(md, param);
md               1529 drivers/md/dm-ioctl.c 	table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
md               1532 drivers/md/dm-ioctl.c 	dm_put_live_table(md, srcu_idx);
md               1534 drivers/md/dm-ioctl.c 	dm_put(md);
md               1545 drivers/md/dm-ioctl.c static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
md               1558 drivers/md/dm-ioctl.c 		return dm_cancel_deferred_remove(md);
md               1561 drivers/md/dm-ioctl.c 	r = dm_stats_message(md, argc, argv, result, maxlen);
md               1576 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               1584 drivers/md/dm-ioctl.c 	md = find_device(param);
md               1585 drivers/md/dm-ioctl.c 	if (!md)
md               1606 drivers/md/dm-ioctl.c 	r = message_for_md(md, argc, argv, result, maxlen);
md               1610 drivers/md/dm-ioctl.c 	table = dm_get_live_table(md, &srcu_idx);
md               1614 drivers/md/dm-ioctl.c 	if (dm_deleting_md(md)) {
md               1631 drivers/md/dm-ioctl.c 	dm_put_live_table(md, srcu_idx);
md               1636 drivers/md/dm-ioctl.c 		__dev_status(md, param);
md               1647 drivers/md/dm-ioctl.c 	dm_put(md);
md               2022 drivers/md/dm-ioctl.c int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
md               2027 drivers/md/dm-ioctl.c 	if (!md)
md               2031 drivers/md/dm-ioctl.c 	hc = dm_get_mdptr(md);
md               2032 drivers/md/dm-ioctl.c 	if (!hc || hc->md != md) {
md               2072 drivers/md/dm-ioctl.c 	struct mapped_device *md;
md               2086 drivers/md/dm-ioctl.c 	r = dm_create(m, &md);
md               2091 drivers/md/dm-ioctl.c 	r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
md               2096 drivers/md/dm-ioctl.c 	r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
md               2117 drivers/md/dm-ioctl.c 	md->type = dm_table_get_type(t);
md               2119 drivers/md/dm-ioctl.c 	r = dm_setup_md_queue(md, t);
md               2126 drivers/md/dm-ioctl.c 	dm_suspend(md, 0);
md               2127 drivers/md/dm-ioctl.c 	old_map = dm_swap_table(md, t);
md               2132 drivers/md/dm-ioctl.c 	set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));
md               2135 drivers/md/dm-ioctl.c 	r = dm_resume(md);
md               2139 drivers/md/dm-ioctl.c 	DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);
md               2140 drivers/md/dm-ioctl.c 	dm_put(md);
md               2148 drivers/md/dm-ioctl.c 	dm_put(md);
md               2150 drivers/md/dm-ioctl.c 	dm_put(md);
md               2151 drivers/md/dm-ioctl.c 	dm_destroy(md);
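The dm-ioctl hits share one data structure: a hash_cell binding a name and an optional uuid to a mapped_device, looked up by either key (or, failing that, by dev_t) with a reference taken on hc->md before the lock drops. A linear-scan sketch of the lookup side — hypothetical struct layout; the kernel keeps separate name and uuid hash tables:

#include <string.h>

struct mapped_dev;   /* opaque here, as in the real header */

struct hash_cell {
	const char        *name;
	const char        *uuid;
	struct mapped_dev *md;   /* the real code dm_get()s this before returning */
};

static struct hash_cell *get_cell(struct hash_cell *cells, int n,
				  const char *name, const char *uuid)
{
	for (int i = 0; i < n; i++) {
		if (name && !strcmp(cells[i].name, name))
			return &cells[i];
		if (uuid && cells[i].uuid && !strcmp(cells[i].uuid, uuid))
			return &cells[i];
	}
	return 0;   /* caller falls back to a dev_t lookup, as at line 142 above */
}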
md                439 drivers/md/dm-mpath.c 	struct mapped_device *md = dm_table_get_md((m)->ti->table);	\
md                442 drivers/md/dm-mpath.c 		 dm_device_name(md),					\
md                240 drivers/md/dm-raid.c 	struct mddev md;
md                256 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md                265 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md                396 drivers/md/dm-raid.c 	return !rs->md.level;
md                402 drivers/md/dm-raid.c 	return rs->md.level == 1;
md                408 drivers/md/dm-raid.c 	return rs->md.level == 10;
md                414 drivers/md/dm-raid.c 	return rs->md.level == 6;
md                420 drivers/md/dm-raid.c 	return __within_range(rs->md.level, 4, 6);
md                428 drivers/md/dm-raid.c 	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
md                434 drivers/md/dm-raid.c 	return rs->md.recovery_cp < rs->md.dev_sectors;
md                440 drivers/md/dm-raid.c 	return rs->md.reshape_position != MaxSector;
md                680 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md                699 drivers/md/dm-raid.c 	set_capacity(gendisk, rs->md.array_sectors);
md                709 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md                722 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md                748 drivers/md/dm-raid.c 	mddev_init(&rs->md);
md                756 drivers/md/dm-raid.c 	rs->md.raid_disks = raid_devs;
md                757 drivers/md/dm-raid.c 	rs->md.level = raid_type->level;
md                758 drivers/md/dm-raid.c 	rs->md.new_level = rs->md.level;
md                759 drivers/md/dm-raid.c 	rs->md.layout = raid_type->algorithm;
md                760 drivers/md/dm-raid.c 	rs->md.new_layout = rs->md.layout;
md                761 drivers/md/dm-raid.c 	rs->md.delta_disks = 0;
md                762 drivers/md/dm-raid.c 	rs->md.recovery_cp = MaxSector;
md                841 drivers/md/dm-raid.c 		rs->dev[i].rdev.mddev = &rs->md;
md                893 drivers/md/dm-raid.c 		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
md                899 drivers/md/dm-raid.c 		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
md                902 drivers/md/dm-raid.c 		rs->md.external = 0;
md                903 drivers/md/dm-raid.c 		rs->md.persistent = 1;
md                904 drivers/md/dm-raid.c 		rs->md.major_version = 2;
md                905 drivers/md/dm-raid.c 	} else if (rebuild && !rs->md.recovery_cp) {
md                975 drivers/md/dm-raid.c 		if (region_size < rs->md.chunk_sectors) {
md                984 drivers/md/dm-raid.c 	rs->md.bitmap_info.chunksize = to_bytes(region_size);
md               1004 drivers/md/dm-raid.c 	for (i = 0; i < rs->md.raid_disks; i++)
md               1009 drivers/md/dm-raid.c 	switch (rs->md.level) {
md               1013 drivers/md/dm-raid.c 		if (rebuild_cnt >= rs->md.raid_disks)
md               1023 drivers/md/dm-raid.c 		copies = raid10_md_layout_to_copies(rs->md.new_layout);
md               1046 drivers/md/dm-raid.c 		if (__is_raid10_near(rs->md.new_layout)) {
md               1047 drivers/md/dm-raid.c 			for (i = 0; i < rs->md.raid_disks; i++) {
md               1070 drivers/md/dm-raid.c 		group_size = (rs->md.raid_disks / copies);
md               1071 drivers/md/dm-raid.c 		last_group_start = (rs->md.raid_disks / group_size) - 1;
md               1073 drivers/md/dm-raid.c 		for (i = 0; i < rs->md.raid_disks; i++) {
md               1154 drivers/md/dm-raid.c 	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
md               1259 drivers/md/dm-raid.c 			jdev->mddev = &rs->md;
md               1327 drivers/md/dm-raid.c 			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
md               1355 drivers/md/dm-raid.c 			rs->md.bitmap_info.max_write_behind = value / 2;
md               1365 drivers/md/dm-raid.c 			rs->md.bitmap_info.daemon_sleep = value;
md               1418 drivers/md/dm-raid.c 			rs->md.sync_speed_min = value;
md               1429 drivers/md/dm-raid.c 			rs->md.sync_speed_max = value;
md               1444 drivers/md/dm-raid.c 			if (!__within_range(value, 2, rs->md.raid_disks)) {
md               1470 drivers/md/dm-raid.c 	if (write_mostly >= rs->md.raid_disks) {
md               1475 drivers/md/dm-raid.c 	if (rs->md.sync_speed_max &&
md               1476 drivers/md/dm-raid.c 	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
md               1484 drivers/md/dm-raid.c 	if (rs->md.chunk_sectors)
md               1485 drivers/md/dm-raid.c 		max_io_len = rs->md.chunk_sectors;
md               1493 drivers/md/dm-raid.c 		if (raid10_copies > rs->md.raid_disks) {
md               1498 drivers/md/dm-raid.c 		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
md               1499 drivers/md/dm-raid.c 		if (rs->md.new_layout < 0) {
md               1501 drivers/md/dm-raid.c 			return rs->md.new_layout;
md               1504 drivers/md/dm-raid.c 		rt = get_raid_type_by_ll(10, rs->md.new_layout);
md               1521 drivers/md/dm-raid.c 	rs->md.persistent = 0;
md               1522 drivers/md/dm-raid.c 	rs->md.external = 1;
md               1533 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               1571 drivers/md/dm-raid.c 	return rs->md.raid_disks - rs->raid_type->parity_devs;
md               1588 drivers/md/dm-raid.c 	for (i = 0; i < rs->md.raid_disks; i++) {
md               1605 drivers/md/dm-raid.c 	rdev_for_each(rdev, &rs->md)
md               1608 drivers/md/dm-raid.c 			if (ds < rs->md.dev_sectors) {
md               1622 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               1677 drivers/md/dm-raid.c 		rs->md.recovery_cp = MaxSector;
md               1684 drivers/md/dm-raid.c 		rs->md.recovery_cp = dev_sectors;
md               1690 drivers/md/dm-raid.c 		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
md               1712 drivers/md/dm-raid.c 	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
md               1727 drivers/md/dm-raid.c 	return mddev_congested(&rs->md, bits);
md               1738 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               1741 drivers/md/dm-raid.c 	if (rs->md.degraded) {
md               1892 drivers/md/dm-raid.c 	return rs->md.new_level != rs->md.level;
md               1899 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2029 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2102 drivers/md/dm-raid.c 	struct raid_set *rs = container_of(mddev, struct raid_set, md);
md               2227 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2451 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2524 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2684 drivers/md/dm-raid.c 	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
md               2694 drivers/md/dm-raid.c 	if (rs->md.recovery_cp < rs->md.dev_sectors)
md               2695 drivers/md/dm-raid.c 		rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
md               2698 drivers/md/dm-raid.c 	rdev_for_each(rdev, &rs->md) {
md               2714 drivers/md/dm-raid.c 	rdev_for_each(rdev, &rs->md) {
md               2727 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2773 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2834 drivers/md/dm-raid.c 	rdev_for_each(rdev, &rs->md)
md               2857 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               2948 drivers/md/dm-raid.c 		rdev_for_each(rdev, &rs->md)
md               3062 drivers/md/dm-raid.c 	rs->md.sync_super = super_sync;
md               3074 drivers/md/dm-raid.c 	calculated_dev_sectors = rs->md.dev_sectors;
md               3099 drivers/md/dm-raid.c 	INIT_WORK(&rs->md.event_work, do_table_event);
md               3112 drivers/md/dm-raid.c 	if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
md               3220 drivers/md/dm-raid.c 	rs->md.ro = 1;
md               3221 drivers/md/dm-raid.c 	rs->md.in_sync = 1;
md               3224 drivers/md/dm-raid.c 	set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
md               3227 drivers/md/dm-raid.c 	mddev_lock_nointr(&rs->md);
md               3228 drivers/md/dm-raid.c 	r = md_run(&rs->md);
md               3229 drivers/md/dm-raid.c 	rs->md.in_sync = 0; /* Assume already marked dirty */
md               3232 drivers/md/dm-raid.c 		mddev_unlock(&rs->md);
md               3236 drivers/md/dm-raid.c 	r = md_start(&rs->md);
md               3240 drivers/md/dm-raid.c 		mddev_unlock(&rs->md);
md               3249 drivers/md/dm-raid.c 		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
md               3252 drivers/md/dm-raid.c 			mddev_unlock(&rs->md);
md               3257 drivers/md/dm-raid.c 	mddev_suspend(&rs->md);
md               3276 drivers/md/dm-raid.c 		if (rs->md.pers->start_reshape) {
md               3277 drivers/md/dm-raid.c 			r = rs->md.pers->check_reshape(&rs->md);
md               3288 drivers/md/dm-raid.c 	mddev_unlock(&rs->md);
md               3295 drivers/md/dm-raid.c 	md_stop(&rs->md);
md               3307 drivers/md/dm-raid.c 	md_stop(&rs->md);
md               3314 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3413 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3506 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3529 drivers/md/dm-raid.c 		recovery = rs->md.recovery;
md               3536 drivers/md/dm-raid.c 		sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
md               3673 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3727 drivers/md/dm-raid.c 	for (i = 0; !r && i < rs->md.raid_disks; i++)
md               3732 drivers/md/dm-raid.c 				 rs->md.dev_sectors,
md               3741 drivers/md/dm-raid.c 	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
md               3752 drivers/md/dm-raid.c 		limits->max_discard_sectors = rs->md.chunk_sectors;
md               3762 drivers/md/dm-raid.c 		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
md               3763 drivers/md/dm-raid.c 			md_stop_writes(&rs->md);
md               3765 drivers/md/dm-raid.c 		mddev_lock_nointr(&rs->md);
md               3766 drivers/md/dm-raid.c 		mddev_suspend(&rs->md);
md               3767 drivers/md/dm-raid.c 		mddev_unlock(&rs->md);
md               3778 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3839 drivers/md/dm-raid.c 		rdev_for_each(r, &rs->md) {
md               3861 drivers/md/dm-raid.c 		r = md_bitmap_load(&rs->md);
md               3872 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3891 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3938 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
md               3993 drivers/md/dm-raid.c 	struct mddev *mddev = &rs->md;
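dm-raid's hits are dominated by rs->md because struct raid_set embeds a full struct mddev; md.c callbacks receive only the mddev pointer, and dm-raid recovers the enclosing raid_set with container_of (line 2102 above). The idiom in isolation, with stand-in struct members:

#include <stddef.h>
#include <stdio.h>

/* stand-ins: the real structs live in drivers/md/md.h and dm-raid.c */
struct mddev    { int level; };
struct raid_set { unsigned long ctr_flags; struct mddev md; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct raid_set rs = { .md = { .level = 6 } };
	struct mddev *mddev = &rs.md;   /* what dm-raid hands to the md core */
	struct raid_set *back = container_of(mddev, struct raid_set, md);

	printf("recovered raid_set: %p == %p\n", (void *)back, (void *)&rs);
	return 0;
}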
md                 19 drivers/md/dm-rq.c 	struct mapped_device *md;
md                 60 drivers/md/dm-rq.c int dm_request_based(struct mapped_device *md)
md                 62 drivers/md/dm-rq.c 	return queue_is_mq(md->queue);
md                131 drivers/md/dm-rq.c static void rq_end_stats(struct mapped_device *md, struct request *orig)
md                133 drivers/md/dm-rq.c 	if (unlikely(dm_stats_used(&md->stats))) {
md                136 drivers/md/dm-rq.c 		dm_stats_account_io(&md->stats, rq_data_dir(orig),
md                147 drivers/md/dm-rq.c static void rq_completed(struct mapped_device *md)
md                150 drivers/md/dm-rq.c 	if (unlikely(wq_has_sleeper(&md->wait)))
md                151 drivers/md/dm-rq.c 		wake_up(&md->wait);
md                156 drivers/md/dm-rq.c 	dm_put(md);
md                167 drivers/md/dm-rq.c 	struct mapped_device *md = tio->md;
md                173 drivers/md/dm-rq.c 	rq_end_stats(md, rq);
md                175 drivers/md/dm-rq.c 	rq_completed(md);
md                183 drivers/md/dm-rq.c void dm_mq_kick_requeue_list(struct mapped_device *md)
md                185 drivers/md/dm-rq.c 	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
md                197 drivers/md/dm-rq.c 	struct mapped_device *md = tio->md;
md                201 drivers/md/dm-rq.c 	rq_end_stats(md, rq);
md                208 drivers/md/dm-rq.c 	rq_completed(md);
md                227 drivers/md/dm-rq.c 			disable_discard(tio->md);
md                230 drivers/md/dm-rq.c 			disable_write_same(tio->md);
md                233 drivers/md/dm-rq.c 			disable_write_zeroes(tio->md);
md                268 drivers/md/dm-rq.c 		struct mapped_device *md = tio->md;
md                270 drivers/md/dm-rq.c 		rq_end_stats(md, rq);
md                272 drivers/md/dm-rq.c 		rq_completed(md);
md                347 drivers/md/dm-rq.c 	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
md                361 drivers/md/dm-rq.c 		     struct mapped_device *md)
md                363 drivers/md/dm-rq.c 	tio->md = md;
md                374 drivers/md/dm-rq.c 	if (!md->init_tio_pdu)
md                388 drivers/md/dm-rq.c 	struct mapped_device *md = tio->md;
md                406 drivers/md/dm-rq.c 		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
md                437 drivers/md/dm-rq.c ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
md                442 drivers/md/dm-rq.c ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
md                448 drivers/md/dm-rq.c static void dm_start_request(struct mapped_device *md, struct request *orig)
md                452 drivers/md/dm-rq.c 	if (unlikely(dm_stats_used(&md->stats))) {
md                456 drivers/md/dm-rq.c 		dm_stats_account_io(&md->stats, rq_data_dir(orig),
md                468 drivers/md/dm-rq.c 	dm_get(md);
md                474 drivers/md/dm-rq.c 	struct mapped_device *md = set->driver_data;
md                481 drivers/md/dm-rq.c 	tio->md = md;
md                483 drivers/md/dm-rq.c 	if (md->init_tio_pdu) {
md                496 drivers/md/dm-rq.c 	struct mapped_device *md = tio->md;
md                497 drivers/md/dm-rq.c 	struct dm_target *ti = md->immutable_target;
md                501 drivers/md/dm-rq.c 		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
md                504 drivers/md/dm-rq.c 		dm_put_live_table(md, srcu_idx);
md                510 drivers/md/dm-rq.c 	dm_start_request(md, rq);
md                513 drivers/md/dm-rq.c 	init_tio(tio, rq, md);
md                523 drivers/md/dm-rq.c 		rq_end_stats(md, rq);
md                524 drivers/md/dm-rq.c 		rq_completed(md);
md                537 drivers/md/dm-rq.c int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md                543 drivers/md/dm-rq.c 	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
md                544 drivers/md/dm-rq.c 	if (!md->tag_set)
md                547 drivers/md/dm-rq.c 	md->tag_set->ops = &dm_mq_ops;
md                548 drivers/md/dm-rq.c 	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md                549 drivers/md/dm-rq.c 	md->tag_set->numa_node = md->numa_node_id;
md                550 drivers/md/dm-rq.c 	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
md                551 drivers/md/dm-rq.c 	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md                552 drivers/md/dm-rq.c 	md->tag_set->driver_data = md;
md                554 drivers/md/dm-rq.c 	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
md                558 drivers/md/dm-rq.c 		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
md                559 drivers/md/dm-rq.c 		md->init_tio_pdu = true;
md                562 drivers/md/dm-rq.c 	err = blk_mq_alloc_tag_set(md->tag_set);
md                566 drivers/md/dm-rq.c 	q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
md                575 drivers/md/dm-rq.c 	blk_mq_free_tag_set(md->tag_set);
md                577 drivers/md/dm-rq.c 	kfree(md->tag_set);
md                582 drivers/md/dm-rq.c void dm_mq_cleanup_mapped_device(struct mapped_device *md)
md                584 drivers/md/dm-rq.c 	if (md->tag_set) {
md                585 drivers/md/dm-rq.c 		blk_mq_free_tag_set(md->tag_set);
md                586 drivers/md/dm-rq.c 		kfree(md->tag_set);
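dm_mq_init_request_queue() above is a textbook allocate-configure-unwind ladder: kzalloc the tag set, fill in its fields, allocate the hardware context, and free in reverse order on any failure — the same two steps dm_mq_cleanup_mapped_device() undoes. A userspace sketch of the shape, with hypothetical names in place of the blk-mq API:

#include <stdlib.h>

struct tag_set { int depth; };
struct dev     { struct tag_set *tag_set; };

static int  alloc_hw_resources(struct tag_set *ts) { return ts->depth > 0 ? 0 : -1; }
static void free_hw_resources(struct tag_set *ts)  { (void)ts; }

static int init_queue(struct dev *d, int depth)
{
	int err;

	d->tag_set = calloc(1, sizeof(*d->tag_set));
	if (!d->tag_set)
		return -1;                      /* -ENOMEM */

	d->tag_set->depth = depth;

	err = alloc_hw_resources(d->tag_set);   /* blk_mq_alloc_tag_set() analogue */
	if (err)
		goto out_kfree_tag_set;

	return 0;

out_kfree_tag_set:
	free(d->tag_set);
	d->tag_set = 0;
	return err;
}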
md                 33 drivers/md/dm-rq.h int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
md                 34 drivers/md/dm-rq.h void dm_mq_cleanup_mapped_device(struct mapped_device *md);
md                 39 drivers/md/dm-rq.h void dm_mq_kick_requeue_list(struct mapped_device *md);
md                 43 drivers/md/dm-rq.h ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
md                 44 drivers/md/dm-rq.h ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
md                241 drivers/md/dm-stats.c 			   struct mapped_device *md)
md                359 drivers/md/dm-stats.c 	suspend_callback(md);
md                381 drivers/md/dm-stats.c 	resume_callback(md);
md                387 drivers/md/dm-stats.c 	resume_callback(md);
md                944 drivers/md/dm-stats.c static int message_stats_create(struct mapped_device *md,
md                978 drivers/md/dm-stats.c 		len = dm_get_size(md);
md               1049 drivers/md/dm-stats.c 	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
md               1051 drivers/md/dm-stats.c 			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
md               1069 drivers/md/dm-stats.c static int message_stats_delete(struct mapped_device *md,
md               1081 drivers/md/dm-stats.c 	return dm_stats_delete(dm_get_stats(md), id);
md               1084 drivers/md/dm-stats.c static int message_stats_clear(struct mapped_device *md,
md               1096 drivers/md/dm-stats.c 	return dm_stats_clear(dm_get_stats(md), id);
md               1099 drivers/md/dm-stats.c static int message_stats_list(struct mapped_device *md,
md               1115 drivers/md/dm-stats.c 	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);
md               1122 drivers/md/dm-stats.c static int message_stats_print(struct mapped_device *md,
md               1145 drivers/md/dm-stats.c 	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
md               1149 drivers/md/dm-stats.c static int message_stats_set_aux(struct mapped_device *md,
md               1161 drivers/md/dm-stats.c 	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
md               1164 drivers/md/dm-stats.c int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
md               1171 drivers/md/dm-stats.c 		r = message_stats_create(md, argc, argv, result, maxlen);
md               1173 drivers/md/dm-stats.c 		r = message_stats_delete(md, argc, argv);
md               1175 drivers/md/dm-stats.c 		r = message_stats_clear(md, argc, argv);
md               1177 drivers/md/dm-stats.c 		r = message_stats_list(md, argc, argv, result, maxlen);
md               1179 drivers/md/dm-stats.c 		r = message_stats_print(md, argc, argv, false, result, maxlen);
md               1181 drivers/md/dm-stats.c 		r = message_stats_print(md, argc, argv, true, result, maxlen);
md               1183 drivers/md/dm-stats.c 		r = message_stats_set_aux(md, argc, argv);
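dm_stats_message() is a string dispatcher: the first argv token selects one of the message_stats_* handlers, each of which re-parses the remaining arguments against the device's stats. The kernel uses an if/else strcasecmp chain; a table-driven sketch of the same shape, with hypothetical handler names:

#include <strings.h>   /* strcasecmp */

typedef int (*stats_fn)(int argc, char **argv);

static int do_create(int argc, char **argv) { (void)argc; (void)argv; return 0; }
static int do_delete(int argc, char **argv) { (void)argc; (void)argv; return 0; }

static int stats_message(int argc, char **argv)
{
	static const struct { const char *name; stats_fn fn; } tbl[] = {
		{ "@stats_create", do_create },
		{ "@stats_delete", do_delete },
	};

	for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (!strcasecmp(argv[0], tbl[i].name))
			return tbl[i].fn(argc, argv);

	return -2;   /* unknown message: the caller falls through to target messages */
}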
md                 30 drivers/md/dm-stats.h int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
md                 26 drivers/md/dm-sysfs.c 	struct mapped_device *md;
md                 33 drivers/md/dm-sysfs.c 	md = dm_get_from_kobject(kobj);
md                 34 drivers/md/dm-sysfs.c 	if (!md)
md                 37 drivers/md/dm-sysfs.c 	ret = dm_attr->show(md, page);
md                 38 drivers/md/dm-sysfs.c 	dm_put(md);
md                 51 drivers/md/dm-sysfs.c 	struct mapped_device *md;
md                 58 drivers/md/dm-sysfs.c 	md = dm_get_from_kobject(kobj);
md                 59 drivers/md/dm-sysfs.c 	if (!md)
md                 62 drivers/md/dm-sysfs.c 	ret = dm_attr->store(md, page, count);
md                 63 drivers/md/dm-sysfs.c 	dm_put(md);
md                 68 drivers/md/dm-sysfs.c static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
md                 70 drivers/md/dm-sysfs.c 	if (dm_copy_name_and_uuid(md, buf, NULL))
md                 77 drivers/md/dm-sysfs.c static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
md                 79 drivers/md/dm-sysfs.c 	if (dm_copy_name_and_uuid(md, NULL, buf))
md                 86 drivers/md/dm-sysfs.c static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
md                 88 drivers/md/dm-sysfs.c 	sprintf(buf, "%d\n", dm_suspended_md(md));
md                 93 drivers/md/dm-sysfs.c static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
md                131 drivers/md/dm-sysfs.c int dm_sysfs_init(struct mapped_device *md)
md                133 drivers/md/dm-sysfs.c 	return kobject_init_and_add(dm_kobject(md), &dm_ktype,
md                134 drivers/md/dm-sysfs.c 				    &disk_to_dev(dm_disk(md))->kobj,
md                141 drivers/md/dm-sysfs.c void dm_sysfs_exit(struct mapped_device *md)
md                143 drivers/md/dm-sysfs.c 	struct kobject *kobj = dm_kobject(md);
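The dm-sysfs hits trace a double indirection: the kobject is resolved back to its mapped_device (dm_get_from_kobject), and each attribute carries its own show hook, so dm_attr_show() is pure glue. A stripped-down model of the attribute side, with hypothetical types:

#include <stdio.h>

struct mapped_dev { const char *name; };

struct dm_attr {
	const char *attr_name;
	long (*show)(struct mapped_dev *md, char *buf);
};

static long name_show(struct mapped_dev *md, char *buf)
{
	return sprintf(buf, "%s\n", md->name);   /* dm_attr_name_show() analogue */
}

static const struct dm_attr dm_attr_name = { "name", name_show };

static long attr_show(const struct dm_attr *a, struct mapped_dev *md, char *buf)
{
	if (!md)
		return -1;   /* device vanished between lookup and show */
	return a->show(md, buf);
}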
md                 33 drivers/md/dm-table.c 	struct mapped_device *md;
md                185 drivers/md/dm-table.c 		    unsigned num_targets, struct mapped_device *md)
md                212 drivers/md/dm-table.c 	t->md = md;
md                217 drivers/md/dm-table.c static void free_devices(struct list_head *devices, struct mapped_device *md)
md                225 drivers/md/dm-table.c 		       dm_device_name(md), dd->dm_dev->name);
md                226 drivers/md/dm-table.c 		dm_put_table_device(md, dd->dm_dev);
md                255 drivers/md/dm-table.c 	free_devices(&t->devices, t->md);
md                300 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
md                313 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
md                329 drivers/md/dm-table.c 			       dm_device_name(ti->table->md),
md                346 drivers/md/dm-table.c 			       dm_device_name(ti->table->md),
md                359 drivers/md/dm-table.c 		       dm_device_name(ti->table->md),
md                368 drivers/md/dm-table.c 		       dm_device_name(ti->table->md),
md                384 drivers/md/dm-table.c 			struct mapped_device *md)
md                391 drivers/md/dm-table.c 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
md                397 drivers/md/dm-table.c 	dm_put_table_device(md, old_dev);
md                446 drivers/md/dm-table.c 		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
md                456 drivers/md/dm-table.c 		r = upgrade_mode(dd, mode, t->md);
md                477 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b));
md                485 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
md                513 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), d->name);
md                517 drivers/md/dm-table.c 		dm_put_table_device(ti->table->md, d);
md                698 drivers/md/dm-table.c 		       dm_device_name(table->md), i,
md                717 drivers/md/dm-table.c 		      dm_device_name(t->md), t->targets->type->name);
md                727 drivers/md/dm-table.c 		DMERR("%s: zero-length target", dm_device_name(t->md));
md                733 drivers/md/dm-table.c 		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
md                794 drivers/md/dm-table.c 		       dm_device_name(t->md), type);
md                799 drivers/md/dm-table.c 	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
md                947 drivers/md/dm-table.c 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
md               1028 drivers/md/dm-table.c 		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
md               1033 drivers/md/dm-table.c 		dm_put_live_table(t->md, srcu_idx);
md               1104 drivers/md/dm-table.c static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
md               1124 drivers/md/dm-table.c 	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
md               1228 drivers/md/dm-table.c 		       dm_device_name(t->md),
md               1246 drivers/md/dm-table.c 	struct mapped_device *md = t->md;
md               1257 drivers/md/dm-table.c 	if (!integrity_profile_exists(dm_disk(md))) {
md               1263 drivers/md/dm-table.c 		blk_integrity_register(dm_disk(md),
md               1272 drivers/md/dm-table.c 	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
md               1275 drivers/md/dm-table.c 		       dm_device_name(t->md),
md               1311 drivers/md/dm-table.c 	r = dm_table_alloc_md_mempools(t, t->md);
md               1484 drivers/md/dm-table.c 		      dm_device_name(table->md));
md               1494 drivers/md/dm-table.c 		      dm_device_name(table->md));
md               1559 drivers/md/dm-table.c 			       dm_device_name(table->md),
md               1620 drivers/md/dm-table.c 		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
md               1624 drivers/md/dm-table.c 	if (integrity_profile_exists(dm_disk(t->md))) {
md               1626 drivers/md/dm-table.c 		       dm_device_name(t->md));
md               1627 drivers/md/dm-table.c 		blk_integrity_unregister(dm_disk(t->md));
md               1926 drivers/md/dm-table.c 			set_dax_synchronous(t->md->dax_dev);
md               1932 drivers/md/dm-table.c 		dax_write_cache(t->md->dax_dev, true);
md               1973 drivers/md/dm-table.c 		blk_revalidate_disk_zones(t->md->disk);
md               2006 drivers/md/dm-table.c 	lockdep_assert_held(&t->md->suspend_lock);
md               2055 drivers/md/dm-table.c 	lockdep_assert_held(&t->md->suspend_lock);
md               2066 drivers/md/dm-table.c 			      dm_device_name(t->md), ti->type->name, r);
md               2102 drivers/md/dm-table.c 				     dm_device_name(t->md),
md               2115 drivers/md/dm-table.c 	return t->md;
md               2121 drivers/md/dm-table.c 	return dm_device_name(t->md);
md               2127 drivers/md/dm-table.c 	struct mapped_device *md;
md               2133 drivers/md/dm-table.c 	md = dm_table_get_md(t);
md               2134 drivers/md/dm-table.c 	queue = dm_get_md_queue(md);
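Across dm-table.c, dm-ioctl.c, dm-rq.c and dm.c, the most repeated idiom in this listing is the strict bracket dm_get_live_table(md, &srcu_idx) ... dm_put_live_table(md, srcu_idx): the returned table pointer is only valid inside the SRCU read-side section the pair delimits. A loose userspace approximation substituting a rwlock for SRCU — hypothetical names; real SRCU readers are sleepable and far cheaper:

#include <pthread.h>

struct dm_table;   /* opaque, as for callers of the real API */

struct mapped_dev {
	pthread_rwlock_t io_barrier;    /* initialise with pthread_rwlock_init() */
	struct dm_table *live_table;
};

/* get_live_table(): pin the current table; caller must put it back */
static struct dm_table *get_live_table(struct mapped_dev *md)
{
	pthread_rwlock_rdlock(&md->io_barrier);
	return md->live_table;
}

static void put_live_table(struct mapped_dev *md)
{
	pthread_rwlock_unlock(&md->io_barrier);
}

The srcu_idx token the real API threads through plays the role the lock handle plays here: it pairs each put with the exact read-side section its get opened.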
md                551 drivers/md/dm-thin.c static struct pool *__pool_table_lookup(struct mapped_device *md)
md                558 drivers/md/dm-thin.c 		if (tmp->pool_md == md) {
md                 31 drivers/md/dm-uevent.c 	struct mapped_device *md;
md                 44 drivers/md/dm-uevent.c static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
md                 53 drivers/md/dm-uevent.c 	event->md = md;
md                 58 drivers/md/dm-uevent.c static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
md                 67 drivers/md/dm-uevent.c 	event = dm_uevent_alloc(md);
md                 88 drivers/md/dm-uevent.c 			   dm_next_uevent_seq(md))) {
md                133 drivers/md/dm-uevent.c 		if (dm_copy_name_and_uuid(event->md, event->name,
md                173 drivers/md/dm-uevent.c 	struct mapped_device *md = dm_table_get_md(ti->table);
md                181 drivers/md/dm-uevent.c 	event = dm_build_path_uevent(md, ti,
md                188 drivers/md/dm-uevent.c 	dm_uevent_add(md, &event->elist);
md                218 drivers/md/dm-verity-target.c 	struct mapped_device *md = dm_table_get_md(v->ti->table);
md                248 drivers/md/dm-verity-target.c 	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
md                 94 drivers/md/dm.c 	struct mapped_device *md;
md                324 drivers/md/dm.c int dm_deleting_md(struct mapped_device *md)
md                326 drivers/md/dm.c 	return test_bit(DMF_DELETING, &md->flags);
md                331 drivers/md/dm.c 	struct mapped_device *md;
md                335 drivers/md/dm.c 	md = bdev->bd_disk->private_data;
md                336 drivers/md/dm.c 	if (!md)
md                339 drivers/md/dm.c 	if (test_bit(DMF_FREEING, &md->flags) ||
md                340 drivers/md/dm.c 	    dm_deleting_md(md)) {
md                341 drivers/md/dm.c 		md = NULL;
md                345 drivers/md/dm.c 	dm_get(md);
md                346 drivers/md/dm.c 	atomic_inc(&md->open_count);
md                350 drivers/md/dm.c 	return md ? 0 : -ENXIO;
md                355 drivers/md/dm.c 	struct mapped_device *md;
md                359 drivers/md/dm.c 	md = disk->private_data;
md                360 drivers/md/dm.c 	if (WARN_ON(!md))
md                363 drivers/md/dm.c 	if (atomic_dec_and_test(&md->open_count) &&
md                364 drivers/md/dm.c 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
md                367 drivers/md/dm.c 	dm_put(md);
md                372 drivers/md/dm.c int dm_open_count(struct mapped_device *md)
md                374 drivers/md/dm.c 	return atomic_read(&md->open_count);
md                380 drivers/md/dm.c int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
md                386 drivers/md/dm.c 	if (dm_open_count(md)) {
md                389 drivers/md/dm.c 			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
md                390 drivers/md/dm.c 	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
md                393 drivers/md/dm.c 		set_bit(DMF_DELETING, &md->flags);
md                400 drivers/md/dm.c int dm_cancel_deferred_remove(struct mapped_device *md)
md                406 drivers/md/dm.c 	if (test_bit(DMF_DELETING, &md->flags))
md                409 drivers/md/dm.c 		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
md                421 drivers/md/dm.c sector_t dm_get_size(struct mapped_device *md)
md                423 drivers/md/dm.c 	return get_capacity(md->disk);
md                426 drivers/md/dm.c struct request_queue *dm_get_md_queue(struct mapped_device *md)
md                428 drivers/md/dm.c 	return md->queue;
md                431 drivers/md/dm.c struct dm_stats *dm_get_stats(struct mapped_device *md)
md                433 drivers/md/dm.c 	return &md->stats;
md                438 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md                440 drivers/md/dm.c 	return dm_get_geometry(md, geo);
md                447 drivers/md/dm.c 	struct mapped_device *md = disk->private_data;
md                452 drivers/md/dm.c 	if (dm_suspended_md(md))
md                455 drivers/md/dm.c 	map = dm_get_live_table(md, &srcu_idx);
md                485 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md                492 drivers/md/dm.c static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
md                494 drivers/md/dm.c 	__acquires(md->io_barrier)
md                502 drivers/md/dm.c 	map = dm_get_live_table(md, srcu_idx);
md                514 drivers/md/dm.c 	if (dm_suspended_md(md))
md                519 drivers/md/dm.c 		dm_put_live_table(md, *srcu_idx);
md                527 drivers/md/dm.c static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
md                528 drivers/md/dm.c 	__releases(md->io_barrier)
md                530 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md                536 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md                539 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
md                559 drivers/md/dm.c 	dm_unprepare_ioctl(md, srcu_idx);
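
dm_prepare_ioctl() and dm_unprepare_ioctl() bracket every block-device method that must forward to the single underlying device; dm_blk_ioctl() above and the persistent-reservation handlers near the end of the file share the same shape. A minimal sketch of the bracketing, with the forwarding step elided:

    /* Sketch: the prepare/unprepare pattern; the body between the two
     * calls operates on the (possibly remapped) bdev. */
    static int example_bdev_method(struct block_device *bdev)
    {
            struct mapped_device *md = bdev->bd_disk->private_data;
            int r, srcu_idx;

            r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
            if (r < 0)
                    goto out;

            /* ... forward the operation to bdev ... */
    out:
            dm_unprepare_ioctl(md, srcu_idx);
            return r;
    }
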
md                565 drivers/md/dm.c static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
md                571 drivers/md/dm.c 	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
md                584 drivers/md/dm.c 	io->md = md;
md                592 drivers/md/dm.c static void free_io(struct mapped_device *md, struct dm_io *io)
md                606 drivers/md/dm.c 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
md                629 drivers/md/dm.c static bool md_in_flight_bios(struct mapped_device *md)
md                632 drivers/md/dm.c 	struct hd_struct *part = &dm_disk(md)->part0;
md                643 drivers/md/dm.c static bool md_in_flight(struct mapped_device *md)
md                645 drivers/md/dm.c 	if (queue_is_mq(md->queue))
md                646 drivers/md/dm.c 		return blk_mq_queue_inflight(md->queue);
md                648 drivers/md/dm.c 		return md_in_flight_bios(md);
md                653 drivers/md/dm.c 	struct mapped_device *md = io->md;
md                658 drivers/md/dm.c 	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
md                659 drivers/md/dm.c 			      &dm_disk(md)->part0);
md                661 drivers/md/dm.c 	if (unlikely(dm_stats_used(&md->stats)))
md                662 drivers/md/dm.c 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
md                669 drivers/md/dm.c 	struct mapped_device *md = io->md;
md                673 drivers/md/dm.c 	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
md                676 drivers/md/dm.c 	if (unlikely(dm_stats_used(&md->stats)))
md                677 drivers/md/dm.c 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
md                682 drivers/md/dm.c 	if (unlikely(wq_has_sleeper(&md->wait)))
md                683 drivers/md/dm.c 		wake_up(&md->wait);
md                689 drivers/md/dm.c static void queue_io(struct mapped_device *md, struct bio *bio)
md                693 drivers/md/dm.c 	spin_lock_irqsave(&md->deferred_lock, flags);
md                694 drivers/md/dm.c 	bio_list_add(&md->deferred, bio);
md                695 drivers/md/dm.c 	spin_unlock_irqrestore(&md->deferred_lock, flags);
md                696 drivers/md/dm.c 	queue_work(md->wq, &md->work);
md                704 drivers/md/dm.c struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
md                706 drivers/md/dm.c 	*srcu_idx = srcu_read_lock(&md->io_barrier);
md                708 drivers/md/dm.c 	return srcu_dereference(md->map, &md->io_barrier);
md                711 drivers/md/dm.c void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
md                713 drivers/md/dm.c 	srcu_read_unlock(&md->io_barrier, srcu_idx);
md                716 drivers/md/dm.c void dm_sync_table(struct mapped_device *md)
md                718 drivers/md/dm.c 	synchronize_srcu(&md->io_barrier);
md                726 drivers/md/dm.c static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
md                729 drivers/md/dm.c 	return rcu_dereference(md->map);
md                732 drivers/md/dm.c static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
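
The live table is protected by SRCU (plain RCU in the _fast variants): readers bracket access with a get/put pair, and writers publish a new map and then call dm_sync_table(). A sketch of the reader side, as used throughout dm.c below (getgeo, ioctl, the DAX ops):

    /* Sketch: SRCU-protected read access to the live table. */
    static void example_read_live_table(struct mapped_device *md)
    {
            int srcu_idx;
            struct dm_table *map = dm_get_live_table(md, &srcu_idx);

            if (map) {
                    /* map is guaranteed live until the matching put */
            }
            dm_put_live_table(md, srcu_idx);
    }
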
md                743 drivers/md/dm.c 			     struct mapped_device *md)
md                755 drivers/md/dm.c 	r = bd_link_disk_holder(bdev, dm_disk(md));
md                769 drivers/md/dm.c static void close_table_device(struct table_device *td, struct mapped_device *md)
md                774 drivers/md/dm.c 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
md                793 drivers/md/dm.c int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
md                799 drivers/md/dm.c 	mutex_lock(&md->table_devices_lock);
md                800 drivers/md/dm.c 	td = find_table_device(&md->table_devices, dev, mode);
md                802 drivers/md/dm.c 		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
md                804 drivers/md/dm.c 			mutex_unlock(&md->table_devices_lock);
md                811 drivers/md/dm.c 		if ((r = open_table_device(td, dev, md))) {
md                812 drivers/md/dm.c 			mutex_unlock(&md->table_devices_lock);
md                820 drivers/md/dm.c 		list_add(&td->list, &md->table_devices);
md                824 drivers/md/dm.c 	mutex_unlock(&md->table_devices_lock);
md                831 drivers/md/dm.c void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
md                835 drivers/md/dm.c 	mutex_lock(&md->table_devices_lock);
md                837 drivers/md/dm.c 		close_table_device(td, md);
md                841 drivers/md/dm.c 	mutex_unlock(&md->table_devices_lock);
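
dm_get_table_device() and dm_put_table_device() form a refcounted open/close pair over md->table_devices, keyed by (dev, mode). A hedged usage sketch; the mode flags are the usual fs.h values:

    /* Sketch: acquire and release a table-device reference; callers
     * typically issue I/O against d->bdev in between. */
    static int example_use_table_device(struct mapped_device *md, dev_t dev)
    {
            struct dm_dev *d;
            int r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &d);

            if (r)
                    return r;
            /* ... use d->bdev ... */
            dm_put_table_device(md, d);
            return 0;
    }
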
md                861 drivers/md/dm.c int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
md                863 drivers/md/dm.c 	*geo = md->geometry;
md                871 drivers/md/dm.c int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
md                880 drivers/md/dm.c 	md->geometry = *geo;
md                885 drivers/md/dm.c static int __noflush_suspending(struct mapped_device *md)
md                887 drivers/md/dm.c 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
md                899 drivers/md/dm.c 	struct mapped_device *md = io->md;
md                904 drivers/md/dm.c 		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
md                914 drivers/md/dm.c 			spin_lock_irqsave(&md->deferred_lock, flags);
md                915 drivers/md/dm.c 			if (__noflush_suspending(md))
md                917 drivers/md/dm.c 				bio_list_add_head(&md->deferred, io->orig_bio);
md                921 drivers/md/dm.c 			spin_unlock_irqrestore(&md->deferred_lock, flags);
md                927 drivers/md/dm.c 		free_io(md, io);
md                938 drivers/md/dm.c 			queue_io(md, bio);
md                948 drivers/md/dm.c void disable_discard(struct mapped_device *md)
md                950 drivers/md/dm.c 	struct queue_limits *limits = dm_get_queue_limits(md);
md                954 drivers/md/dm.c 	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
md                957 drivers/md/dm.c void disable_write_same(struct mapped_device *md)
md                959 drivers/md/dm.c 	struct queue_limits *limits = dm_get_queue_limits(md);
md                965 drivers/md/dm.c void disable_write_zeroes(struct mapped_device *md)
md                967 drivers/md/dm.c 	struct queue_limits *limits = dm_get_queue_limits(md);
md                978 drivers/md/dm.c 	struct mapped_device *md = tio->io->md;
md                981 drivers/md/dm.c 	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
md                984 drivers/md/dm.c 			disable_discard(md);
md                987 drivers/md/dm.c 			disable_write_same(md);
md                990 drivers/md/dm.c 			disable_write_zeroes(md);
md               1063 drivers/md/dm.c static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
md               1065 drivers/md/dm.c 	__acquires(md->io_barrier)
md               1070 drivers/md/dm.c 	map = dm_get_live_table(md, srcu_idx);
md               1084 drivers/md/dm.c 	struct mapped_device *md = dax_get_private(dax_dev);
md               1090 drivers/md/dm.c 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
md               1103 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               1111 drivers/md/dm.c 	struct mapped_device *md = dax_get_private(dax_dev);
md               1116 drivers/md/dm.c 	map = dm_get_live_table(md, &srcu_idx);
md               1122 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               1130 drivers/md/dm.c 	struct mapped_device *md = dax_get_private(dax_dev);
md               1136 drivers/md/dm.c 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
md               1146 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               1154 drivers/md/dm.c 	struct mapped_device *md = dax_get_private(dax_dev);
md               1160 drivers/md/dm.c 	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
md               1170 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
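
Each DAX operation above resolves the target covering the sector with dm_dax_get_live_target() and drops the SRCU reference when done. A sketch of that common prologue/epilogue; the shift converts a page offset to a 512-byte sector, and the delegated call is elided:

    /* Sketch: shape of the dm DAX ops above; the real work delegates
     * to the resolved target's dax method. */
    static long example_dax_op(struct dax_device *dax_dev, pgoff_t pgoff)
    {
            struct mapped_device *md = dax_get_private(dax_dev);
            sector_t sector = (sector_t)pgoff << (PAGE_SHIFT - SECTOR_SHIFT);
            struct dm_target *ti;
            int srcu_idx;
            long ret = 0;

            ti = dm_dax_get_live_target(md, sector, &srcu_idx);
            if (!ti)
                    goto out;
            /* ... call the target's dax handler ... */
    out:
            dm_put_live_table(md, srcu_idx);
            return ret;
    }
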
md               1269 drivers/md/dm.c 	struct mapped_device *md = io->md;
md               1291 drivers/md/dm.c 		if (md->type == DM_TYPE_NVME_BIO_BASED)
md               1334 drivers/md/dm.c 				dm_device_name(tio->io->md),
md               1373 drivers/md/dm.c 			mutex_lock(&ci->io->md->table_devices_lock);
md               1382 drivers/md/dm.c 			mutex_unlock(&ci->io->md->table_devices_lock);
md               1434 drivers/md/dm.c 	bio_set_dev(ci->bio, ci->io->md->bdev);
md               1593 drivers/md/dm.c static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
md               1597 drivers/md/dm.c 	ci->io = alloc_io(md, bio);
md               1607 drivers/md/dm.c static blk_qc_t __split_and_process_bio(struct mapped_device *md,
md               1614 drivers/md/dm.c 	init_clone_info(&ci, md, map, bio);
md               1649 drivers/md/dm.c 							  GFP_NOIO, &md->queue->bio_split);
md               1660 drivers/md/dm.c 				__dm_part_stat_sub(&dm_disk(md)->part0,
md               1665 drivers/md/dm.c 				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
md               1681 drivers/md/dm.c static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
md               1688 drivers/md/dm.c 	init_clone_info(&ci, md, map, bio);
md               1721 drivers/md/dm.c static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
md               1729 drivers/md/dm.c 		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
md               1732 drivers/md/dm.c 		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
md               1738 drivers/md/dm.c static blk_qc_t dm_process_bio(struct mapped_device *md,
md               1742 drivers/md/dm.c 	struct dm_target *ti = md->immutable_target;
md               1764 drivers/md/dm.c 			blk_queue_split(md->queue, &bio);
md               1766 drivers/md/dm.c 			dm_queue_split(md, ti, &bio);
md               1769 drivers/md/dm.c 	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
md               1770 drivers/md/dm.c 		return __process_bio(md, map, bio, ti);
md               1772 drivers/md/dm.c 		return __split_and_process_bio(md, map, bio);
md               1777 drivers/md/dm.c 	struct mapped_device *md = q->queuedata;
md               1782 drivers/md/dm.c 	map = dm_get_live_table(md, &srcu_idx);
md               1785 drivers/md/dm.c 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
md               1786 drivers/md/dm.c 		dm_put_live_table(md, srcu_idx);
md               1789 drivers/md/dm.c 			queue_io(md, bio);
md               1795 drivers/md/dm.c 	ret = dm_process_bio(md, map, bio);
md               1797 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               1804 drivers/md/dm.c 	struct mapped_device *md = congested_data;
md               1807 drivers/md/dm.c 	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
md               1808 drivers/md/dm.c 		if (dm_request_based(md)) {
md               1813 drivers/md/dm.c 			struct backing_dev_info *bdi = md->queue->backing_dev_info;
md               1816 drivers/md/dm.c 			map = dm_get_live_table_fast(md);
md               1819 drivers/md/dm.c 			dm_put_live_table_fast(md);
md               1880 drivers/md/dm.c static void cleanup_mapped_device(struct mapped_device *md)
md               1882 drivers/md/dm.c 	if (md->wq)
md               1883 drivers/md/dm.c 		destroy_workqueue(md->wq);
md               1884 drivers/md/dm.c 	bioset_exit(&md->bs);
md               1885 drivers/md/dm.c 	bioset_exit(&md->io_bs);
md               1887 drivers/md/dm.c 	if (md->dax_dev) {
md               1888 drivers/md/dm.c 		kill_dax(md->dax_dev);
md               1889 drivers/md/dm.c 		put_dax(md->dax_dev);
md               1890 drivers/md/dm.c 		md->dax_dev = NULL;
md               1893 drivers/md/dm.c 	if (md->disk) {
md               1895 drivers/md/dm.c 		md->disk->private_data = NULL;
md               1897 drivers/md/dm.c 		del_gendisk(md->disk);
md               1898 drivers/md/dm.c 		put_disk(md->disk);
md               1901 drivers/md/dm.c 	if (md->queue)
md               1902 drivers/md/dm.c 		blk_cleanup_queue(md->queue);
md               1904 drivers/md/dm.c 	cleanup_srcu_struct(&md->io_barrier);
md               1906 drivers/md/dm.c 	if (md->bdev) {
md               1907 drivers/md/dm.c 		bdput(md->bdev);
md               1908 drivers/md/dm.c 		md->bdev = NULL;
md               1911 drivers/md/dm.c 	mutex_destroy(&md->suspend_lock);
md               1912 drivers/md/dm.c 	mutex_destroy(&md->type_lock);
md               1913 drivers/md/dm.c 	mutex_destroy(&md->table_devices_lock);
md               1915 drivers/md/dm.c 	dm_mq_cleanup_mapped_device(md);
md               1924 drivers/md/dm.c 	struct mapped_device *md;
md               1927 drivers/md/dm.c 	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
md               1928 drivers/md/dm.c 	if (!md) {
md               1944 drivers/md/dm.c 	r = init_srcu_struct(&md->io_barrier);
md               1948 drivers/md/dm.c 	md->numa_node_id = numa_node_id;
md               1949 drivers/md/dm.c 	md->init_tio_pdu = false;
md               1950 drivers/md/dm.c 	md->type = DM_TYPE_NONE;
md               1951 drivers/md/dm.c 	mutex_init(&md->suspend_lock);
md               1952 drivers/md/dm.c 	mutex_init(&md->type_lock);
md               1953 drivers/md/dm.c 	mutex_init(&md->table_devices_lock);
md               1954 drivers/md/dm.c 	spin_lock_init(&md->deferred_lock);
md               1955 drivers/md/dm.c 	atomic_set(&md->holders, 1);
md               1956 drivers/md/dm.c 	atomic_set(&md->open_count, 0);
md               1957 drivers/md/dm.c 	atomic_set(&md->event_nr, 0);
md               1958 drivers/md/dm.c 	atomic_set(&md->uevent_seq, 0);
md               1959 drivers/md/dm.c 	INIT_LIST_HEAD(&md->uevent_list);
md               1960 drivers/md/dm.c 	INIT_LIST_HEAD(&md->table_devices);
md               1961 drivers/md/dm.c 	spin_lock_init(&md->uevent_lock);
md               1963 drivers/md/dm.c 	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
md               1964 drivers/md/dm.c 	if (!md->queue)
md               1966 drivers/md/dm.c 	md->queue->queuedata = md;
md               1972 drivers/md/dm.c 	blk_queue_make_request(md->queue, dm_make_request);
md               1974 drivers/md/dm.c 	md->disk = alloc_disk_node(1, md->numa_node_id);
md               1975 drivers/md/dm.c 	if (!md->disk)
md               1978 drivers/md/dm.c 	init_waitqueue_head(&md->wait);
md               1979 drivers/md/dm.c 	INIT_WORK(&md->work, dm_wq_work);
md               1980 drivers/md/dm.c 	init_waitqueue_head(&md->eventq);
md               1981 drivers/md/dm.c 	init_completion(&md->kobj_holder.completion);
md               1983 drivers/md/dm.c 	md->disk->major = _major;
md               1984 drivers/md/dm.c 	md->disk->first_minor = minor;
md               1985 drivers/md/dm.c 	md->disk->fops = &dm_blk_dops;
md               1986 drivers/md/dm.c 	md->disk->queue = md->queue;
md               1987 drivers/md/dm.c 	md->disk->private_data = md;
md               1988 drivers/md/dm.c 	sprintf(md->disk->disk_name, "dm-%d", minor);
md               1991 drivers/md/dm.c 		md->dax_dev = alloc_dax(md, md->disk->disk_name,
md               1993 drivers/md/dm.c 		if (!md->dax_dev)
md               1997 drivers/md/dm.c 	add_disk_no_queue_reg(md->disk);
md               1998 drivers/md/dm.c 	format_dev_t(md->name, MKDEV(_major, minor));
md               2000 drivers/md/dm.c 	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
md               2001 drivers/md/dm.c 	if (!md->wq)
md               2004 drivers/md/dm.c 	md->bdev = bdget_disk(md->disk, 0);
md               2005 drivers/md/dm.c 	if (!md->bdev)
md               2008 drivers/md/dm.c 	dm_stats_init(&md->stats);
md               2012 drivers/md/dm.c 	old_md = idr_replace(&_minor_idr, md, minor);
md               2017 drivers/md/dm.c 	return md;
md               2020 drivers/md/dm.c 	cleanup_mapped_device(md);
md               2026 drivers/md/dm.c 	kvfree(md);
md               2030 drivers/md/dm.c static void unlock_fs(struct mapped_device *md);
md               2032 drivers/md/dm.c static void free_dev(struct mapped_device *md)
md               2034 drivers/md/dm.c 	int minor = MINOR(disk_devt(md->disk));
md               2036 drivers/md/dm.c 	unlock_fs(md);
md               2038 drivers/md/dm.c 	cleanup_mapped_device(md);
md               2040 drivers/md/dm.c 	free_table_devices(&md->table_devices);
md               2041 drivers/md/dm.c 	dm_stats_cleanup(&md->stats);
md               2045 drivers/md/dm.c 	kvfree(md);
md               2048 drivers/md/dm.c static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
md               2059 drivers/md/dm.c 		bioset_exit(&md->bs);
md               2060 drivers/md/dm.c 		bioset_exit(&md->io_bs);
md               2062 drivers/md/dm.c 	} else if (bioset_initialized(&md->bs)) {
md               2075 drivers/md/dm.c 	       bioset_initialized(&md->bs) ||
md               2076 drivers/md/dm.c 	       bioset_initialized(&md->io_bs));
md               2078 drivers/md/dm.c 	ret = bioset_init_from_src(&md->bs, &p->bs);
md               2081 drivers/md/dm.c 	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
md               2083 drivers/md/dm.c 		bioset_exit(&md->bs);
md               2097 drivers/md/dm.c 	struct mapped_device *md = (struct mapped_device *) context;
md               2099 drivers/md/dm.c 	spin_lock_irqsave(&md->uevent_lock, flags);
md               2100 drivers/md/dm.c 	list_splice_init(&md->uevent_list, &uevents);
md               2101 drivers/md/dm.c 	spin_unlock_irqrestore(&md->uevent_lock, flags);
md               2103 drivers/md/dm.c 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
md               2105 drivers/md/dm.c 	atomic_inc(&md->event_nr);
md               2106 drivers/md/dm.c 	wake_up(&md->eventq);
md               2113 drivers/md/dm.c static void __set_size(struct mapped_device *md, sector_t size)
md               2115 drivers/md/dm.c 	lockdep_assert_held(&md->suspend_lock);
md               2117 drivers/md/dm.c 	set_capacity(md->disk, size);
md               2119 drivers/md/dm.c 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
md               2125 drivers/md/dm.c static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
md               2129 drivers/md/dm.c 	struct request_queue *q = md->queue;
md               2134 drivers/md/dm.c 	lockdep_assert_held(&md->suspend_lock);
md               2141 drivers/md/dm.c 	if (size != dm_get_size(md))
md               2142 drivers/md/dm.c 		memset(&md->geometry, 0, sizeof(md->geometry));
md               2144 drivers/md/dm.c 	__set_size(md, size);
md               2146 drivers/md/dm.c 	dm_table_event_callback(t, event_callback, md);
md               2158 drivers/md/dm.c 	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
md               2165 drivers/md/dm.c 		md->immutable_target = dm_table_get_immutable_target(t);
md               2168 drivers/md/dm.c 	ret = __bind_mempools(md, t);
md               2174 drivers/md/dm.c 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
md               2175 drivers/md/dm.c 	rcu_assign_pointer(md->map, (void *)t);
md               2176 drivers/md/dm.c 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
md               2180 drivers/md/dm.c 		dm_sync_table(md);
md               2189 drivers/md/dm.c static struct dm_table *__unbind(struct mapped_device *md)
md               2191 drivers/md/dm.c 	struct dm_table *map = rcu_dereference_protected(md->map, 1);
md               2197 drivers/md/dm.c 	RCU_INIT_POINTER(md->map, NULL);
md               2198 drivers/md/dm.c 	dm_sync_table(md);
md               2209 drivers/md/dm.c 	struct mapped_device *md;
md               2211 drivers/md/dm.c 	md = alloc_dev(minor);
md               2212 drivers/md/dm.c 	if (!md)
md               2215 drivers/md/dm.c 	r = dm_sysfs_init(md);
md               2217 drivers/md/dm.c 		free_dev(md);
md               2221 drivers/md/dm.c 	*result = md;
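
dm_create() wraps alloc_dev() plus dm_sysfs_init() and hands back a referenced device. A reduced sketch of a caller, assuming the dm.h declarations, where DM_ANY_MINOR requests any free minor:

    /* Sketch: create a mapped device; the reference taken here is
     * balanced later by dm_put()/dm_destroy(). */
    static struct mapped_device *example_create(void)
    {
            struct mapped_device *md;

            if (dm_create(DM_ANY_MINOR, &md))
                    return NULL;
            return md;
    }
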
md               2229 drivers/md/dm.c void dm_lock_md_type(struct mapped_device *md)
md               2231 drivers/md/dm.c 	mutex_lock(&md->type_lock);
md               2234 drivers/md/dm.c void dm_unlock_md_type(struct mapped_device *md)
md               2236 drivers/md/dm.c 	mutex_unlock(&md->type_lock);
md               2239 drivers/md/dm.c void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
md               2241 drivers/md/dm.c 	BUG_ON(!mutex_is_locked(&md->type_lock));
md               2242 drivers/md/dm.c 	md->type = type;
md               2245 drivers/md/dm.c enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
md               2247 drivers/md/dm.c 	return md->type;
md               2250 drivers/md/dm.c struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
md               2252 drivers/md/dm.c 	return md->immutable_target_type;
md               2259 drivers/md/dm.c struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
md               2261 drivers/md/dm.c 	BUG_ON(!atomic_read(&md->holders));
md               2262 drivers/md/dm.c 	return &md->queue->limits;
md               2266 drivers/md/dm.c static void dm_init_congested_fn(struct mapped_device *md)
md               2268 drivers/md/dm.c 	md->queue->backing_dev_info->congested_data = md;
md               2269 drivers/md/dm.c 	md->queue->backing_dev_info->congested_fn = dm_any_congested;
md               2275 drivers/md/dm.c int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
md               2279 drivers/md/dm.c 	enum dm_queue_mode type = dm_get_md_type(md);
md               2283 drivers/md/dm.c 		r = dm_mq_init_request_queue(md, t);
md               2288 drivers/md/dm.c 		dm_init_congested_fn(md);
md               2293 drivers/md/dm.c 		dm_init_congested_fn(md);
md               2305 drivers/md/dm.c 	dm_table_set_restrictions(t, md->queue, &limits);
md               2306 drivers/md/dm.c 	blk_register_queue(md->disk);
md               2313 drivers/md/dm.c 	struct mapped_device *md;
md               2321 drivers/md/dm.c 	md = idr_find(&_minor_idr, minor);
md               2322 drivers/md/dm.c 	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
md               2323 drivers/md/dm.c 	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
md               2324 drivers/md/dm.c 		md = NULL;
md               2327 drivers/md/dm.c 	dm_get(md);
md               2331 drivers/md/dm.c 	return md;
md               2335 drivers/md/dm.c void *dm_get_mdptr(struct mapped_device *md)
md               2337 drivers/md/dm.c 	return md->interface_ptr;
md               2340 drivers/md/dm.c void dm_set_mdptr(struct mapped_device *md, void *ptr)
md               2342 drivers/md/dm.c 	md->interface_ptr = ptr;
md               2345 drivers/md/dm.c void dm_get(struct mapped_device *md)
md               2347 drivers/md/dm.c 	atomic_inc(&md->holders);
md               2348 drivers/md/dm.c 	BUG_ON(test_bit(DMF_FREEING, &md->flags));
md               2351 drivers/md/dm.c int dm_hold(struct mapped_device *md)
md               2354 drivers/md/dm.c 	if (test_bit(DMF_FREEING, &md->flags)) {
md               2358 drivers/md/dm.c 	dm_get(md);
md               2364 drivers/md/dm.c const char *dm_device_name(struct mapped_device *md)
md               2366 drivers/md/dm.c 	return md->name;
md               2370 drivers/md/dm.c static void __dm_destroy(struct mapped_device *md, bool wait)
md               2378 drivers/md/dm.c 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
md               2379 drivers/md/dm.c 	set_bit(DMF_FREEING, &md->flags);
md               2382 drivers/md/dm.c 	blk_set_queue_dying(md->queue);
md               2388 drivers/md/dm.c 	mutex_lock(&md->suspend_lock);
md               2389 drivers/md/dm.c 	map = dm_get_live_table(md, &srcu_idx);
md               2390 drivers/md/dm.c 	if (!dm_suspended_md(md)) {
md               2392 drivers/md/dm.c 		set_bit(DMF_SUSPENDED, &md->flags);
md               2396 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               2397 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2406 drivers/md/dm.c 		while (atomic_read(&md->holders))
md               2408 drivers/md/dm.c 	else if (atomic_read(&md->holders))
md               2410 drivers/md/dm.c 		       dm_device_name(md), atomic_read(&md->holders));
md               2412 drivers/md/dm.c 	dm_sysfs_exit(md);
md               2413 drivers/md/dm.c 	dm_table_destroy(__unbind(md));
md               2414 drivers/md/dm.c 	free_dev(md);
md               2417 drivers/md/dm.c void dm_destroy(struct mapped_device *md)
md               2419 drivers/md/dm.c 	__dm_destroy(md, true);
md               2422 drivers/md/dm.c void dm_destroy_immediate(struct mapped_device *md)
md               2424 drivers/md/dm.c 	__dm_destroy(md, false);
md               2427 drivers/md/dm.c void dm_put(struct mapped_device *md)
md               2429 drivers/md/dm.c 	atomic_dec(&md->holders);
md               2433 drivers/md/dm.c static int dm_wait_for_completion(struct mapped_device *md, long task_state)
md               2439 drivers/md/dm.c 		prepare_to_wait(&md->wait, &wait, task_state);
md               2441 drivers/md/dm.c 		if (!md_in_flight(md))
md               2451 drivers/md/dm.c 	finish_wait(&md->wait, &wait);
md               2461 drivers/md/dm.c 	struct mapped_device *md = container_of(work, struct mapped_device,
md               2467 drivers/md/dm.c 	map = dm_get_live_table(md, &srcu_idx);
md               2469 drivers/md/dm.c 	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
md               2470 drivers/md/dm.c 		spin_lock_irq(&md->deferred_lock);
md               2471 drivers/md/dm.c 		c = bio_list_pop(&md->deferred);
md               2472 drivers/md/dm.c 		spin_unlock_irq(&md->deferred_lock);
md               2477 drivers/md/dm.c 		if (dm_request_based(md))
md               2480 drivers/md/dm.c 			(void) dm_process_bio(md, map, c);
md               2483 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               2486 drivers/md/dm.c static void dm_queue_flush(struct mapped_device *md)
md               2488 drivers/md/dm.c 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
md               2490 drivers/md/dm.c 	queue_work(md->wq, &md->work);
md               2496 drivers/md/dm.c struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
md               2502 drivers/md/dm.c 	mutex_lock(&md->suspend_lock);
md               2505 drivers/md/dm.c 	if (!dm_suspended_md(md))
md               2515 drivers/md/dm.c 		live_map = dm_get_live_table_fast(md);
md               2517 drivers/md/dm.c 			limits = md->queue->limits;
md               2518 drivers/md/dm.c 		dm_put_live_table_fast(md);
md               2529 drivers/md/dm.c 	map = __bind(md, table, &limits);
md               2533 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2541 drivers/md/dm.c static int lock_fs(struct mapped_device *md)
md               2545 drivers/md/dm.c 	WARN_ON(md->frozen_sb);
md               2547 drivers/md/dm.c 	md->frozen_sb = freeze_bdev(md->bdev);
md               2548 drivers/md/dm.c 	if (IS_ERR(md->frozen_sb)) {
md               2549 drivers/md/dm.c 		r = PTR_ERR(md->frozen_sb);
md               2550 drivers/md/dm.c 		md->frozen_sb = NULL;
md               2554 drivers/md/dm.c 	set_bit(DMF_FROZEN, &md->flags);
md               2559 drivers/md/dm.c static void unlock_fs(struct mapped_device *md)
md               2561 drivers/md/dm.c 	if (!test_bit(DMF_FROZEN, &md->flags))
md               2564 drivers/md/dm.c 	thaw_bdev(md->bdev, md->frozen_sb);
md               2565 drivers/md/dm.c 	md->frozen_sb = NULL;
md               2566 drivers/md/dm.c 	clear_bit(DMF_FROZEN, &md->flags);
md               2578 drivers/md/dm.c static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
md               2586 drivers/md/dm.c 	lockdep_assert_held(&md->suspend_lock);
md               2593 drivers/md/dm.c 		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
md               2595 drivers/md/dm.c 		pr_debug("%s: suspending with flush\n", dm_device_name(md));
md               2610 drivers/md/dm.c 		r = lock_fs(md);
md               2629 drivers/md/dm.c 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
md               2631 drivers/md/dm.c 		synchronize_srcu(&md->io_barrier);
md               2637 drivers/md/dm.c 	if (dm_request_based(md))
md               2638 drivers/md/dm.c 		dm_stop_queue(md->queue);
md               2640 drivers/md/dm.c 	flush_workqueue(md->wq);
md               2647 drivers/md/dm.c 	r = dm_wait_for_completion(md, task_state);
md               2649 drivers/md/dm.c 		set_bit(dmf_suspended_flag, &md->flags);
md               2652 drivers/md/dm.c 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
md               2654 drivers/md/dm.c 		synchronize_srcu(&md->io_barrier);
md               2658 drivers/md/dm.c 		dm_queue_flush(md);
md               2660 drivers/md/dm.c 		if (dm_request_based(md))
md               2661 drivers/md/dm.c 			dm_start_queue(md->queue);
md               2663 drivers/md/dm.c 		unlock_fs(md);
md               2687 drivers/md/dm.c int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
md               2693 drivers/md/dm.c 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
md               2695 drivers/md/dm.c 	if (dm_suspended_md(md)) {
md               2700 drivers/md/dm.c 	if (dm_suspended_internally_md(md)) {
md               2702 drivers/md/dm.c 		mutex_unlock(&md->suspend_lock);
md               2703 drivers/md/dm.c 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
md               2709 drivers/md/dm.c 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
md               2711 drivers/md/dm.c 	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
md               2718 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2722 drivers/md/dm.c static int __dm_resume(struct mapped_device *md, struct dm_table *map)
md               2730 drivers/md/dm.c 	dm_queue_flush(md);
md               2737 drivers/md/dm.c 	if (dm_request_based(md))
md               2738 drivers/md/dm.c 		dm_start_queue(md->queue);
md               2740 drivers/md/dm.c 	unlock_fs(md);
md               2745 drivers/md/dm.c int dm_resume(struct mapped_device *md)
md               2752 drivers/md/dm.c 	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
md               2754 drivers/md/dm.c 	if (!dm_suspended_md(md))
md               2757 drivers/md/dm.c 	if (dm_suspended_internally_md(md)) {
md               2759 drivers/md/dm.c 		mutex_unlock(&md->suspend_lock);
md               2760 drivers/md/dm.c 		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
md               2766 drivers/md/dm.c 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
md               2770 drivers/md/dm.c 	r = __dm_resume(md, map);
md               2774 drivers/md/dm.c 	clear_bit(DMF_SUSPENDED, &md->flags);
md               2776 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
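
Callers such as the dm-ioctl resume path drive these primitives as suspend, swap, resume. A reduced sketch of that sequence under the assumption that a map is already bound; the real ioctl code has more elaborate error handling:

    /* Sketch: replace the live table while the device is suspended,
     * roughly in the spirit of dm-ioctl's do_resume(). */
    static int example_replace_table(struct mapped_device *md,
                                     struct dm_table *new_map)
    {
            struct dm_table *old_map;
            int r;

            r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
            if (r)
                    return r;

            old_map = dm_swap_table(md, new_map);
            if (IS_ERR(old_map)) {
                    dm_resume(md);          /* old map is still bound */
                    return PTR_ERR(old_map);
            }
            if (old_map)
                    dm_table_destroy(old_map);

            return dm_resume(md);
    }
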
md               2787 drivers/md/dm.c static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
md               2791 drivers/md/dm.c 	lockdep_assert_held(&md->suspend_lock);
md               2793 drivers/md/dm.c 	if (md->internal_suspend_count++)
md               2796 drivers/md/dm.c 	if (dm_suspended_md(md)) {
md               2797 drivers/md/dm.c 		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
md               2801 drivers/md/dm.c 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
md               2809 drivers/md/dm.c 	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
md               2815 drivers/md/dm.c static void __dm_internal_resume(struct mapped_device *md)
md               2817 drivers/md/dm.c 	BUG_ON(!md->internal_suspend_count);
md               2819 drivers/md/dm.c 	if (--md->internal_suspend_count)
md               2822 drivers/md/dm.c 	if (dm_suspended_md(md))
md               2829 drivers/md/dm.c 	(void) __dm_resume(md, NULL);
md               2832 drivers/md/dm.c 	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
md               2834 drivers/md/dm.c 	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
md               2837 drivers/md/dm.c void dm_internal_suspend_noflush(struct mapped_device *md)
md               2839 drivers/md/dm.c 	mutex_lock(&md->suspend_lock);
md               2840 drivers/md/dm.c 	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
md               2841 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2845 drivers/md/dm.c void dm_internal_resume(struct mapped_device *md)
md               2847 drivers/md/dm.c 	mutex_lock(&md->suspend_lock);
md               2848 drivers/md/dm.c 	__dm_internal_resume(md);
md               2849 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2858 drivers/md/dm.c void dm_internal_suspend_fast(struct mapped_device *md)
md               2860 drivers/md/dm.c 	mutex_lock(&md->suspend_lock);
md               2861 drivers/md/dm.c 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
md               2864 drivers/md/dm.c 	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
md               2865 drivers/md/dm.c 	synchronize_srcu(&md->io_barrier);
md               2866 drivers/md/dm.c 	flush_workqueue(md->wq);
md               2867 drivers/md/dm.c 	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
md               2871 drivers/md/dm.c void dm_internal_resume_fast(struct mapped_device *md)
md               2873 drivers/md/dm.c 	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
md               2876 drivers/md/dm.c 	dm_queue_flush(md);
md               2879 drivers/md/dm.c 	mutex_unlock(&md->suspend_lock);
md               2886 drivers/md/dm.c int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
md               2893 drivers/md/dm.c 		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
md               2897 drivers/md/dm.c 		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
md               2902 drivers/md/dm.c uint32_t dm_next_uevent_seq(struct mapped_device *md)
md               2904 drivers/md/dm.c 	return atomic_add_return(1, &md->uevent_seq);
md               2907 drivers/md/dm.c uint32_t dm_get_event_nr(struct mapped_device *md)
md               2909 drivers/md/dm.c 	return atomic_read(&md->event_nr);
md               2912 drivers/md/dm.c int dm_wait_event(struct mapped_device *md, int event_nr)
md               2914 drivers/md/dm.c 	return wait_event_interruptible(md->eventq,
md               2915 drivers/md/dm.c 			(event_nr != atomic_read(&md->event_nr)));
md               2918 drivers/md/dm.c void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
md               2922 drivers/md/dm.c 	spin_lock_irqsave(&md->uevent_lock, flags);
md               2923 drivers/md/dm.c 	list_add(elist, &md->uevent_list);
md               2924 drivers/md/dm.c 	spin_unlock_irqrestore(&md->uevent_lock, flags);
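
Userspace-visible events flow through md->event_nr: targets queue events with dm_uevent_add(), event_callback() bumps the counter and wakes md->eventq, and waiters compare against the counter. A sketch of the waiting side:

    /* Sketch: block until the device reports an event newer than the
     * last one observed, using the counter/waitqueue pair above. */
    static int example_wait_for_event(struct mapped_device *md)
    {
            uint32_t last = dm_get_event_nr(md);

            return dm_wait_event(md, last); /* interruptible wait */
    }
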
md               2931 drivers/md/dm.c struct gendisk *dm_disk(struct mapped_device *md)
md               2933 drivers/md/dm.c 	return md->disk;
md               2937 drivers/md/dm.c struct kobject *dm_kobject(struct mapped_device *md)
md               2939 drivers/md/dm.c 	return &md->kobj_holder.kobj;
md               2944 drivers/md/dm.c 	struct mapped_device *md;
md               2946 drivers/md/dm.c 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
md               2949 drivers/md/dm.c 	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
md               2950 drivers/md/dm.c 		md = NULL;
md               2953 drivers/md/dm.c 	dm_get(md);
md               2957 drivers/md/dm.c 	return md;
md               2960 drivers/md/dm.c int dm_suspended_md(struct mapped_device *md)
md               2962 drivers/md/dm.c 	return test_bit(DMF_SUSPENDED, &md->flags);
md               2965 drivers/md/dm.c int dm_suspended_internally_md(struct mapped_device *md)
md               2967 drivers/md/dm.c 	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
md               2970 drivers/md/dm.c int dm_test_deferred_remove_flag(struct mapped_device *md)
md               2972 drivers/md/dm.c 	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
md               2987 drivers/md/dm.c struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
md               2991 drivers/md/dm.c 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
md               3057 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md               3062 drivers/md/dm.c 	table = dm_get_live_table(md, &srcu_idx);
md               3077 drivers/md/dm.c 	dm_put_live_table(md, srcu_idx);
md               3122 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md               3126 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
md               3136 drivers/md/dm.c 	dm_unprepare_ioctl(md, srcu_idx);
md               3142 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md               3146 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
md               3156 drivers/md/dm.c 	dm_unprepare_ioctl(md, srcu_idx);
md               3163 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md               3167 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
md               3177 drivers/md/dm.c 	dm_unprepare_ioctl(md, srcu_idx);
md               3183 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
md               3187 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
md               3197 drivers/md/dm.c 	dm_unprepare_ioctl(md, srcu_idx);
md                 80 drivers/md/dm.h void dm_lock_md_type(struct mapped_device *md);
md                 81 drivers/md/dm.h void dm_unlock_md_type(struct mapped_device *md);
md                 82 drivers/md/dm.h void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
md                 83 drivers/md/dm.h enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
md                 84 drivers/md/dm.h struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
md                 86 drivers/md/dm.h int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
md                119 drivers/md/dm.h int dm_deleting_md(struct mapped_device *md);
md                124 drivers/md/dm.h int dm_suspended_md(struct mapped_device *md);
md                129 drivers/md/dm.h int dm_suspended_internally_md(struct mapped_device *md);
md                130 drivers/md/dm.h void dm_internal_suspend_fast(struct mapped_device *md);
md                131 drivers/md/dm.h void dm_internal_resume_fast(struct mapped_device *md);
md                132 drivers/md/dm.h void dm_internal_suspend_noflush(struct mapped_device *md);
md                133 drivers/md/dm.h void dm_internal_resume(struct mapped_device *md);
md                138 drivers/md/dm.h int dm_test_deferred_remove_flag(struct mapped_device *md);
md                155 drivers/md/dm.h int dm_sysfs_init(struct mapped_device *md);
md                156 drivers/md/dm.h void dm_sysfs_exit(struct mapped_device *md);
md                157 drivers/md/dm.h struct kobject *dm_kobject(struct mapped_device *md);
md                177 drivers/md/dm.h void dm_destroy(struct mapped_device *md);
md                178 drivers/md/dm.h void dm_destroy_immediate(struct mapped_device *md);
md                179 drivers/md/dm.h int dm_open_count(struct mapped_device *md);
md                180 drivers/md/dm.h int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
md                181 drivers/md/dm.h int dm_cancel_deferred_remove(struct mapped_device *md);
md                182 drivers/md/dm.h int dm_request_based(struct mapped_device *md);
md                183 drivers/md/dm.h sector_t dm_get_size(struct mapped_device *md);
md                184 drivers/md/dm.h struct request_queue *dm_get_md_queue(struct mapped_device *md);
md                185 drivers/md/dm.h int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
md                187 drivers/md/dm.h void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
md                188 drivers/md/dm.h struct dm_stats *dm_get_stats(struct mapped_device *md);
md                190 drivers/md/dm.h int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
md                193 drivers/md/dm.h void dm_internal_suspend(struct mapped_device *md);
md                194 drivers/md/dm.h void dm_internal_resume(struct mapped_device *md);
md                205 drivers/md/dm.h struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
md                 48 drivers/md/raid1.c #define raid1_log(md, fmt, args...)				\
md                 49 drivers/md/raid1.c 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
md                 77 drivers/md/raid10.c #define raid10_log(md, fmt, args...)				\
md                 78 drivers/md/raid10.c 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
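
Both macros guard blk_add_trace_msg() behind a queue check so the trace is a no-op when no queue is attached. An illustrative call site, assuming the usual r1conf back-pointer to its mddev:

    /* Sketch: typical raid1_log() use; expands to
     * blk_add_trace_msg(mddev->queue, "raid1 wait barrier")
     * when mddev->queue is non-NULL. */
    static void example_trace_barrier(struct r1conf *conf)
    {
            raid1_log(conf->mddev, "wait barrier");
    }
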
md                301 drivers/media/platform/s3c-camif/camif-core.c 	struct media_device *md = &camif->media_dev;
md                306 drivers/media/platform/s3c-camif/camif-core.c 	memset(md, 0, sizeof(*md));
md                307 drivers/media/platform/s3c-camif/camif-core.c 	snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF",
md                309 drivers/media/platform/s3c-camif/camif-core.c 	strscpy(md->bus_info, "platform", sizeof(md->bus_info));
md                310 drivers/media/platform/s3c-camif/camif-core.c 	md->hw_revision = ip_rev;
md                312 drivers/media/platform/s3c-camif/camif-core.c 	md->dev = camif->dev;
md                315 drivers/media/platform/s3c-camif/camif-core.c 	v4l2_dev->mdev = md;
md                317 drivers/media/platform/s3c-camif/camif-core.c 	media_device_init(md);
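
After media_device_init() a driver typically registers the media device once its entities exist, and unregisters/cleans up in reverse on teardown. A hedged sketch of that follow-up using the standard media-controller calls; the pairing is the assumed convention, not taken from camif-core.c itself:

    /* Sketch: the usual lifecycle around the initialization above. */
    static int example_media_lifecycle(struct media_device *md)
    {
            int ret = media_device_register(md);

            if (ret)
                    media_device_cleanup(md);
            return ret;
    }
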
md                693 drivers/media/tuners/mt2063.c 	u32 ma, mb, mc, md, me, mf;
md                717 drivers/media/tuners/mt2063.c 		md = (n * ((f_LO1 + hgds) / gd_Scale) -
md                721 drivers/media/tuners/mt2063.c 		if (md >= pAS_Info->maxH1)
md                728 drivers/media/tuners/mt2063.c 		if (md == ma)
md                733 drivers/media/tuners/mt2063.c 		if (mc != md) {
md                160 drivers/mmc/core/block.c 	struct mmc_blk_data *md;
md                174 drivers/mmc/core/block.c 	struct mmc_blk_data *md;
md                177 drivers/mmc/core/block.c 	md = disk->private_data;
md                178 drivers/mmc/core/block.c 	if (md && md->usage == 0)
md                179 drivers/mmc/core/block.c 		md = NULL;
md                180 drivers/mmc/core/block.c 	if (md)
md                181 drivers/mmc/core/block.c 		md->usage++;
md                184 drivers/mmc/core/block.c 	return md;
md                193 drivers/mmc/core/block.c static void mmc_blk_put(struct mmc_blk_data *md)
md                196 drivers/mmc/core/block.c 	md->usage--;
md                197 drivers/mmc/core/block.c 	if (md->usage == 0) {
md                198 drivers/mmc/core/block.c 		int devidx = mmc_get_devidx(md->disk);
md                199 drivers/mmc/core/block.c 		blk_put_queue(md->queue.queue);
md                201 drivers/mmc/core/block.c 		put_disk(md->disk);
md                202 drivers/mmc/core/block.c 		kfree(md);
md                211 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
md                212 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md                222 drivers/mmc/core/block.c 	mmc_blk_put(md);
md                231 drivers/mmc/core/block.c 	struct mmc_blk_data *md, *part_md;
md                242 drivers/mmc/core/block.c 	md = mmc_blk_get(dev_to_disk(dev));
md                243 drivers/mmc/core/block.c 	mq = &md->queue;
md                258 drivers/mmc/core/block.c 			md->disk->disk_name);
md                259 drivers/mmc/core/block.c 		set_disk_ro(md->disk, 1);
md                261 drivers/mmc/core/block.c 		list_for_each_entry(part_md, &md->part, part)
md                268 drivers/mmc/core/block.c 	mmc_blk_put(md);
md                276 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
md                280 drivers/mmc/core/block.c 		       md->read_only);
md                281 drivers/mmc/core/block.c 	mmc_blk_put(md);
md                290 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
md                297 drivers/mmc/core/block.c 	set_disk_ro(dev_to_disk(dev), set || md->read_only);
md                300 drivers/mmc/core/block.c 	mmc_blk_put(md);
md                306 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
md                310 drivers/mmc/core/block.c 	if (md) {
md                311 drivers/mmc/core/block.c 		if (md->usage == 2)
md                315 drivers/mmc/core/block.c 		if ((mode & FMODE_WRITE) && md->read_only) {
md                316 drivers/mmc/core/block.c 			mmc_blk_put(md);
md                327 drivers/mmc/core/block.c 	struct mmc_blk_data *md = disk->private_data;
md                330 drivers/mmc/core/block.c 	mmc_blk_put(md);
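
mmc_blk_get()/mmc_blk_put() implement a usage count over the per-disk private data; every sysfs, ioctl, and open path in this file brackets its work with the pair, as the lines above show. A minimal sketch:

    /* Sketch: the get/put bracketing used by the handlers in this
     * file; a NULL return means the disk is already being torn down. */
    static void example_with_blk_data(struct gendisk *disk)
    {
            struct mmc_blk_data *md = mmc_blk_get(disk);

            if (!md)
                    return;
            /* ... use md->queue, md->disk ... */
            mmc_blk_put(md);
    }
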
md                491 drivers/mmc/core/block.c static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
md                501 drivers/mmc/core/block.c 	if (!card || !md || !idata)
md                515 drivers/mmc/core/block.c 		target_part = md->part_type;
md                644 drivers/mmc/core/block.c static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
md                661 drivers/mmc/core/block.c 	card = md->queue.card;
md                670 drivers/mmc/core/block.c 	mq = &md->queue;
md                693 drivers/mmc/core/block.c static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
md                730 drivers/mmc/core/block.c 	card = md->queue.card;
md                740 drivers/mmc/core/block.c 	mq = &md->queue;
md                784 drivers/mmc/core/block.c 	struct mmc_blk_data *md;
md                792 drivers/mmc/core/block.c 		md = mmc_blk_get(bdev->bd_disk);
md                793 drivers/mmc/core/block.c 		if (!md)
md                795 drivers/mmc/core/block.c 		ret = mmc_blk_ioctl_cmd(md,
md                798 drivers/mmc/core/block.c 		mmc_blk_put(md);
md                804 drivers/mmc/core/block.c 		md = mmc_blk_get(bdev->bd_disk);
md                805 drivers/mmc/core/block.c 		if (!md)
md                807 drivers/mmc/core/block.c 		ret = mmc_blk_ioctl_multi_cmd(md,
md                810 drivers/mmc/core/block.c 		mmc_blk_put(md);
md                988 drivers/mmc/core/block.c static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
md                993 drivers/mmc/core/block.c 	if (md->reset_done & type)
md                996 drivers/mmc/core/block.c 	md->reset_done |= type;
md               1005 drivers/mmc/core/block.c 		part_err = mmc_blk_part_switch(host->card, md->part_type);
md               1017 drivers/mmc/core/block.c static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md               1019 drivers/mmc/core/block.c 	md->reset_done &= ~type;
md               1031 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1047 drivers/mmc/core/block.c 			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
md               1062 drivers/mmc/core/block.c 			       md->disk->disk_name, ret);
md               1078 drivers/mmc/core/block.c 		       md->disk->disk_name);
md               1088 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1089 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               1114 drivers/mmc/core/block.c 	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
md               1118 drivers/mmc/core/block.c 		mmc_blk_reset_success(md, type);
md               1126 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1127 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               1185 drivers/mmc/core/block.c 	if (err && !mmc_blk_reset(md, card->host, type))
md               1188 drivers/mmc/core/block.c 		mmc_blk_reset_success(md, type);
md               1195 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1196 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               1284 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1285 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               1296 drivers/mmc/core/block.c 		    (md->flags & MMC_BLK_REL_WR);
md               1341 drivers/mmc/core/block.c 		     get_capacity(md->disk)))
md               1553 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1599 drivers/mmc/core/block.c 	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
md               1760 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               1809 drivers/mmc/core/block.c 	    err && mmc_blk_reset(md, card->host, type)) {
md               1824 drivers/mmc/core/block.c 		mmc_blk_reset(md, card->host, type);
md               2181 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mq->blkdata;
md               2182 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               2186 drivers/mmc/core/block.c 	ret = mmc_blk_part_switch(card, md->part_type);
md               2253 drivers/mmc/core/block.c 	struct mmc_blk_data *md;
md               2272 drivers/mmc/core/block.c 	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
md               2273 drivers/mmc/core/block.c 	if (!md) {
md               2278 drivers/mmc/core/block.c 	md->area_type = area_type;
md               2284 drivers/mmc/core/block.c 	md->read_only = mmc_blk_readonly(card);
md               2286 drivers/mmc/core/block.c 	md->disk = alloc_disk(perdev_minors);
md               2287 drivers/mmc/core/block.c 	if (md->disk == NULL) {
md               2292 drivers/mmc/core/block.c 	INIT_LIST_HEAD(&md->part);
md               2293 drivers/mmc/core/block.c 	INIT_LIST_HEAD(&md->rpmbs);
md               2294 drivers/mmc/core/block.c 	md->usage = 1;
md               2296 drivers/mmc/core/block.c 	ret = mmc_init_queue(&md->queue, card);
md               2300 drivers/mmc/core/block.c 	md->queue.blkdata = md;
md               2308 drivers/mmc/core/block.c 	if (!blk_get_queue(md->queue.queue)) {
md               2309 drivers/mmc/core/block.c 		mmc_cleanup_queue(&md->queue);
md               2314 drivers/mmc/core/block.c 	md->disk->major	= MMC_BLOCK_MAJOR;
md               2315 drivers/mmc/core/block.c 	md->disk->first_minor = devidx * perdev_minors;
md               2316 drivers/mmc/core/block.c 	md->disk->fops = &mmc_bdops;
md               2317 drivers/mmc/core/block.c 	md->disk->private_data = md;
md               2318 drivers/mmc/core/block.c 	md->disk->queue = md->queue.queue;
md               2319 drivers/mmc/core/block.c 	md->parent = parent;
md               2320 drivers/mmc/core/block.c 	set_disk_ro(md->disk, md->read_only || default_ro);
md               2321 drivers/mmc/core/block.c 	md->disk->flags = GENHD_FL_EXT_DEVT;
md               2323 drivers/mmc/core/block.c 		md->disk->flags |= GENHD_FL_NO_PART_SCAN
md               2338 drivers/mmc/core/block.c 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
md               2341 drivers/mmc/core/block.c 	set_capacity(md->disk, size);
md               2348 drivers/mmc/core/block.c 			md->flags |= MMC_BLK_CMD23;
md               2352 drivers/mmc/core/block.c 	    md->flags & MMC_BLK_CMD23 &&
md               2355 drivers/mmc/core/block.c 		md->flags |= MMC_BLK_REL_WR;
md               2356 drivers/mmc/core/block.c 		blk_queue_write_cache(md->queue.queue, true, true);
md               2359 drivers/mmc/core/block.c 	return md;
md               2362 drivers/mmc/core/block.c 	put_disk(md->disk);
md               2364 drivers/mmc/core/block.c 	kfree(md);
md               2394 drivers/mmc/core/block.c 			      struct mmc_blk_data *md,
md               2404 drivers/mmc/core/block.c 	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
md               2409 drivers/mmc/core/block.c 	list_add(&part_md->part, &md->part);
md               2436 drivers/mmc/core/block.c 		ret = mmc_blk_ioctl_cmd(rpmb->md,
md               2441 drivers/mmc/core/block.c 		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
md               2468 drivers/mmc/core/block.c 	mmc_blk_get(rpmb->md->disk);
md               2478 drivers/mmc/core/block.c 	mmc_blk_put(rpmb->md);
md               2504 drivers/mmc/core/block.c 				   struct mmc_blk_data *md,
md               2537 drivers/mmc/core/block.c 	rpmb->md = md;
md               2547 drivers/mmc/core/block.c 	list_add(&rpmb->node, &md->rpmbs);
md               2577 drivers/mmc/core/block.c static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
md               2592 drivers/mmc/core/block.c 			ret = mmc_blk_alloc_rpmb_part(card, md,
md               2599 drivers/mmc/core/block.c 			ret = mmc_blk_alloc_part(card, md,
md               2613 drivers/mmc/core/block.c static void mmc_blk_remove_req(struct mmc_blk_data *md)
md               2617 drivers/mmc/core/block.c 	if (md) {
md               2623 drivers/mmc/core/block.c 		card = md->queue.card;
md               2624 drivers/mmc/core/block.c 		if (md->disk->flags & GENHD_FL_UP) {
md               2625 drivers/mmc/core/block.c 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
md               2626 drivers/mmc/core/block.c 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
md               2628 drivers/mmc/core/block.c 				device_remove_file(disk_to_dev(md->disk),
md               2629 drivers/mmc/core/block.c 					&md->power_ro_lock);
md               2631 drivers/mmc/core/block.c 			del_gendisk(md->disk);
md               2633 drivers/mmc/core/block.c 		mmc_cleanup_queue(&md->queue);
md               2634 drivers/mmc/core/block.c 		mmc_blk_put(md);
md               2639 drivers/mmc/core/block.c 				 struct mmc_blk_data *md)
md               2646 drivers/mmc/core/block.c 	list_for_each_safe(pos, q, &md->rpmbs) {
md               2652 drivers/mmc/core/block.c 	list_for_each_safe(pos, q, &md->part) {
md               2659 drivers/mmc/core/block.c static int mmc_add_disk(struct mmc_blk_data *md)
md               2662 drivers/mmc/core/block.c 	struct mmc_card *card = md->queue.card;
md               2664 drivers/mmc/core/block.c 	device_add_disk(md->parent, md->disk, NULL);
md               2665 drivers/mmc/core/block.c 	md->force_ro.show = force_ro_show;
md               2666 drivers/mmc/core/block.c 	md->force_ro.store = force_ro_store;
md               2667 drivers/mmc/core/block.c 	sysfs_attr_init(&md->force_ro.attr);
md               2668 drivers/mmc/core/block.c 	md->force_ro.attr.name = "force_ro";
md               2669 drivers/mmc/core/block.c 	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
md               2670 drivers/mmc/core/block.c 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
md               2674 drivers/mmc/core/block.c 	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
md               2683 drivers/mmc/core/block.c 		md->power_ro_lock.show = power_ro_lock_show;
md               2684 drivers/mmc/core/block.c 		md->power_ro_lock.store = power_ro_lock_store;
md               2685 drivers/mmc/core/block.c 		sysfs_attr_init(&md->power_ro_lock.attr);
md               2686 drivers/mmc/core/block.c 		md->power_ro_lock.attr.mode = mode;
md               2687 drivers/mmc/core/block.c 		md->power_ro_lock.attr.name =
md               2689 drivers/mmc/core/block.c 		ret = device_create_file(disk_to_dev(md->disk),
md               2690 drivers/mmc/core/block.c 				&md->power_ro_lock);
md               2697 drivers/mmc/core/block.c 	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
md               2699 drivers/mmc/core/block.c 	del_gendisk(md->disk);
md               2709 drivers/mmc/core/block.c 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
md               2710 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
md               2738 drivers/mmc/core/block.c 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
md               2739 drivers/mmc/core/block.c 	struct mmc_queue *mq = &md->queue;
md               2807 drivers/mmc/core/block.c static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
md               2817 drivers/mmc/core/block.c 		md->status_dentry =
md               2821 drivers/mmc/core/block.c 		if (!md->status_dentry)
md               2826 drivers/mmc/core/block.c 		md->ext_csd_dentry =
md               2829 drivers/mmc/core/block.c 		if (!md->ext_csd_dentry)
md               2837 drivers/mmc/core/block.c 				   struct mmc_blk_data *md)
md               2842 drivers/mmc/core/block.c 	if (!IS_ERR_OR_NULL(md->status_dentry)) {
md               2843 drivers/mmc/core/block.c 		debugfs_remove(md->status_dentry);
md               2844 drivers/mmc/core/block.c 		md->status_dentry = NULL;
md               2847 drivers/mmc/core/block.c 	if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
md               2848 drivers/mmc/core/block.c 		debugfs_remove(md->ext_csd_dentry);
md               2849 drivers/mmc/core/block.c 		md->ext_csd_dentry = NULL;
md               2855 drivers/mmc/core/block.c static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
md               2861 drivers/mmc/core/block.c 				   struct mmc_blk_data *md)
md               2869 drivers/mmc/core/block.c 	struct mmc_blk_data *md, *part_md;
md               2887 drivers/mmc/core/block.c 	md = mmc_blk_alloc(card);
md               2888 drivers/mmc/core/block.c 	if (IS_ERR(md))
md               2889 drivers/mmc/core/block.c 		return PTR_ERR(md);
md               2891 drivers/mmc/core/block.c 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
md               2894 drivers/mmc/core/block.c 		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
md               2895 drivers/mmc/core/block.c 		cap_str, md->read_only ? "(ro)" : "");
md               2897 drivers/mmc/core/block.c 	if (mmc_blk_alloc_parts(card, md))
md               2900 drivers/mmc/core/block.c 	dev_set_drvdata(&card->dev, md);
md               2902 drivers/mmc/core/block.c 	if (mmc_add_disk(md))
md               2905 drivers/mmc/core/block.c 	list_for_each_entry(part_md, &md->part, part) {
md               2911 drivers/mmc/core/block.c 	mmc_blk_add_debugfs(card, md);
md               2928 drivers/mmc/core/block.c 	mmc_blk_remove_parts(card, md);
md               2929 drivers/mmc/core/block.c 	mmc_blk_remove_req(md);
md               2935 drivers/mmc/core/block.c 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
md               2937 drivers/mmc/core/block.c 	mmc_blk_remove_debugfs(card, md);
md               2938 drivers/mmc/core/block.c 	mmc_blk_remove_parts(card, md);
md               2940 drivers/mmc/core/block.c 	if (md->part_curr != md->part_type) {
md               2942 drivers/mmc/core/block.c 		mmc_blk_part_switch(card, md->part_type);
md               2948 drivers/mmc/core/block.c 	mmc_blk_remove_req(md);
md               2956 drivers/mmc/core/block.c 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
md               2958 drivers/mmc/core/block.c 	if (md) {
md               2959 drivers/mmc/core/block.c 		mmc_queue_suspend(&md->queue);
md               2960 drivers/mmc/core/block.c 		list_for_each_entry(part_md, &md->part, part) {
md               2983 drivers/mmc/core/block.c 	struct mmc_blk_data *md = dev_get_drvdata(dev);
md               2985 drivers/mmc/core/block.c 	if (md) {
md               2990 drivers/mmc/core/block.c 		md->part_curr = md->part_type;
md               2991 drivers/mmc/core/block.c 		mmc_queue_resume(&md->queue);
md               2992 drivers/mmc/core/block.c 		list_for_each_entry(part_md, &md->part, part) {
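
The drivers/mmc/core/block.c hits above walk `md` through the standard block-device lifecycle: mmc_blk_alloc_req() builds it behind a goto-unwind error path (the put_disk()/kfree() tail), mmc_add_disk() publishes the gendisk and its sysfs attributes, and mmc_blk_remove_req() undoes everything in reverse. A minimal user-space reduction of that goto-unwind shape, with plain malloc()/free() standing in for alloc_disk() and the queue setup (the struct and helper names are mine):

	#include <stdio.h>
	#include <stdlib.h>

	struct blk_data {
		void *disk;
		void *queue;
	};

	static struct blk_data *blk_alloc(void)
	{
		struct blk_data *md;

		md = calloc(1, sizeof(*md));	/* kzalloc() analogue */
		if (!md)
			return NULL;

		md->disk = malloc(64);		/* alloc_disk() analogue */
		if (!md->disk)
			goto err_kfree;

		md->queue = malloc(64);		/* mmc_init_queue() analogue */
		if (!md->queue)
			goto err_putdisk;

		return md;

		/* Each later failure unwinds exactly the resources acquired
		 * so far, in reverse order -- the same shape as the
		 * excerpt's put_disk()/kfree() tail. */
	err_putdisk:
		free(md->disk);
	err_kfree:
		free(md);
		return NULL;
	}

	int main(void)
	{
		struct blk_data *md = blk_alloc();

		printf(md ? "allocated\n" : "allocation failed\n");
		if (md) {
			free(md->queue);
			free(md->disk);
			free(md);
		}
		return 0;
	}
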
md                390 drivers/mtd/nand/raw/nand_bbt.c 			  struct nand_bbt_descr *td, struct nand_bbt_descr *md)
md                404 drivers/mtd/nand/raw/nand_bbt.c 	if (md && (md->options & NAND_BBT_VERSION)) {
md                405 drivers/mtd/nand/raw/nand_bbt.c 		scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
md                406 drivers/mtd/nand/raw/nand_bbt.c 			  mtd->writesize, md);
md                407 drivers/mtd/nand/raw/nand_bbt.c 		md->version[0] = buf[bbt_get_ver_offs(this, md)];
md                409 drivers/mtd/nand/raw/nand_bbt.c 			 md->pages[0], md->version[0]);
md                598 drivers/mtd/nand/raw/nand_bbt.c 			     struct nand_bbt_descr *md)
md                604 drivers/mtd/nand/raw/nand_bbt.c 	if (md)
md                605 drivers/mtd/nand/raw/nand_bbt.c 		search_bbt(this, buf, md);
md                622 drivers/mtd/nand/raw/nand_bbt.c 			 struct nand_bbt_descr *md, int chip)
md                665 drivers/mtd/nand/raw/nand_bbt.c 		if (!md || md->pages[chip] != page)
md                713 drivers/mtd/nand/raw/nand_bbt.c 		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
md                754 drivers/mtd/nand/raw/nand_bbt.c 		block = get_bbt_block(this, td, md, chip);
md                922 drivers/mtd/nand/raw/nand_bbt.c 	struct nand_bbt_descr *md = this->bbt_md;
md                940 drivers/mtd/nand/raw/nand_bbt.c 		if (md) {
md                941 drivers/mtd/nand/raw/nand_bbt.c 			if (td->pages[i] == -1 && md->pages[i] == -1) {
md                945 drivers/mtd/nand/raw/nand_bbt.c 				rd = md;
md                947 drivers/mtd/nand/raw/nand_bbt.c 			} else if (md->pages[i] == -1) {
md                950 drivers/mtd/nand/raw/nand_bbt.c 			} else if (td->version[i] == md->version[i]) {
md                953 drivers/mtd/nand/raw/nand_bbt.c 					rd2 = md;
md                954 drivers/mtd/nand/raw/nand_bbt.c 			} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
md                958 drivers/mtd/nand/raw/nand_bbt.c 				rd = md;
md                980 drivers/mtd/nand/raw/nand_bbt.c 			if (md)
md                981 drivers/mtd/nand/raw/nand_bbt.c 				md->version[i] = 1;
md               1012 drivers/mtd/nand/raw/nand_bbt.c 		if (md) {
md               1013 drivers/mtd/nand/raw/nand_bbt.c 			td->version[i] = max(td->version[i], md->version[i]);
md               1014 drivers/mtd/nand/raw/nand_bbt.c 			md->version[i] = td->version[i];
md               1019 drivers/mtd/nand/raw/nand_bbt.c 			res = write_bbt(this, buf, td, md, chipsel);
md               1025 drivers/mtd/nand/raw/nand_bbt.c 		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
md               1026 drivers/mtd/nand/raw/nand_bbt.c 			res = write_bbt(this, buf, md, td, chipsel);
md               1048 drivers/mtd/nand/raw/nand_bbt.c 	struct nand_bbt_descr *md = this->bbt_md;
md               1070 drivers/mtd/nand/raw/nand_bbt.c 	if (md)
md               1071 drivers/mtd/nand/raw/nand_bbt.c 		md->version[chip]++;
md               1075 drivers/mtd/nand/raw/nand_bbt.c 		res = write_bbt(this, buf, td, md, chipsel);
md               1080 drivers/mtd/nand/raw/nand_bbt.c 	if (md && (md->options & NAND_BBT_WRITE)) {
md               1081 drivers/mtd/nand/raw/nand_bbt.c 		res = write_bbt(this, buf, md, td, chipsel);
md               1217 drivers/mtd/nand/raw/nand_bbt.c 	struct nand_bbt_descr *md = this->bbt_md;
md               1240 drivers/mtd/nand/raw/nand_bbt.c 	verify_bbt_descr(this, md);
md               1253 drivers/mtd/nand/raw/nand_bbt.c 		read_abs_bbts(this, buf, td, md);
md               1256 drivers/mtd/nand/raw/nand_bbt.c 		search_read_bbts(this, buf, td, md);
md               1265 drivers/mtd/nand/raw/nand_bbt.c 	if (md)
md               1266 drivers/mtd/nand/raw/nand_bbt.c 		mark_bbt_region(this, md);
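
In the nand_bbt.c hits, `md` is the mirror bad-block-table descriptor played against the main one (`td`), and the freshest copy is chosen by comparing 8-bit version counters; the `(int8_t)` cast in the comparison makes the ordering survive the 255 -> 0 wrap. A self-contained sketch of that wrap-safe comparison (the helper name is mine):

	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-safe "is a newer than b?" for 8-bit version counters: the
	 * int8_t cast orders versions within 127 steps of each other even
	 * across the 255 -> 0 wrap. */
	static int version_newer(uint8_t a, uint8_t b)
	{
		return (int8_t)(a - b) > 0;
	}

	int main(void)
	{
		printf("%d\n", version_newer(2, 1));	/* 1: plainly newer */
		printf("%d\n", version_newer(0, 255));	/* 1: newer across the wrap */
		printf("%d\n", version_newer(1, 1));	/* 0: equal */
		return 0;
	}
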
md                132 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	struct cudbg_mem_desc *md;
md                141 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md  = meminfo_buff->mem;
md                225 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
md                226 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
md                227 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
md                228 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
md                229 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
md                230 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
md                231 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
md                232 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
md                233 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
md                236 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
md                237 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->limit = md->base - 1 +
md                240 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                242 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
md                243 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->limit = md->base - 1 +
md                246 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                251 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
md                254 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			md->base = t4_read_reg(padap,
md                257 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		md->limit = 0;
md                259 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		md->base = 0;
md                260 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
md                262 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                265 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
md                266 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
md                278 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = 0;
md                279 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->idx = ARRAY_SIZE(cudbg_region);
md                293 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			md->base = BASEADDR_G(t4_read_reg(padap,
md                295 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			md->limit = md->base + (size << 2) - 1;
md                299 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                301 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
md                302 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->limit = 0;
md                303 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                304 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
md                305 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->limit = 0;
md                306 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                308 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md->base = padap->vres.ocq.start;
md                310 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		md->limit = md->base + padap->vres.ocq.size - 1;
md                312 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
md                313 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	md++;
md                319 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 			(md++)->base = meminfo_buff->avail[n].limit;
md                322 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 		(md++)->base = meminfo_buff->avail[n].limit;
md                324 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 	n = md - meminfo_buff->mem;
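
Throughout the cudbg_lib.c hits, `md` is a write cursor over a fixed array of memory-region descriptors: each `(md++)->base = ...` fills one slot, some slots derive a `limit` from the base before the cursor advances, and the final `md - meminfo_buff->mem` recovers how many were written. A compilable stand-alone version of the cursor idiom (the region values are made up):

	#include <stdio.h>

	struct mem_desc {
		unsigned int base;
		unsigned int limit;
	};

	int main(void)
	{
		struct mem_desc mem[4], *md = mem;

		/* Each post-increment leaves the cursor on the next free slot. */
		(md++)->base = 0x1000;
		(md++)->base = 0x2000;

		/* Some regions also compute a limit from the base, as the
		 * TP_PMM_* hits do, before advancing. */
		md->base  = 0x3000;
		md->limit = md->base + 0x0fff;
		md++;

		/* Pointer subtraction recovers how many slots were written. */
		printf("%td regions\n", md - mem);	/* 3 regions */
		return 0;
	}
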
md                 56 drivers/net/phy/mdio-mux-bcm-iproc.c static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
md                 62 drivers/net/phy/mdio-mux-bcm-iproc.c 	val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);
md                 64 drivers/net/phy/mdio-mux-bcm-iproc.c 	writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
md                 66 drivers/net/phy/mdio-mux-bcm-iproc.c 	if (md->core_clk) {
md                 70 drivers/net/phy/mdio-mux-bcm-iproc.c 		divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
md                 74 drivers/net/phy/mdio-mux-bcm-iproc.c 		writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);
md                 75 drivers/net/phy/mdio-mux-bcm-iproc.c 		writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);
md                142 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = bus->priv;
md                145 drivers/net/phy/mdio-mux-bcm-iproc.c 	ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
md                155 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = bus->priv;
md                159 drivers/net/phy/mdio-mux-bcm-iproc.c 	ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
md                169 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = data;
md                180 drivers/net/phy/mdio-mux-bcm-iproc.c 	writel(param, md->base + MDIO_PARAM_OFFSET);
md                186 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md;
md                191 drivers/net/phy/mdio-mux-bcm-iproc.c 	md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
md                192 drivers/net/phy/mdio-mux-bcm-iproc.c 	if (!md)
md                194 drivers/net/phy/mdio-mux-bcm-iproc.c 	md->dev = &pdev->dev;
md                205 drivers/net/phy/mdio-mux-bcm-iproc.c 	md->base = devm_ioremap_resource(&pdev->dev, res);
md                206 drivers/net/phy/mdio-mux-bcm-iproc.c 	if (IS_ERR(md->base)) {
md                208 drivers/net/phy/mdio-mux-bcm-iproc.c 		return PTR_ERR(md->base);
md                211 drivers/net/phy/mdio-mux-bcm-iproc.c 	md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
md                212 drivers/net/phy/mdio-mux-bcm-iproc.c 	if (!md->mii_bus) {
md                217 drivers/net/phy/mdio-mux-bcm-iproc.c 	md->core_clk = devm_clk_get(&pdev->dev, NULL);
md                218 drivers/net/phy/mdio-mux-bcm-iproc.c 	if (md->core_clk == ERR_PTR(-ENOENT) ||
md                219 drivers/net/phy/mdio-mux-bcm-iproc.c 	    md->core_clk == ERR_PTR(-EINVAL))
md                220 drivers/net/phy/mdio-mux-bcm-iproc.c 		md->core_clk = NULL;
md                221 drivers/net/phy/mdio-mux-bcm-iproc.c 	else if (IS_ERR(md->core_clk))
md                222 drivers/net/phy/mdio-mux-bcm-iproc.c 		return PTR_ERR(md->core_clk);
md                224 drivers/net/phy/mdio-mux-bcm-iproc.c 	rc = clk_prepare_enable(md->core_clk);
md                230 drivers/net/phy/mdio-mux-bcm-iproc.c 	bus = md->mii_bus;
md                231 drivers/net/phy/mdio-mux-bcm-iproc.c 	bus->priv = md;
md                246 drivers/net/phy/mdio-mux-bcm-iproc.c 	platform_set_drvdata(pdev, md);
md                248 drivers/net/phy/mdio-mux-bcm-iproc.c 	rc = mdio_mux_init(md->dev, md->dev->of_node, mdio_mux_iproc_switch_fn,
md                249 drivers/net/phy/mdio-mux-bcm-iproc.c 			   &md->mux_handle, md, md->mii_bus);
md                251 drivers/net/phy/mdio-mux-bcm-iproc.c 		dev_info(md->dev, "mdiomux initialization failed\n");
md                255 drivers/net/phy/mdio-mux-bcm-iproc.c 	mdio_mux_iproc_config(md);
md                257 drivers/net/phy/mdio-mux-bcm-iproc.c 	dev_info(md->dev, "iProc mdiomux registered\n");
md                263 drivers/net/phy/mdio-mux-bcm-iproc.c 	clk_disable_unprepare(md->core_clk);
md                269 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
md                271 drivers/net/phy/mdio-mux-bcm-iproc.c 	mdio_mux_uninit(md->mux_handle);
md                272 drivers/net/phy/mdio-mux-bcm-iproc.c 	mdiobus_unregister(md->mii_bus);
md                273 drivers/net/phy/mdio-mux-bcm-iproc.c 	clk_disable_unprepare(md->core_clk);
md                281 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
md                283 drivers/net/phy/mdio-mux-bcm-iproc.c 	clk_disable_unprepare(md->core_clk);
md                290 drivers/net/phy/mdio-mux-bcm-iproc.c 	struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
md                293 drivers/net/phy/mdio-mux-bcm-iproc.c 	rc = clk_prepare_enable(md->core_clk);
md                295 drivers/net/phy/mdio-mux-bcm-iproc.c 		dev_err(md->dev, "failed to enable core clk\n");
md                298 drivers/net/phy/mdio-mux-bcm-iproc.c 	mdio_mux_iproc_config(md);
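
mdio-mux-bcm-iproc.c treats the core clock as optional: if devm_clk_get() reports -ENOENT or -EINVAL, the driver downgrades the handle to NULL (which the clk API accepts as a no-op), while any other error pointer aborts the probe. Below is a hedged user-space emulation of that ladder, with a minimal ERR_PTR/IS_ERR shim in place of the kernel's err.h and a hypothetical get_optional_clk() helper:

	#include <errno.h>
	#include <stdio.h>

	/* Minimal ERR_PTR/IS_ERR shim, mimicking the kernel's err.h. */
	#define MAX_ERRNO 4095
	static void *ERR_PTR(long err) { return (void *)err; }
	static long PTR_ERR(const void *p) { return (long)p; }
	static int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	/* Hypothetical helper: the clock is optional, so "not found" and
	 * "not specified" collapse to NULL; real errors pass through. */
	static void *get_optional_clk(void *clk)
	{
		if (clk == ERR_PTR(-ENOENT) || clk == ERR_PTR(-EINVAL))
			return NULL;
		return clk;
	}

	int main(void)
	{
		void *clk = get_optional_clk(ERR_PTR(-ENOENT));

		if (clk && IS_ERR(clk))
			return (int)-PTR_ERR(clk);	/* hard failure: abort */
		printf(clk ? "clock present\n" : "no clock, continuing\n");
		return 0;	/* later clk calls treat NULL as a no-op */
	}
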
md               1535 drivers/net/vxlan.c 				struct vxlan_metadata *md)
md               1543 drivers/net/vxlan.c 	md->gbp = ntohs(gbp->policy_id);
md               1548 drivers/net/vxlan.c 		tun_dst->u.tun_info.options_len = sizeof(*md);
md               1551 drivers/net/vxlan.c 		md->gbp |= VXLAN_GBP_DONT_LEARN;
md               1554 drivers/net/vxlan.c 		md->gbp |= VXLAN_GBP_POLICY_APPLIED;
md               1558 drivers/net/vxlan.c 		skb->mark = md->gbp;
md               1657 drivers/net/vxlan.c 	struct vxlan_metadata *md = &_md;
md               1706 drivers/net/vxlan.c 					 key32_to_tunnel_id(vni), sizeof(*md));
md               1711 drivers/net/vxlan.c 		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
md               1715 drivers/net/vxlan.c 		memset(md, 0, sizeof(*md));
md               1722 drivers/net/vxlan.c 		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
md               2123 drivers/net/vxlan.c 				struct vxlan_metadata *md)
md               2127 drivers/net/vxlan.c 	if (!md->gbp)
md               2133 drivers/net/vxlan.c 	if (md->gbp & VXLAN_GBP_DONT_LEARN)
md               2136 drivers/net/vxlan.c 	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
md               2139 drivers/net/vxlan.c 	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
md               2156 drivers/net/vxlan.c 			   struct vxlan_metadata *md, u32 vxflags,
md               2206 drivers/net/vxlan.c 		vxlan_build_gbp_hdr(vxh, vxflags, md);
md               2426 drivers/net/vxlan.c 	struct vxlan_metadata *md = &_md;
md               2455 drivers/net/vxlan.c 		md->gbp = skb->mark;
md               2493 drivers/net/vxlan.c 			if (info->options_len < sizeof(*md))
md               2495 drivers/net/vxlan.c 			md = ip_tunnel_info_opts(info);
md               2552 drivers/net/vxlan.c 				      vni, md, flags, udp_sum);
md               2593 drivers/net/vxlan.c 				      vni, md, flags, udp_sum);
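
The vxlan.c hits pack group-based-policy state into the single `md->gbp` word: a policy ID in the low 16 bits plus DONT_LEARN and POLICY_APPLIED flags above it, seeded from `skb->mark` on transmit and written back to `skb->mark` on receive. A sketch of that round trip; the bit positions here are illustrative, the real masks being the VXLAN_GBP_* definitions in include/net/vxlan.h:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative layout: 16-bit policy ID low, flags above it. */
	#define GBP_ID_MASK		0xFFFFu
	#define GBP_DONT_LEARN		(1u << 22)
	#define GBP_POLICY_APPLIED	(1u << 19)

	int main(void)
	{
		uint32_t gbp = 0;

		/* Transmit: fold the ID and flags into one word (the driver
		 * seeds this from skb->mark). */
		gbp |= 42u & GBP_ID_MASK;
		gbp |= GBP_POLICY_APPLIED;

		/* Receive: unpack each piece (and mirror into skb->mark). */
		printf("id=%u learn=%s applied=%s\n",
		       (unsigned)(gbp & GBP_ID_MASK),
		       (gbp & GBP_DONT_LEARN) ? "no" : "yes",
		       (gbp & GBP_POLICY_APPLIED) ? "yes" : "no");
		return 0;
	}
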
md                334 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
md                342 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	md->sg_len = sg_cnt;
md                343 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	md->blocks = req_sz / func_blk_sz;
md                345 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	mc->arg |= md->blocks & 0x1FF;	/* block count */
md                350 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	mmc_set_data_timeout(md, func->card);
md                353 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 	ret = mc->error ? mc->error : md->error;
md                929 drivers/net/wireless/mac80211_hwsim.c 	struct mac80211_hwsim_addr_match_data *md = data;
md                931 drivers/net/wireless/mac80211_hwsim.c 	if (memcmp(mac, md->addr, ETH_ALEN) == 0)
md                932 drivers/net/wireless/mac80211_hwsim.c 		md->ret = true;
md                938 drivers/net/wireless/mac80211_hwsim.c 	struct mac80211_hwsim_addr_match_data md = {
md                945 drivers/net/wireless/mac80211_hwsim.c 	memcpy(md.addr, addr, ETH_ALEN);
md                950 drivers/net/wireless/mac80211_hwsim.c 						   &md);
md                952 drivers/net/wireless/mac80211_hwsim.c 	return md.ret;
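
mac80211_hwsim.c shows the standard context-cookie idiom: because the address iterator's callback signature is fixed, both the MAC address to match and the boolean result travel through a small struct passed as `void *data`. A runnable reduction of the pattern (the iterator and names are mine):

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	struct addr_match_data {
		unsigned char addr[ETH_ALEN];	/* input: address to find */
		int ret;			/* output: found? */
	};

	/* Fixed-signature callback: all state arrives through the cookie. */
	static void match_one(const unsigned char *mac, void *data)
	{
		struct addr_match_data *md = data;

		if (memcmp(mac, md->addr, ETH_ALEN) == 0)
			md->ret = 1;
	}

	/* Stand-in iterator; the real one walks active interfaces. */
	static void for_each_addr(void (*fn)(const unsigned char *, void *),
				  void *data)
	{
		static const unsigned char macs[2][ETH_ALEN] = {
			{ 0x02, 0, 0, 0, 0, 1 },
			{ 0x02, 0, 0, 0, 0, 2 },
		};

		for (int i = 0; i < 2; i++)
			fn(macs[i], data);
	}

	int main(void)
	{
		struct addr_match_data md = { .addr = { 0x02, 0, 0, 0, 0, 2 } };

		for_each_addr(match_one, &md);
		printf("found=%d\n", md.ret);	/* found=1 */
		return 0;
	}
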
md                129 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 	u32 md:1;
md                187 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 	u32 md:1;
md                323 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 	u32 md:1;
md                123 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md;
md                199 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev	*md;
md                261 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = priv->md->mport;
md                306 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = priv->md->mport;
md                359 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
md                363 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = md->mport;
md                383 drivers/rapidio/devices/rio_mport_cdev.c 	map->md = md;
md                385 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&map->node, &md->mappings);
md                393 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
md                400 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md                401 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(map, &md->mappings, node) {
md                419 drivers/rapidio/devices/rio_mport_cdev.c 		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
md                421 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md                428 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *data = priv->md;
md                462 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md                466 drivers/rapidio/devices/rio_mport_cdev.c 	if (!md->mport->ops->unmap_outb)
md                474 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md                475 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
md                485 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md                497 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md                503 drivers/rapidio/devices/rio_mport_cdev.c 	md->mport->host_deviceid = hdid;
md                504 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.hdid = hdid;
md                505 drivers/rapidio/devices/rio_mport_cdev.c 	rio_local_set_device_id(md->mport, hdid);
md                519 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md                525 drivers/rapidio/devices/rio_mport_cdev.c 	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
md                553 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md =
md                556 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
md                557 drivers/rapidio/devices/rio_mport_cdev.c 	rio_release_dma(md->dma_chan);
md                558 drivers/rapidio/devices/rio_mport_cdev.c 	md->dma_chan = NULL;
md                587 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_lock(&req->map->md->buf_mutex);
md                589 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_unlock(&req->map->md->buf_mutex);
md                657 drivers/rapidio/devices/rio_mport_cdev.c 		priv->dmach = rio_request_mport_dma(priv->md->mport);
md                660 drivers/rapidio/devices/rio_mport_cdev.c 			if (priv->md->dma_chan) {
md                661 drivers/rapidio/devices/rio_mport_cdev.c 				priv->dmach = priv->md->dma_chan;
md                662 drivers/rapidio/devices/rio_mport_cdev.c 				kref_get(&priv->md->dma_ref);
md                668 drivers/rapidio/devices/rio_mport_cdev.c 		} else if (!priv->md->dma_chan) {
md                670 drivers/rapidio/devices/rio_mport_cdev.c 			priv->md->dma_chan = priv->dmach;
md                671 drivers/rapidio/devices/rio_mport_cdev.c 			kref_init(&priv->md->dma_ref);
md                816 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md                903 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_lock(&md->buf_mutex);
md                904 drivers/rapidio/devices/rio_mport_cdev.c 		list_for_each_entry(map, &md->mappings, node) {
md                912 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_unlock(&md->buf_mutex);
md                978 drivers/rapidio/devices/rio_mport_cdev.c 	     priv->md->properties.transfer_mode) == 0)
md               1087 drivers/rapidio/devices/rio_mport_cdev.c static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
md               1096 drivers/rapidio/devices/rio_mport_cdev.c 	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
md               1106 drivers/rapidio/devices/rio_mport_cdev.c 	map->md = md;
md               1108 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               1109 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&map->node, &md->mappings);
md               1110 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               1119 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1127 drivers/rapidio/devices/rio_mport_cdev.c 	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
md               1134 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_lock(&md->buf_mutex);
md               1136 drivers/rapidio/devices/rio_mport_cdev.c 		mutex_unlock(&md->buf_mutex);
md               1146 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1155 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               1156 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
md               1164 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               1200 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
md               1204 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = md->mport;
md               1233 drivers/rapidio/devices/rio_mport_cdev.c 	map->md = md;
md               1235 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               1236 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&map->node, &md->mappings);
md               1237 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               1250 drivers/rapidio/devices/rio_mport_cdev.c rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
md               1260 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               1261 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(map, &md->mappings, node) {
md               1275 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               1281 drivers/rapidio/devices/rio_mport_cdev.c 	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
md               1287 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1292 drivers/rapidio/devices/rio_mport_cdev.c 	if (!md->mport->ops->map_inb)
md               1297 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
md               1299 drivers/rapidio/devices/rio_mport_cdev.c 	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
md               1310 drivers/rapidio/devices/rio_mport_cdev.c 			mutex_lock(&md->buf_mutex);
md               1312 drivers/rapidio/devices/rio_mport_cdev.c 			mutex_unlock(&md->buf_mutex);
md               1329 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1333 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
md               1335 drivers/rapidio/devices/rio_mport_cdev.c 	if (!md->mport->ops->unmap_inb)
md               1341 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               1342 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
md               1351 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               1363 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1364 drivers/rapidio/devices/rio_mport_cdev.c 	u32 port_idx = md->mport->index;
md               1391 drivers/rapidio/devices/rio_mport_cdev.c 		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
md               1434 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1446 drivers/rapidio/devices/rio_mport_cdev.c 	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
md               1450 drivers/rapidio/devices/rio_mport_cdev.c 			   dev_name(&md->dev), ret);
md               1456 drivers/rapidio/devices/rio_mport_cdev.c 		rio_release_inb_dbell(md->mport, filter.low, filter.high);
md               1462 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_irqsave(&md->db_lock, flags);
md               1464 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&db_filter->data_node, &md->doorbells);
md               1465 drivers/rapidio/devices/rio_mport_cdev.c 	spin_unlock_irqrestore(&md->db_lock, flags);
md               1491 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_irqsave(&priv->md->db_lock, flags);
md               1501 drivers/rapidio/devices/rio_mport_cdev.c 	spin_unlock_irqrestore(&priv->md->db_lock, flags);
md               1504 drivers/rapidio/devices/rio_mport_cdev.c 		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
md               1521 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = context;
md               1531 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock(&md->pw_lock);
md               1532 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
md               1539 drivers/rapidio/devices/rio_mport_cdev.c 	spin_unlock(&md->pw_lock);
md               1553 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1568 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_irqsave(&md->pw_lock, flags);
md               1569 drivers/rapidio/devices/rio_mport_cdev.c 	if (list_empty(&md->portwrites))
md               1572 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&pw_filter->md_node, &md->portwrites);
md               1573 drivers/rapidio/devices/rio_mport_cdev.c 	spin_unlock_irqrestore(&md->pw_lock, flags);
md               1578 drivers/rapidio/devices/rio_mport_cdev.c 		ret = rio_add_mport_pw_handler(md->mport, md,
md               1581 drivers/rapidio/devices/rio_mport_cdev.c 			dev_err(&md->dev,
md               1586 drivers/rapidio/devices/rio_mport_cdev.c 		rio_pw_enable(md->mport, 1);
md               1610 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1620 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_irqsave(&md->pw_lock, flags);
md               1629 drivers/rapidio/devices/rio_mport_cdev.c 	if (list_empty(&md->portwrites))
md               1631 drivers/rapidio/devices/rio_mport_cdev.c 	spin_unlock_irqrestore(&md->pw_lock, flags);
md               1634 drivers/rapidio/devices/rio_mport_cdev.c 		rio_del_mport_pw_handler(md->mport, priv->md,
md               1636 drivers/rapidio/devices/rio_mport_cdev.c 		rio_pw_enable(md->mport, 0);
md               1680 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = priv->md;
md               1705 drivers/rapidio/devices/rio_mport_cdev.c 	mport = md->mport;
md               1828 drivers/rapidio/devices/rio_mport_cdev.c 	mport = priv->md->mport;
md               1901 drivers/rapidio/devices/rio_mport_cdev.c 	priv->md = chdev;
md               1945 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md;
md               1959 drivers/rapidio/devices/rio_mport_cdev.c 	md = priv->md;
md               1988 drivers/rapidio/devices/rio_mport_cdev.c 	if (priv->dmach != priv->md->dma_chan) {
md               1994 drivers/rapidio/devices/rio_mport_cdev.c 		kref_put(&md->dma_ref, mport_release_def_dma);
md               2015 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
md               2017 drivers/rapidio/devices/rio_mport_cdev.c 	chdev = priv->md;
md               2067 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md = data->md;
md               2069 drivers/rapidio/devices/rio_mport_cdev.c 	if (atomic_read(&md->active) == 0)
md               2088 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.hdid = md->mport->host_deviceid;
md               2089 drivers/rapidio/devices/rio_mport_cdev.c 		if (copy_to_user((void __user *)arg, &(md->properties),
md               2090 drivers/rapidio/devices/rio_mport_cdev.c 				 sizeof(md->properties)))
md               2146 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = map->md->mport;
md               2182 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&map->md->buf_mutex);
md               2184 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&map->md->buf_mutex);
md               2195 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md;
md               2205 drivers/rapidio/devices/rio_mport_cdev.c 	md = priv->md;
md               2208 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               2209 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(map, &md->mappings, node) {
md               2216 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               2230 drivers/rapidio/devices/rio_mport_cdev.c 		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
md               2299 drivers/rapidio/devices/rio_mport_cdev.c 	struct rio_mport *mport = priv->md->mport;
md               2348 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md;
md               2351 drivers/rapidio/devices/rio_mport_cdev.c 	md = container_of(dev, struct mport_dev, dev);
md               2352 drivers/rapidio/devices/rio_mport_cdev.c 	kfree(md);
md               2362 drivers/rapidio/devices/rio_mport_cdev.c 	struct mport_dev *md;
md               2365 drivers/rapidio/devices/rio_mport_cdev.c 	md = kzalloc(sizeof(*md), GFP_KERNEL);
md               2366 drivers/rapidio/devices/rio_mport_cdev.c 	if (!md) {
md               2371 drivers/rapidio/devices/rio_mport_cdev.c 	md->mport = mport;
md               2372 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_init(&md->buf_mutex);
md               2373 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_init(&md->file_mutex);
md               2374 drivers/rapidio/devices/rio_mport_cdev.c 	INIT_LIST_HEAD(&md->file_list);
md               2376 drivers/rapidio/devices/rio_mport_cdev.c 	device_initialize(&md->dev);
md               2377 drivers/rapidio/devices/rio_mport_cdev.c 	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
md               2378 drivers/rapidio/devices/rio_mport_cdev.c 	md->dev.class = dev_class;
md               2379 drivers/rapidio/devices/rio_mport_cdev.c 	md->dev.parent = &mport->dev;
md               2380 drivers/rapidio/devices/rio_mport_cdev.c 	md->dev.release = mport_device_release;
md               2381 drivers/rapidio/devices/rio_mport_cdev.c 	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
md               2382 drivers/rapidio/devices/rio_mport_cdev.c 	atomic_set(&md->active, 1);
md               2384 drivers/rapidio/devices/rio_mport_cdev.c 	cdev_init(&md->cdev, &mport_fops);
md               2385 drivers/rapidio/devices/rio_mport_cdev.c 	md->cdev.owner = THIS_MODULE;
md               2387 drivers/rapidio/devices/rio_mport_cdev.c 	ret = cdev_device_add(&md->cdev, &md->dev);
md               2394 drivers/rapidio/devices/rio_mport_cdev.c 	INIT_LIST_HEAD(&md->doorbells);
md               2395 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_init(&md->db_lock);
md               2396 drivers/rapidio/devices/rio_mport_cdev.c 	INIT_LIST_HEAD(&md->portwrites);
md               2397 drivers/rapidio/devices/rio_mport_cdev.c 	spin_lock_init(&md->pw_lock);
md               2398 drivers/rapidio/devices/rio_mport_cdev.c 	INIT_LIST_HEAD(&md->mappings);
md               2400 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.id = mport->id;
md               2401 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.sys_size = mport->sys_size;
md               2402 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.hdid = mport->host_deviceid;
md               2403 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.index = mport->index;
md               2409 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
md               2411 drivers/rapidio/devices/rio_mport_cdev.c 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
md               2415 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.flags = attr.flags;
md               2416 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.link_speed = attr.link_speed;
md               2417 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.link_width = attr.link_width;
md               2418 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.dma_max_sge = attr.dma_max_sge;
md               2419 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.dma_max_size = attr.dma_max_size;
md               2420 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.dma_align = attr.dma_align;
md               2421 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.cap_sys_size = 0;
md               2422 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.cap_transfer_mode = 0;
md               2423 drivers/rapidio/devices/rio_mport_cdev.c 		md->properties.cap_addr_size = 0;
md               2429 drivers/rapidio/devices/rio_mport_cdev.c 	list_add_tail(&md->node, &mport_devs);
md               2435 drivers/rapidio/devices/rio_mport_cdev.c 	return md;
md               2438 drivers/rapidio/devices/rio_mport_cdev.c 	put_device(&md->dev);
md               2446 drivers/rapidio/devices/rio_mport_cdev.c static void mport_cdev_terminate_dma(struct mport_dev *md)
md               2451 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(DMA, "%s", dev_name(&md->dev));
md               2453 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->file_mutex);
md               2454 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(client, &md->file_list, list) {
md               2460 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->file_mutex);
md               2462 drivers/rapidio/devices/rio_mport_cdev.c 	if (md->dma_chan) {
md               2463 drivers/rapidio/devices/rio_mport_cdev.c 		dmaengine_terminate_all(md->dma_chan);
md               2464 drivers/rapidio/devices/rio_mport_cdev.c 		rio_release_dma(md->dma_chan);
md               2465 drivers/rapidio/devices/rio_mport_cdev.c 		md->dma_chan = NULL;
md               2475 drivers/rapidio/devices/rio_mport_cdev.c static int mport_cdev_kill_fasync(struct mport_dev *md)
md               2480 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->file_mutex);
md               2481 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry(client, &md->file_list, list) {
md               2486 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->file_mutex);
md               2494 drivers/rapidio/devices/rio_mport_cdev.c static void mport_cdev_remove(struct mport_dev *md)
md               2498 drivers/rapidio/devices/rio_mport_cdev.c 	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
md               2499 drivers/rapidio/devices/rio_mport_cdev.c 	atomic_set(&md->active, 0);
md               2500 drivers/rapidio/devices/rio_mport_cdev.c 	mport_cdev_terminate_dma(md);
md               2501 drivers/rapidio/devices/rio_mport_cdev.c 	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
md               2502 drivers/rapidio/devices/rio_mport_cdev.c 	cdev_device_del(&md->cdev, &md->dev);
md               2503 drivers/rapidio/devices/rio_mport_cdev.c 	mport_cdev_kill_fasync(md);
md               2513 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_lock(&md->buf_mutex);
md               2514 drivers/rapidio/devices/rio_mport_cdev.c 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
md               2517 drivers/rapidio/devices/rio_mport_cdev.c 	mutex_unlock(&md->buf_mutex);
md               2519 drivers/rapidio/devices/rio_mport_cdev.c 	if (!list_empty(&md->mappings))
md               2521 drivers/rapidio/devices/rio_mport_cdev.c 			  md->mport->name);
md               2523 drivers/rapidio/devices/rio_mport_cdev.c 	rio_release_inb_dbell(md->mport, 0, 0x0fff);
md               2525 drivers/rapidio/devices/rio_mport_cdev.c 	put_device(&md->dev);
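
Two invariants repeat across the rio_mport_cdev.c hits: every walk of `md->mappings` happens under `md->buf_mutex`, and the teardown paths use list_for_each_entry_safe() because entries are unlinked and freed mid-walk. A user-space analogue of the safe-teardown half, with a pthread mutex and a hand-rolled list standing in for the kernel primitives:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct mapping {
		int id;
		struct mapping *next;
	};

	static struct mapping *mappings;	/* md->mappings analogue */
	static pthread_mutex_t buf_mutex = PTHREAD_MUTEX_INITIALIZER;

	static void add_mapping(int id)
	{
		struct mapping *map = malloc(sizeof(*map));

		if (!map)
			return;
		map->id = id;
		pthread_mutex_lock(&buf_mutex);
		map->next = mappings;
		mappings = map;
		pthread_mutex_unlock(&buf_mutex);
	}

	static void remove_all(void)
	{
		struct mapping *map, *next;

		pthread_mutex_lock(&buf_mutex);
		for (map = mappings; map; map = next) {
			next = map->next;  /* cache before freeing, as *_safe does */
			printf("unmapping %d\n", map->id);
			free(map);
		}
		mappings = NULL;
		pthread_mutex_unlock(&buf_mutex);
	}

	int main(void)
	{
		add_mapping(1);
		add_mapping(2);
		remove_all();
		return 0;
	}
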
md                989 drivers/s390/char/tape_3590.c 			sense->fmt.f70.md);
md               1000 drivers/s390/char/tape_3590.c 			"procedure %i", sense->fmt.f70.md);
md               1043 drivers/s390/char/tape_3590.c 			"interface 0x%02x", sense->fmt.f71.md[0]);
md               1047 drivers/s390/char/tape_3590.c 			"0x%02x", sense->fmt.f71.md[0]);
md               1051 drivers/s390/char/tape_3590.c 			"0x%02x", sense->fmt.f71.md[0]);
md               1055 drivers/s390/char/tape_3590.c 			sense->fmt.f71.md[0]);
md               1059 drivers/s390/char/tape_3590.c 			"0x%02x", sense->fmt.f71.md[0]);
md               1077 drivers/s390/char/tape_3590.c 				"0x%x on CU", sense->fmt.f71.md[1]);
md               1080 drivers/s390/char/tape_3590.c 				"nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
md               1081 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[2]);
md               1087 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1]);
md               1091 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1096 drivers/s390/char/tape_3590.c 				" path 0x%x on CU", sense->fmt.f71.md[1]);
md               1100 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1106 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1]);
md               1110 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1154 drivers/s390/char/tape_3590.c 			"interface 0x%02x", sense->fmt.f71.md[0]);
md               1158 drivers/s390/char/tape_3590.c 			sense->fmt.f71.md[0]);
md               1162 drivers/s390/char/tape_3590.c 			" 0x%02x", sense->fmt.f71.md[0]);
md               1187 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1]);
md               1191 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1196 drivers/s390/char/tape_3590.c 				"interface 0x%x on DV", sense->fmt.f71.md[1]);
md               1200 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1205 drivers/s390/char/tape_3590.c 				" 0x%x on DV", sense->fmt.f71.md[1]);
md               1209 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md               1218 drivers/s390/char/tape_3590.c 				sense->fmt.f71.md[1]);
md               1222 drivers/s390/char/tape_3590.c 				 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
md                 97 drivers/s390/char/tape_3590.h 			unsigned int md:8;
md                110 drivers/s390/char/tape_3590.h 			unsigned char md[3];
md               1298 drivers/scsi/ibmvscsi/ibmvfc.c 			       struct srp_direct_buf *md)
md               1304 drivers/scsi/ibmvscsi/ibmvfc.c 		md[i].va = cpu_to_be64(sg_dma_address(sg));
md               1305 drivers/scsi/ibmvscsi/ibmvfc.c 		md[i].len = cpu_to_be32(sg_dma_len(sg));
md               1306 drivers/scsi/ibmvscsi/ibmvfc.c 		md[i].key = 0;
md                653 drivers/scsi/ibmvscsi/ibmvscsi.c 		       struct srp_direct_buf *md)
md                660 drivers/scsi/ibmvscsi/ibmvscsi.c 		struct srp_direct_buf *descr = md + i;
md               3204 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			  int nsg, struct srp_direct_buf *md, int nmd,
md               3234 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			client_ioba = be64_to_cpu(md[md_idx].va);
md               3235 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			client_len = be32_to_cpu(md[md_idx].len);
md                161 drivers/scsi/ibmvscsi_tgt/libsrp.c static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
md                179 drivers/scsi/ibmvscsi_tgt/libsrp.c 		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
md                181 drivers/scsi/ibmvscsi_tgt/libsrp.c 		len = be32_to_cpu(md->len);
md                184 drivers/scsi/ibmvscsi_tgt/libsrp.c 	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
md                198 drivers/scsi/ibmvscsi_tgt/libsrp.c 	struct srp_direct_buf *md = NULL;
md                213 drivers/scsi/ibmvscsi_tgt/libsrp.c 		md = &id->desc_list[0];
md                218 drivers/scsi/ibmvscsi_tgt/libsrp.c 		md = dma_alloc_coherent(iue->target->dev,
md                221 drivers/scsi/ibmvscsi_tgt/libsrp.c 		if (!md) {
md                227 drivers/scsi/ibmvscsi_tgt/libsrp.c 		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
md                256 drivers/scsi/ibmvscsi_tgt/libsrp.c 	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
md                264 drivers/scsi/ibmvscsi_tgt/libsrp.c 				  be32_to_cpu(id->table_desc.len), md, token);
md                298 drivers/scsi/ibmvscsi_tgt/libsrp.c 	struct srp_direct_buf *md;
md                322 drivers/scsi/ibmvscsi_tgt/libsrp.c 		md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
md                323 drivers/scsi/ibmvscsi_tgt/libsrp.c 		err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
md                340 drivers/scsi/ibmvscsi_tgt/libsrp.c 	struct srp_direct_buf *md;
md                357 drivers/scsi/ibmvscsi_tgt/libsrp.c 		md = (struct srp_direct_buf *)(cmd->add_data + offset);
md                358 drivers/scsi/ibmvscsi_tgt/libsrp.c 		len = be32_to_cpu(md->len);
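
In the ibmvfc/ibmvscsi/libsrp cluster, `md` is an array of SRP direct descriptors: each DMA-mapped scatterlist segment becomes a (va, key, len) triple in big-endian wire order, and srp_direct_data() clamps the transfer to the smaller of the command's data length and be32_to_cpu(md->len). A reduced sketch of the descriptor fill, with glibc's htobe64()/htobe32() standing in for cpu_to_be64()/cpu_to_be32() and a plain array for the scatterlist:

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	struct sg_entry {		/* stand-in for a mapped scatterlist */
		uint64_t dma_addr;
		uint32_t dma_len;
	};

	struct srp_direct_buf {		/* the SRP wire triple */
		uint64_t va;
		uint32_t key;
		uint32_t len;
	};

	static void map_sg_list(const struct sg_entry *sg, int nseg,
				struct srp_direct_buf *md)
	{
		for (int i = 0; i < nseg; i++) {
			md[i].va  = htobe64(sg[i].dma_addr); /* big-endian wire order */
			md[i].len = htobe32(sg[i].dma_len);
			md[i].key = 0;	/* unused by this transport */
		}
	}

	int main(void)
	{
		struct sg_entry sg[2] = { { 0x1000, 512 }, { 0x2000, 1024 } };
		struct srp_direct_buf md[2];

		map_sg_list(sg, 2, md);
		printf("desc0 len in wire order: 0x%08x\n", (unsigned)md[0].len);
		return 0;
	}
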
md                306 drivers/scsi/mesh.c 	volatile struct dbdma_regs __iomem *md = ms->dma;
md                311 drivers/scsi/mesh.c 	       ms, mr, md);
md                321 drivers/scsi/mesh.c 	       in_le32(&md->status), in_le32(&md->cmdptr));
md                361 drivers/scsi/mesh.c 	volatile struct dbdma_regs __iomem *md = ms->dma;
md                367 drivers/scsi/mesh.c 	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
md                683 drivers/scsi/mesh.c 	volatile struct dbdma_regs __iomem *md = ms->dma;
md                787 drivers/scsi/mesh.c 			out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds));
md                788 drivers/scsi/mesh.c 			out_le32(&md->control, (RUN << 16) | RUN);
md               1321 drivers/scsi/mesh.c 	volatile struct dbdma_regs __iomem *md = ms->dma;
md               1330 drivers/scsi/mesh.c 		       && (in_le32(&md->status) & ACTIVE) != 0) {
md               1335 drivers/scsi/mesh.c 	out_le32(&md->control, RUN << 16);	/* turn off RUN bit */
md               1708 drivers/scsi/mesh.c 	volatile struct dbdma_regs __iomem *md = ms->dma;
md               1716 drivers/scsi/mesh.c 	out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16);	/* stop dma */
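
The mesh.c hits program an Apple DBDMA engine whose control register takes a mask in the high 16 bits and the new bit values in the low 16: `(RUN << 16) | RUN` sets RUN, while `(RUN|PAUSE|FLUSH|WAKE) << 16` with a zero low half clears all four bits at once. A software model of that register semantics (the bit values follow my reading of asm/dbdma.h and should be treated as illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define RUN	(1u << 15)
	#define PAUSE	(1u << 14)
	#define FLUSH	(1u << 13)
	#define WAKE	(1u << 12)

	static uint32_t ctrl;		/* md->control analogue */

	static void write_control(uint32_t word)
	{
		uint32_t mask = word >> 16, bits = word & 0xffffu;

		/* Only bits named in the mask change; the rest hold. */
		ctrl = (ctrl & ~mask) | (bits & mask);
	}

	int main(void)
	{
		write_control((RUN << 16) | RUN);	/* start: RUN := 1 */
		printf("ctrl=%#x\n", (unsigned)ctrl);	/* 0x8000 */
		write_control((RUN | PAUSE | FLUSH | WAKE) << 16); /* stop: clear all */
		printf("ctrl=%#x\n", (unsigned)ctrl);	/* 0 */
		return 0;
	}
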
md               1715 drivers/scsi/sg.c 	struct rq_map_data *md, map_data;
md               1764 drivers/scsi/sg.c 		md = NULL;
md               1766 drivers/scsi/sg.c 		md = &map_data;
md               1768 drivers/scsi/sg.c 	if (md) {
md               1789 drivers/scsi/sg.c 		md->pages = req_schp->pages;
md               1790 drivers/scsi/sg.c 		md->page_order = req_schp->page_order;
md               1791 drivers/scsi/sg.c 		md->nr_entries = req_schp->k_use_sg;
md               1792 drivers/scsi/sg.c 		md->offset = 0;
md               1793 drivers/scsi/sg.c 		md->null_mapped = hp->dxferp ? 0 : 1;
md               1795 drivers/scsi/sg.c 			md->from_user = 1;
md               1797 drivers/scsi/sg.c 			md->from_user = 0;
md               1814 drivers/scsi/sg.c 		res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
md               1817 drivers/scsi/sg.c 		res = blk_rq_map_user(q, rq, md, hp->dxferp,
md               1823 drivers/scsi/sg.c 		if (!md) {
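
In the sg.c excerpt, `md` selects between two mapping strategies: left NULL, blk_rq_map_user() pins the caller's own pages for direct I/O; pointed at a filled-in struct rq_map_data, the block layer instead bounces the transfer through the driver's reserve buffer described by pages/page_order/nr_entries, with null_mapped flagging requests that carry no user address at all. The branch is easier to see stripped of the block layer; this schematic stand-in uses my own types and performs no real I/O:

	#include <stdio.h>

	struct map_data {
		void  **pages;		/* reserve buffer, as req_schp->pages */
		int	nr_entries;
		int	null_mapped;	/* request carries no user address */
		int	from_user;	/* copy-in needed before a write */
	};

	static void issue(const void *user_ptr, const struct map_data *md)
	{
		if (!md)
			printf("mapping caller pages directly\n");
		else
			printf("bouncing through %d reserve pages%s\n",
			       md->nr_entries,
			       md->null_mapped ? " (no copy)" : "");
		(void)user_ptr;
	}

	int main(void)
	{
		void *pages[4] = { 0 };
		struct map_data md = {
			.pages = pages, .nr_entries = 4,
			.null_mapped = 0, .from_user = 1,
		};

		issue(NULL, &md);	/* indirect I/O via the reserve buffer */
		issue(pages, NULL);	/* direct-to-user mapping */
		return 0;
	}
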
md                 75 drivers/soc/xilinx/zynqmp_power.c 	int md;
md                 77 drivers/soc/xilinx/zynqmp_power.c 	for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
md                 78 drivers/soc/xilinx/zynqmp_power.c 		if (suspend_modes[md]) {
md                 79 drivers/soc/xilinx/zynqmp_power.c 			if (md == suspend_mode)
md                 80 drivers/soc/xilinx/zynqmp_power.c 				s += sprintf(s, "[%s] ", suspend_modes[md]);
md                 82 drivers/soc/xilinx/zynqmp_power.c 				s += sprintf(s, "%s ", suspend_modes[md]);
md                 95 drivers/soc/xilinx/zynqmp_power.c 	int md, ret = -EINVAL;
md                100 drivers/soc/xilinx/zynqmp_power.c 	for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
md                101 drivers/soc/xilinx/zynqmp_power.c 		if (suspend_modes[md] &&
md                102 drivers/soc/xilinx/zynqmp_power.c 		    sysfs_streq(suspend_modes[md], buf)) {
md                107 drivers/soc/xilinx/zynqmp_power.c 	if (!ret && md != suspend_mode) {
md                108 drivers/soc/xilinx/zynqmp_power.c 		ret = eemi_ops->set_suspend_mode(md);
md                110 drivers/soc/xilinx/zynqmp_power.c 			suspend_mode = md;
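
zynqmp_power.c is the canonical sysfs mode attribute: show() walks a string table and brackets the active entry, store() matches the written name with sysfs_streq() and switches modes only on a change. A stand-alone version of the same table walk, with strcmp() in place of sysfs_streq() and a made-up mode list (index 0 left NULL to mirror the driver's sparse table):

	#include <stdio.h>
	#include <string.h>

	static const char *const suspend_modes[] = { NULL, "standard", "power-off" };
	#define NMODES (sizeof(suspend_modes) / sizeof(suspend_modes[0]))

	static size_t suspend_mode = 1;

	static int show(char *buf, size_t len)
	{
		char *s = buf;

		for (size_t md = 0; md < NMODES; md++) {
			if (!suspend_modes[md])
				continue;	/* sparse table, as in the driver */
			s += snprintf(s, len - (size_t)(s - buf),
				      md == suspend_mode ? "[%s] " : "%s ",
				      suspend_modes[md]);
		}
		return (int)(s - buf);
	}

	static int store(const char *buf)
	{
		for (size_t md = 0; md < NMODES; md++)
			if (suspend_modes[md] && strcmp(suspend_modes[md], buf) == 0) {
				suspend_mode = md;	/* only on a match */
				return 0;
			}
		return -1;
	}

	int main(void)
	{
		char buf[64];

		show(buf, sizeof(buf));
		printf("%s\n", buf);	/* "[standard] power-off " */
		store("power-off");
		show(buf, sizeof(buf));
		printf("%s\n", buf);	/* "standard [power-off] " */
		return 0;
	}
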
md                 38 drivers/staging/media/imx/imx-media-capture.c 	struct imx_media_dev  *md;
md                572 drivers/staging/media/imx/imx-media-capture.c 	ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
md                605 drivers/staging/media/imx/imx-media-capture.c 	ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
md                741 drivers/staging/media/imx/imx-media-capture.c 	priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
md                808 drivers/staging/media/imx/imx-media-capture.c 	imx_media_add_video_device(priv->md, vdev);
md                 39 drivers/staging/media/imx/imx-media-csc-scaler.c 	struct imx_media_dev		*md;
md                504 drivers/staging/media/imx/imx-media-csc-scaler.c 	struct ipu_soc *ipu = priv->md->ipu[0];
md                850 drivers/staging/media/imx/imx-media-csc-scaler.c 	vfd->v4l2_dev = &priv->md->v4l2_dev;
md                877 drivers/staging/media/imx/imx-media-csc-scaler.c imx_media_csc_scaler_device_init(struct imx_media_dev *md)
md                887 drivers/staging/media/imx/imx-media-csc-scaler.c 	priv->md = md;
md                888 drivers/staging/media/imx/imx-media-csc-scaler.c 	priv->dev = md->md.dev;
md                909 drivers/staging/media/imx/imx-media-csc-scaler.c 		v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
md                108 drivers/staging/media/imx/imx-media-dev-common.c 	dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
md                111 drivers/staging/media/imx/imx-media-dev-common.c 	pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
md                153 drivers/staging/media/imx/imx-media-dev-common.c 		vdev_lists = devm_kcalloc(imxmd->md.dev,
md                213 drivers/staging/media/imx/imx-media-dev-common.c 	return media_device_register(&imxmd->md);
md                230 drivers/staging/media/imx/imx-media-dev-common.c 		dev_dbg(imxmd->md.dev,
md                264 drivers/staging/media/imx/imx-media-dev-common.c 						   struct imx_media_dev, md);
md                300 drivers/staging/media/imx/imx-media-dev-common.c 			dev_dbg(imxmd->md.dev,
md                310 drivers/staging/media/imx/imx-media-dev-common.c 			dev_dbg(imxmd->md.dev,
md                366 drivers/staging/media/imx/imx-media-dev-common.c 	strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
md                367 drivers/staging/media/imx/imx-media-dev-common.c 	imxmd->md.ops = ops ? ops : &imx_media_md_ops;
md                368 drivers/staging/media/imx/imx-media-dev-common.c 	imxmd->md.dev = dev;
md                372 drivers/staging/media/imx/imx-media-dev-common.c 	imxmd->v4l2_dev.mdev = &imxmd->md;
md                377 drivers/staging/media/imx/imx-media-dev-common.c 	media_device_init(&imxmd->md);
md                393 drivers/staging/media/imx/imx-media-dev-common.c 	media_device_cleanup(&imxmd->md);
md                 98 drivers/staging/media/imx/imx-media-dev.c 	media_device_cleanup(&imxmd->md);
md                114 drivers/staging/media/imx/imx-media-dev.c 	media_device_unregister(&imxmd->md);
md                116 drivers/staging/media/imx/imx-media-dev.c 	media_device_cleanup(&imxmd->md);
md                 26 drivers/staging/media/imx/imx-media-of.c 		dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
md                 38 drivers/staging/media/imx/imx-media-of.c 			dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
md                925 drivers/staging/media/imx/imx-media-utils.c 	mutex_lock(&imxmd->md.graph_mutex);
md                941 drivers/staging/media/imx/imx-media-utils.c 	mutex_unlock(&imxmd->md.graph_mutex);
md                128 drivers/staging/media/imx/imx-media.h 	struct media_device md;
md               1283 drivers/staging/media/imx/imx7-media-csi.c 	media_device_unregister(&imxmd->md);
md               1284 drivers/staging/media/imx/imx7-media-csi.c 	media_device_cleanup(&imxmd->md);
md               1301 drivers/staging/media/imx/imx7-media-csi.c 	media_device_unregister(&imxmd->md);
md               1303 drivers/staging/media/imx/imx7-media-csi.c 	media_device_cleanup(&imxmd->md);
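
The imx-media entries above all funnel through one embedding: struct imx_media_dev carries its media_device as the .md member (imx-media.h:128), and the capture/dev-common code recovers the wrapper with container_of(). A minimal restatement of that recovery step:

#include <linux/kernel.h>	/* container_of() */
#include <media/media-device.h>

/* Given the embedded media_device, recover the owning imx_media_dev;
 * this mirrors the container_of() calls at imx-media-capture.c:741
 * and imx-media-dev-common.c:264 above. */
static inline struct imx_media_dev *demo_to_imxmd(struct media_device *mdev)
{
	return container_of(mdev, struct imx_media_dev, md);
}
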
md                 64 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c 		pattrib->mdata = (u8)prxreport->md;
md                 43 drivers/staging/rtl8723bs/include/rtl8723b_recv.h 	u32 md:1;
md                783 drivers/usb/gadget/function/f_uvc.c 	struct uvc_color_matching_descriptor *md;
md                831 drivers/usb/gadget/function/f_uvc.c 	md = &opts->uvc_color_matching;
md                832 drivers/usb/gadget/function/f_uvc.c 	md->bLength			= UVC_DT_COLOR_MATCHING_SIZE;
md                833 drivers/usb/gadget/function/f_uvc.c 	md->bDescriptorType		= USB_DT_CS_INTERFACE;
md                834 drivers/usb/gadget/function/f_uvc.c 	md->bDescriptorSubType		= UVC_VS_COLORFORMAT;
md                835 drivers/usb/gadget/function/f_uvc.c 	md->bColorPrimaries		= 1;
md                836 drivers/usb/gadget/function/f_uvc.c 	md->bTransferCharacteristics	= 1;
md                837 drivers/usb/gadget/function/f_uvc.c 	md->bMatrixCoefficients		= 4;
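
A note on the f_uvc defaults above, hedged against the UVC class specification's color matching descriptor tables (verify against the spec revision you target):

md->bColorPrimaries		= 1;	/* BT.709, sRGB (the default) */
md->bTransferCharacteristics	= 1;	/* BT.709 */
md->bMatrixCoefficients		= 4;	/* SMPTE 170M (BT.601) */
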
md               1148 drivers/video/fbdev/core/modedb.c 	const struct fb_videomode *m, *m1 = NULL, *md = NULL, *best = NULL;
md               1165 drivers/video/fbdev/core/modedb.c  			md = m;
md               1172 drivers/video/fbdev/core/modedb.c 		best = md;
md               1191 drivers/video/fbdev/core/modedb.c 	if (md) {
md               1192 drivers/video/fbdev/core/modedb.c 		best = md;
md                348 drivers/video/fbdev/efifb.c 	efi_memory_desc_t md;
md                457 drivers/video/fbdev/efifb.c 	    !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
md                459 drivers/video/fbdev/efifb.c 		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
md                469 drivers/video/fbdev/efifb.c 		md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
md                471 drivers/video/fbdev/efifb.c 		if (md.attribute) {
md                473 drivers/video/fbdev/efifb.c 			mem_flags &= md.attribute;
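
The efifb lines above look up the EFI memory descriptor covering the framebuffer and narrow the permissible ioremap caching modes to what firmware advertised. A minimal sketch of that idiom, with fb_base standing in for efifb_fix.smem_start:

#include <linux/efi.h>

/* Return the subset of caching attributes firmware allows for the
 * region containing fb_base, or 0 if it is not in the memory map. */
static u64 demo_allowed_attrs(u64 fb_base)
{
	efi_memory_desc_t md;

	if (efi_mem_desc_lookup(fb_base, &md))
		return 0;

	return md.attribute & (EFI_MEMORY_UC | EFI_MEMORY_WC |
			       EFI_MEMORY_WT | EFI_MEMORY_WB);
}
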
md                142 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) {
md                163 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_set_ctrl(void* md, struct v4l2_control *p) {
md                165 drivers/video/fbdev/matrox/matroxfb_g450.c 	struct matrox_fb_info *minfo = md;
md                217 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_get_ctrl(void* md, struct v4l2_control *p) {
md                219 drivers/video/fbdev/matrox/matroxfb_g450.c 	struct matrox_fb_info *minfo = md;
md                521 drivers/video/fbdev/matrox/matroxfb_g450.c static int matroxfb_g450_compute(void* md, struct my_timming* mt) {
md                522 drivers/video/fbdev/matrox/matroxfb_g450.c 	struct matrox_fb_info *minfo = md;
md                559 drivers/video/fbdev/matrox/matroxfb_g450.c static int matroxfb_g450_program(void* md) {
md                560 drivers/video/fbdev/matrox/matroxfb_g450.c 	struct matrox_fb_info *minfo = md;
md                568 drivers/video/fbdev/matrox/matroxfb_g450.c static int matroxfb_g450_verify_mode(void* md, u_int32_t arg) {
md                578 drivers/video/fbdev/matrox/matroxfb_g450.c static int g450_dvi_compute(void* md, struct my_timming* mt) {
md                579 drivers/video/fbdev/matrox/matroxfb_g450.c 	struct matrox_fb_info *minfo = md;
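
Every matroxfb_g450 hook above follows one convention: the opaque void *md handed to the output callback is really the owning matrox_fb_info, recovered by plain assignment. A one-function restatement (demo_ naming assumed):

static int demo_out_program(void *md)
{
	struct matrox_fb_info *minfo = md;	/* recover the typed pointer */

	/* ... program the output using minfo ... */
	return 0;
}
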
md                135 drivers/video/fbdev/matrox/matroxfb_maven.c static int* get_ctrl_ptr(struct maven_data* md, int idx) {
md                136 drivers/video/fbdev/matrox/matroxfb_maven.c 	return (int*)((char*)(md->primary_head) + maven_controls[idx].control);
md                340 drivers/video/fbdev/matrox/matroxfb_maven.c static unsigned char maven_compute_deflicker (const struct maven_data* md) {
md                343 drivers/video/fbdev/matrox/matroxfb_maven.c 	df = (md->version == MGATVO_B?0x40:0x00);
md                344 drivers/video/fbdev/matrox/matroxfb_maven.c 	switch (md->primary_head->altout.tvo_params.deflicker) {
md                358 drivers/video/fbdev/matrox/matroxfb_maven.c static void maven_compute_bwlevel (const struct maven_data* md,
md                360 drivers/video/fbdev/matrox/matroxfb_maven.c 	const int b = md->primary_head->altout.tvo_params.brightness + BLMIN;
md                361 drivers/video/fbdev/matrox/matroxfb_maven.c 	const int c = md->primary_head->altout.tvo_params.contrast;
md                367 drivers/video/fbdev/matrox/matroxfb_maven.c static const struct maven_gamma* maven_compute_gamma (const struct maven_data* md) {
md                368 drivers/video/fbdev/matrox/matroxfb_maven.c  	return maven_gamma + md->primary_head->altout.tvo_params.gamma;
md                372 drivers/video/fbdev/matrox/matroxfb_maven.c static void maven_init_TVdata(const struct maven_data* md, struct mavenregs* data) {
md                477 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct matrox_fb_info *minfo = md->primary_head;
md                485 drivers/video/fbdev/matrox/matroxfb_maven.c 	data->regs[0x93] = maven_compute_deflicker(md);
md                490 drivers/video/fbdev/matrox/matroxfb_maven.c 		g = maven_compute_gamma(md);
md                505 drivers/video/fbdev/matrox/matroxfb_maven.c 		maven_compute_bwlevel (md, &bl, &wl);
md                755 drivers/video/fbdev/matrox/matroxfb_maven.c static inline int maven_compute_timming(struct maven_data* md,
md                760 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct matrox_fb_info *minfo = md->primary_head;
md                770 drivers/video/fbdev/matrox/matroxfb_maven.c 		maven_init_TVdata(md, m);
md                807 drivers/video/fbdev/matrox/matroxfb_maven.c 		if (md->version == MGATVO_B) {
md                989 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_program_timming(struct maven_data* md,
md                991 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct i2c_client *c = md->client;
md               1027 drivers/video/fbdev/matrox/matroxfb_maven.c static inline int maven_resync(struct maven_data* md) {
md               1028 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct i2c_client *c = md->client;
md               1033 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_get_queryctrl (struct maven_data* md, 
md               1055 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_set_control (struct maven_data* md, 
md               1065 drivers/video/fbdev/matrox/matroxfb_maven.c 	if (p->value == *get_ctrl_ptr(md, i)) return 0;
md               1076 drivers/video/fbdev/matrox/matroxfb_maven.c 	*get_ctrl_ptr(md, i) = p->value;
md               1083 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_compute_bwlevel(md, &blacklevel, &whitelevel);
md               1086 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg_pair(md->client, 0x0e, blacklevel);
md               1087 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg_pair(md->client, 0x1e, whitelevel);
md               1092 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x20, p->value);
md               1093 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x22, p->value);
md               1098 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x25, p->value);
md               1104 drivers/video/fbdev/matrox/matroxfb_maven.c 		  g = maven_compute_gamma(md);
md               1105 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x83, g->reg83);
md               1106 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x84, g->reg84);
md               1107 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x85, g->reg85);
md               1108 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x86, g->reg86);
md               1109 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x87, g->reg87);
md               1110 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x88, g->reg88);
md               1111 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x89, g->reg89);
md               1112 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x8a, g->reg8a);
md               1113 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x8b, g->reg8b);
md               1119 drivers/video/fbdev/matrox/matroxfb_maven.c 			  = maven_get_reg(md->client, 0x8d);
md               1122 drivers/video/fbdev/matrox/matroxfb_maven.c 			maven_set_reg(md->client, 0x8d, val);
md               1127 drivers/video/fbdev/matrox/matroxfb_maven.c 		  maven_set_reg(md->client, 0x93, maven_compute_deflicker(md));
md               1136 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_get_control (struct maven_data* md, 
md               1142 drivers/video/fbdev/matrox/matroxfb_maven.c 	p->value = *get_ctrl_ptr(md, i);
md               1148 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_compute(void* md, struct my_timming* mt) {
md               1149 drivers/video/fbdev/matrox/matroxfb_maven.c #define mdinfo ((struct maven_data*)md)
md               1151 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_compute_timming(md, mt, &minfo->hw.maven);
md               1156 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_program(void* md) {
md               1157 drivers/video/fbdev/matrox/matroxfb_maven.c #define mdinfo ((struct maven_data*)md)
md               1159 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_program_timming(md, &minfo->hw.maven);
md               1164 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_start(void* md) {
md               1165 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_resync(md);
md               1168 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_verify_mode(void* md, u_int32_t arg) {
md               1178 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_get_queryctrl(void* md, struct v4l2_queryctrl* p) {
md               1179 drivers/video/fbdev/matrox/matroxfb_maven.c         return maven_get_queryctrl(md, p);
md               1182 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_get_ctrl(void* md, struct v4l2_control* p) {
md               1183 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_get_control(md, p);
md               1186 drivers/video/fbdev/matrox/matroxfb_maven.c static int maven_out_set_ctrl(void* md, struct v4l2_control* p) {
md               1187 drivers/video/fbdev/matrox/matroxfb_maven.c 	return maven_set_control(md, p);
md               1202 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct maven_data* md = i2c_get_clientdata(clnt);
md               1207 drivers/video/fbdev/matrox/matroxfb_maven.c 	md->primary_head = minfo;
md               1208 drivers/video/fbdev/matrox/matroxfb_maven.c 	md->client = clnt;
md               1212 drivers/video/fbdev/matrox/matroxfb_maven.c 	minfo->outputs[1].data = md;
md               1216 drivers/video/fbdev/matrox/matroxfb_maven.c 		md->version = MGATVO_B;
md               1219 drivers/video/fbdev/matrox/matroxfb_maven.c 		md->version = MGATVO_C;
md               1228 drivers/video/fbdev/matrox/matroxfb_maven.c 			*get_ctrl_ptr(md, i) = maven_controls[i].desc.default_value;
md               1236 drivers/video/fbdev/matrox/matroxfb_maven.c 	struct maven_data* md = i2c_get_clientdata(clnt);
md               1238 drivers/video/fbdev/matrox/matroxfb_maven.c 	if (md->primary_head) {
md               1239 drivers/video/fbdev/matrox/matroxfb_maven.c 		struct matrox_fb_info *minfo = md->primary_head;
md               1247 drivers/video/fbdev/matrox/matroxfb_maven.c 		md->primary_head = NULL;
md                 54 drivers/video/fbdev/omap/lcd_mipid.c static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf,
md                 62 drivers/video/fbdev/omap/lcd_mipid.c 	BUG_ON(md->spi == NULL);
md                103 drivers/video/fbdev/omap/lcd_mipid.c 	r = spi_sync(md->spi, &m);
md                105 drivers/video/fbdev/omap/lcd_mipid.c 		dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
md                111 drivers/video/fbdev/omap/lcd_mipid.c static inline void mipid_cmd(struct mipid_device *md, int cmd)
md                113 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_transfer(md, cmd, NULL, 0, NULL, 0);
md                116 drivers/video/fbdev/omap/lcd_mipid.c static inline void mipid_write(struct mipid_device *md,
md                119 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_transfer(md, reg, buf, len, NULL, 0);
md                122 drivers/video/fbdev/omap/lcd_mipid.c static inline void mipid_read(struct mipid_device *md,
md                125 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_transfer(md, reg, NULL, 0, buf, len);
md                128 drivers/video/fbdev/omap/lcd_mipid.c static void set_data_lines(struct mipid_device *md, int data_lines)
md                143 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_write(md, 0x3a, (u8 *)&par, 2);
md                146 drivers/video/fbdev/omap/lcd_mipid.c static void send_init_string(struct mipid_device *md)
md                150 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_write(md, 0xc2, (u8 *)initpar, sizeof(initpar));
md                151 drivers/video/fbdev/omap/lcd_mipid.c 	set_data_lines(md, md->panel.data_lines);
md                154 drivers/video/fbdev/omap/lcd_mipid.c static void hw_guard_start(struct mipid_device *md, int guard_msec)
md                156 drivers/video/fbdev/omap/lcd_mipid.c 	md->hw_guard_wait = msecs_to_jiffies(guard_msec);
md                157 drivers/video/fbdev/omap/lcd_mipid.c 	md->hw_guard_end = jiffies + md->hw_guard_wait;
md                160 drivers/video/fbdev/omap/lcd_mipid.c static void hw_guard_wait(struct mipid_device *md)
md                162 drivers/video/fbdev/omap/lcd_mipid.c 	unsigned long wait = md->hw_guard_end - jiffies;
md                164 drivers/video/fbdev/omap/lcd_mipid.c 	if ((long)wait > 0 && time_before_eq(wait,  md->hw_guard_wait)) {
md                170 drivers/video/fbdev/omap/lcd_mipid.c static void set_sleep_mode(struct mipid_device *md, int on)
md                178 drivers/video/fbdev/omap/lcd_mipid.c 	hw_guard_wait(md);
md                179 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_cmd(md, cmd);
md                180 drivers/video/fbdev/omap/lcd_mipid.c 	hw_guard_start(md, 120);
md                192 drivers/video/fbdev/omap/lcd_mipid.c static void set_display_state(struct mipid_device *md, int enabled)
md                196 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_cmd(md, cmd);
md                201 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                202 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_platform_data *pd = md->spi->dev.platform_data;
md                208 drivers/video/fbdev/omap/lcd_mipid.c 	if (!md->enabled) {
md                209 drivers/video/fbdev/omap/lcd_mipid.c 		md->saved_bklight_level = level;
md                219 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                220 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_platform_data *pd = md->spi->dev.platform_data;
md                229 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                230 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_platform_data *pd = md->spi->dev.platform_data;
md                243 drivers/video/fbdev/omap/lcd_mipid.c static u16 read_first_pixel(struct mipid_device *md)
md                248 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_lock(&md->mutex);
md                249 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_READ_RED, &red, 1);
md                250 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_READ_GREEN, &green, 1);
md                251 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_READ_BLUE, &blue, 1);
md                252 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_unlock(&md->mutex);
md                254 drivers/video/fbdev/omap/lcd_mipid.c 	switch (md->panel.data_lines) {
md                273 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                286 drivers/video/fbdev/omap/lcd_mipid.c 		omapfb_write_first_pixel(md->fbdev, test_values[i]);
md                293 drivers/video/fbdev/omap/lcd_mipid.c 			pixel = read_first_pixel(md);
md                297 drivers/video/fbdev/omap/lcd_mipid.c 				dev_err(&md->spi->dev,
md                310 drivers/video/fbdev/omap/lcd_mipid.c static void ls041y3_esd_recover(struct mipid_device *md)
md                312 drivers/video/fbdev/omap/lcd_mipid.c 	dev_err(&md->spi->dev, "performing LCD ESD recovery\n");
md                313 drivers/video/fbdev/omap/lcd_mipid.c 	set_sleep_mode(md, 1);
md                314 drivers/video/fbdev/omap/lcd_mipid.c 	set_sleep_mode(md, 0);
md                317 drivers/video/fbdev/omap/lcd_mipid.c static void ls041y3_esd_check_mode1(struct mipid_device *md)
md                321 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_RDDSDR, &state1, 1);
md                322 drivers/video/fbdev/omap/lcd_mipid.c 	set_sleep_mode(md, 0);
md                323 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_RDDSDR, &state2, 1);
md                324 drivers/video/fbdev/omap/lcd_mipid.c 	dev_dbg(&md->spi->dev, "ESD mode 1 state1 %02x state2 %02x\n",
md                330 drivers/video/fbdev/omap/lcd_mipid.c 		ls041y3_esd_recover(md);
md                333 drivers/video/fbdev/omap/lcd_mipid.c static void ls041y3_esd_check_mode2(struct mipid_device *md)
md                353 drivers/video/fbdev/omap/lcd_mipid.c 		mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);
md                356 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, rd->cmd, rbuf, 2);
md                361 drivers/video/fbdev/omap/lcd_mipid.c 		mipid_write(md, rd->cmd, (u8 *)rd->wbuf, rd->wlen);
md                364 drivers/video/fbdev/omap/lcd_mipid.c 	dev_dbg(&md->spi->dev, "ESD mode 2 state %02x\n", rbuf[1]);
md                366 drivers/video/fbdev/omap/lcd_mipid.c 		ls041y3_esd_recover(md);
md                369 drivers/video/fbdev/omap/lcd_mipid.c static void ls041y3_esd_check(struct mipid_device *md)
md                371 drivers/video/fbdev/omap/lcd_mipid.c 	ls041y3_esd_check_mode1(md);
md                372 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->revision >= 0x88)
md                373 drivers/video/fbdev/omap/lcd_mipid.c 		ls041y3_esd_check_mode2(md);
md                376 drivers/video/fbdev/omap/lcd_mipid.c static void mipid_esd_start_check(struct mipid_device *md)
md                378 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->esd_check != NULL)
md                379 drivers/video/fbdev/omap/lcd_mipid.c 		schedule_delayed_work(&md->esd_work,
md                383 drivers/video/fbdev/omap/lcd_mipid.c static void mipid_esd_stop_check(struct mipid_device *md)
md                385 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->esd_check != NULL)
md                386 drivers/video/fbdev/omap/lcd_mipid.c 		cancel_delayed_work_sync(&md->esd_work);
md                391 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = container_of(work, struct mipid_device,
md                394 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_lock(&md->mutex);
md                395 drivers/video/fbdev/omap/lcd_mipid.c 	md->esd_check(md);
md                396 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_unlock(&md->mutex);
md                397 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_esd_start_check(md);
md                402 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                404 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_lock(&md->mutex);
md                406 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->enabled) {
md                407 drivers/video/fbdev/omap/lcd_mipid.c 		mutex_unlock(&md->mutex);
md                410 drivers/video/fbdev/omap/lcd_mipid.c 	set_sleep_mode(md, 0);
md                411 drivers/video/fbdev/omap/lcd_mipid.c 	md->enabled = 1;
md                412 drivers/video/fbdev/omap/lcd_mipid.c 	send_init_string(md);
md                413 drivers/video/fbdev/omap/lcd_mipid.c 	set_display_state(md, 1);
md                414 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_set_bklight_level(panel, md->saved_bklight_level);
md                415 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_esd_start_check(md);
md                417 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_unlock(&md->mutex);
md                423 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                429 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_esd_stop_check(md);
md                430 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_lock(&md->mutex);
md                432 drivers/video/fbdev/omap/lcd_mipid.c 	if (!md->enabled) {
md                433 drivers/video/fbdev/omap/lcd_mipid.c 		mutex_unlock(&md->mutex);
md                436 drivers/video/fbdev/omap/lcd_mipid.c 	md->saved_bklight_level = mipid_get_bklight_level(panel);
md                438 drivers/video/fbdev/omap/lcd_mipid.c 	set_display_state(md, 0);
md                439 drivers/video/fbdev/omap/lcd_mipid.c 	set_sleep_mode(md, 1);
md                440 drivers/video/fbdev/omap/lcd_mipid.c 	md->enabled = 0;
md                442 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_unlock(&md->mutex);
md                445 drivers/video/fbdev/omap/lcd_mipid.c static int panel_enabled(struct mipid_device *md)
md                450 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
md                453 drivers/video/fbdev/omap/lcd_mipid.c 	dev_dbg(&md->spi->dev,
md                462 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                464 drivers/video/fbdev/omap/lcd_mipid.c 	md->fbdev = fbdev;
md                465 drivers/video/fbdev/omap/lcd_mipid.c 	INIT_DELAYED_WORK(&md->esd_work, mipid_esd_work);
md                466 drivers/video/fbdev/omap/lcd_mipid.c 	mutex_init(&md->mutex);
md                468 drivers/video/fbdev/omap/lcd_mipid.c 	md->enabled = panel_enabled(md);
md                470 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->enabled)
md                471 drivers/video/fbdev/omap/lcd_mipid.c 		mipid_esd_start_check(md);
md                473 drivers/video/fbdev/omap/lcd_mipid.c 		md->saved_bklight_level = mipid_get_bklight_level(panel);
md                480 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = to_mipid_device(panel);
md                482 drivers/video/fbdev/omap/lcd_mipid.c 	if (md->enabled)
md                483 drivers/video/fbdev/omap/lcd_mipid.c 		mipid_esd_stop_check(md);
md                511 drivers/video/fbdev/omap/lcd_mipid.c static int mipid_detect(struct mipid_device *md)
md                516 drivers/video/fbdev/omap/lcd_mipid.c 	pdata = md->spi->dev.platform_data;
md                518 drivers/video/fbdev/omap/lcd_mipid.c 		dev_err(&md->spi->dev, "missing platform data\n");
md                522 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_read(md, MIPID_CMD_READ_DISP_ID, display_id, 3);
md                523 drivers/video/fbdev/omap/lcd_mipid.c 	dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
md                528 drivers/video/fbdev/omap/lcd_mipid.c 		md->panel.name = "lph8923";
md                531 drivers/video/fbdev/omap/lcd_mipid.c 		md->panel.name = "ls041y3";
md                532 drivers/video/fbdev/omap/lcd_mipid.c 		md->esd_check = ls041y3_esd_check;
md                535 drivers/video/fbdev/omap/lcd_mipid.c 		md->panel.name = "unknown";
md                536 drivers/video/fbdev/omap/lcd_mipid.c 		dev_err(&md->spi->dev, "invalid display ID\n");
md                540 drivers/video/fbdev/omap/lcd_mipid.c 	md->revision = display_id[1];
md                541 drivers/video/fbdev/omap/lcd_mipid.c 	md->panel.data_lines = pdata->data_lines;
md                543 drivers/video/fbdev/omap/lcd_mipid.c 			md->panel.name, md->revision, md->panel.data_lines);
md                550 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md;
md                553 drivers/video/fbdev/omap/lcd_mipid.c 	md = kzalloc(sizeof(*md), GFP_KERNEL);
md                554 drivers/video/fbdev/omap/lcd_mipid.c 	if (md == NULL) {
md                560 drivers/video/fbdev/omap/lcd_mipid.c 	md->spi = spi;
md                561 drivers/video/fbdev/omap/lcd_mipid.c 	dev_set_drvdata(&spi->dev, md);
md                562 drivers/video/fbdev/omap/lcd_mipid.c 	md->panel = mipid_panel;
md                564 drivers/video/fbdev/omap/lcd_mipid.c 	r = mipid_detect(md);
md                568 drivers/video/fbdev/omap/lcd_mipid.c 	omapfb_register_panel(&md->panel);
md                575 drivers/video/fbdev/omap/lcd_mipid.c 	struct mipid_device *md = dev_get_drvdata(&spi->dev);
md                577 drivers/video/fbdev/omap/lcd_mipid.c 	mipid_disable(&md->panel);
md                578 drivers/video/fbdev/omap/lcd_mipid.c 	kfree(md);
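
The hw_guard pair in lcd_mipid above implements a settle-time window: after a command with a mandated delay (sleep-in/out needs 120 ms), record a deadline, and before the next guarded command sleep out whatever part of the window remains. A self-contained sketch under assumed names:

#include <linux/delay.h>
#include <linux/jiffies.h>

struct demo_lcd {
	unsigned long hw_guard_end;	/* no command before this jiffy */
	unsigned long hw_guard_wait;	/* window length, jiffies */
};

static void demo_guard_start(struct demo_lcd *lcd, int guard_msec)
{
	lcd->hw_guard_wait = msecs_to_jiffies(guard_msec);
	lcd->hw_guard_end = jiffies + lcd->hw_guard_wait;
}

static void demo_guard_wait(struct demo_lcd *lcd)
{
	unsigned long wait = lcd->hw_guard_end - jiffies;

	/* sleep only if the deadline is still ahead; the second test
	 * rejects values produced by jiffies wrapping past the end */
	if ((long)wait > 0 && time_before_eq(wait, lcd->hw_guard_wait))
		msleep(jiffies_to_msecs(wait));
}
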
md                 71 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                 73 drivers/w1/w1.c 	dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name);
md                 74 drivers/w1/w1.c 	memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master));
md                 75 drivers/w1/w1.c 	kfree(md);
md                210 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                213 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                214 drivers/w1/w1.c 	count = sprintf(buf, "%s\n", md->name);
md                215 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                225 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                232 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                233 drivers/w1/w1.c 	md->search_count = tmp;
md                234 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                237 drivers/w1/w1.c 		wake_up_process(md->thread);
md                246 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                249 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                250 drivers/w1/w1.c 	count = sprintf(buf, "%d\n", md->search_count);
md                251 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                261 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                268 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                269 drivers/w1/w1.c 	md->enable_pullup = tmp;
md                270 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                279 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                282 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                283 drivers/w1/w1.c 	count = sprintf(buf, "%d\n", md->enable_pullup);
md                284 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                291 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                294 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                295 drivers/w1/w1.c 	count = sprintf(buf, "0x%p\n", md->bus_master);
md                296 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                319 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                324 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                325 drivers/w1/w1.c 	md->max_slave_count = tmp;
md                327 drivers/w1/w1.c 	clear_bit(W1_WARN_MAX_COUNT, &md->flags);
md                328 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                335 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                338 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                339 drivers/w1/w1.c 	count = sprintf(buf, "%d\n", md->max_slave_count);
md                340 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                346 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                349 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                350 drivers/w1/w1.c 	count = sprintf(buf, "%lu\n", md->attempts);
md                351 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                357 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                360 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                361 drivers/w1/w1.c 	count = sprintf(buf, "%d\n", md->slave_count);
md                362 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                369 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                374 drivers/w1/w1.c 	mutex_lock(&md->list_mutex);
md                376 drivers/w1/w1.c 	list_for_each_safe(ent, n, &md->slist) {
md                384 drivers/w1/w1.c 	mutex_unlock(&md->list_mutex);
md                461 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                469 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                470 drivers/w1/w1.c 	sl = w1_slave_search_device(md, &rn);
md                479 drivers/w1/w1.c 		w1_attach_slave_device(md, &rn);
md                481 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                499 drivers/w1/w1.c 	struct w1_master *md = dev_to_w1_master(dev);
md                507 drivers/w1/w1.c 	mutex_lock(&md->mutex);
md                508 drivers/w1/w1.c 	sl = w1_slave_search_device(md, &rn);
md                519 drivers/w1/w1.c 	mutex_unlock(&md->mutex);
md                580 drivers/w1/w1.c 	struct w1_master *md = NULL;
md                586 drivers/w1/w1.c 		md = container_of(dev, struct w1_master, dev);
md                588 drivers/w1/w1.c 		name = md->name;
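
The w1 sysfs stores above share one shape: parse, publish under md->mutex, then kick the search thread. The parse step is elided by the index; kstrtoint() below is an assumption, as is the demo_ naming:

static ssize_t demo_search_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct w1_master *md = dev_to_w1_master(dev);
	int tmp;

	if (kstrtoint(buf, 0, &tmp))	/* assumed; not in the listing */
		return -EINVAL;

	mutex_lock(&md->mutex);
	md->search_count = tmp;
	mutex_unlock(&md->mutex);

	if (tmp)		/* a pending count re-arms the thread */
		wake_up_process(md->thread);

	return count;
}
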
md               1664 fs/proc/task_mmu.c 	struct numa_maps md;
md               1667 fs/proc/task_mmu.c static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
md               1672 fs/proc/task_mmu.c 	md->pages += nr_pages;
md               1674 fs/proc/task_mmu.c 		md->dirty += nr_pages;
md               1677 fs/proc/task_mmu.c 		md->swapcache += nr_pages;
md               1680 fs/proc/task_mmu.c 		md->active += nr_pages;
md               1683 fs/proc/task_mmu.c 		md->writeback += nr_pages;
md               1686 fs/proc/task_mmu.c 		md->anon += nr_pages;
md               1688 fs/proc/task_mmu.c 	if (count > md->mapcount_max)
md               1689 fs/proc/task_mmu.c 		md->mapcount_max = count;
md               1691 fs/proc/task_mmu.c 	md->node[page_to_nid(page)] += nr_pages;
md               1746 fs/proc/task_mmu.c 	struct numa_maps *md = walk->private;
md               1759 fs/proc/task_mmu.c 			gather_stats(page, md, pmd_dirty(*pmd),
md               1773 fs/proc/task_mmu.c 		gather_stats(page, md, pte_dirty(*pte), 1);
md               1785 fs/proc/task_mmu.c 	struct numa_maps *md;
md               1795 fs/proc/task_mmu.c 	md = walk->private;
md               1796 fs/proc/task_mmu.c 	gather_stats(page, md, pte_dirty(huge_pte), 1);
md               1821 fs/proc/task_mmu.c 	struct numa_maps *md = &numa_priv->md;
md               1832 fs/proc/task_mmu.c 	memset(md, 0, sizeof(*md));
md               1857 fs/proc/task_mmu.c 	walk_page_vma(vma, &show_numa_ops, md);
md               1859 fs/proc/task_mmu.c 	if (!md->pages)
md               1862 fs/proc/task_mmu.c 	if (md->anon)
md               1863 fs/proc/task_mmu.c 		seq_printf(m, " anon=%lu", md->anon);
md               1865 fs/proc/task_mmu.c 	if (md->dirty)
md               1866 fs/proc/task_mmu.c 		seq_printf(m, " dirty=%lu", md->dirty);
md               1868 fs/proc/task_mmu.c 	if (md->pages != md->anon && md->pages != md->dirty)
md               1869 fs/proc/task_mmu.c 		seq_printf(m, " mapped=%lu", md->pages);
md               1871 fs/proc/task_mmu.c 	if (md->mapcount_max > 1)
md               1872 fs/proc/task_mmu.c 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
md               1874 fs/proc/task_mmu.c 	if (md->swapcache)
md               1875 fs/proc/task_mmu.c 		seq_printf(m, " swapcache=%lu", md->swapcache);
md               1877 fs/proc/task_mmu.c 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
md               1878 fs/proc/task_mmu.c 		seq_printf(m, " active=%lu", md->active);
md               1880 fs/proc/task_mmu.c 	if (md->writeback)
md               1881 fs/proc/task_mmu.c 		seq_printf(m, " writeback=%lu", md->writeback);
md               1884 fs/proc/task_mmu.c 		if (md->node[nid])
md               1885 fs/proc/task_mmu.c 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
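
The gather_stats() calls above fill a per-VMA accumulator whose fields map one-to-one onto the seq_printf keys. A hedged sketch of that layout (field names from the listing; ordering illustrative):

#include <linux/numa.h>

struct demo_numa_maps {
	unsigned long pages;		/* printed as mapped= */
	unsigned long anon;
	unsigned long dirty;
	unsigned long active;
	unsigned long writeback;
	unsigned long swapcache;
	unsigned long mapcount_max;	/* printed as mapmax= */
	unsigned long node[MAX_NUMNODES]; /* printed as N<nid>= */
};
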
md                386 include/linux/device-mapper.h int dm_create(int minor, struct mapped_device **md);
md                392 include/linux/device-mapper.h void dm_get(struct mapped_device *md);
md                393 include/linux/device-mapper.h int dm_hold(struct mapped_device *md);
md                394 include/linux/device-mapper.h void dm_put(struct mapped_device *md);
md                399 include/linux/device-mapper.h void dm_set_mdptr(struct mapped_device *md, void *ptr);
md                400 include/linux/device-mapper.h void *dm_get_mdptr(struct mapped_device *md);
md                405 include/linux/device-mapper.h int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
md                406 include/linux/device-mapper.h int dm_resume(struct mapped_device *md);
md                411 include/linux/device-mapper.h uint32_t dm_get_event_nr(struct mapped_device *md);
md                412 include/linux/device-mapper.h int dm_wait_event(struct mapped_device *md, int event_nr);
md                413 include/linux/device-mapper.h uint32_t dm_next_uevent_seq(struct mapped_device *md);
md                414 include/linux/device-mapper.h void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
md                419 include/linux/device-mapper.h const char *dm_device_name(struct mapped_device *md);
md                420 include/linux/device-mapper.h int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
md                421 include/linux/device-mapper.h struct gendisk *dm_disk(struct mapped_device *md);
md                437 include/linux/device-mapper.h struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
md                442 include/linux/device-mapper.h int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
md                443 include/linux/device-mapper.h int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
md                453 include/linux/device-mapper.h 		    unsigned num_targets, struct mapped_device *md);
md                492 include/linux/device-mapper.h struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
md                493 include/linux/device-mapper.h void dm_put_live_table(struct mapped_device *md, int srcu_idx);
md                494 include/linux/device-mapper.h void dm_sync_table(struct mapped_device *md);
md                519 include/linux/device-mapper.h struct dm_table *dm_swap_table(struct mapped_device *md,
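
The srcu_idx in/out parameter above pairs dm_get_live_table() with dm_put_live_table(): the table cannot be swapped while the SRCU read side is held, and the put must run even when no table was returned. A minimal sketch:

#include <linux/device-mapper.h>

static void demo_walk_live_table(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map) {
		/* ... inspect the table; dm_swap_table() cannot free it
		 * until the put below ... */
	}

	dm_put_live_table(md, srcu_idx);	/* unconditional */
}
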
md               1064 include/linux/efi.h extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
md               1082 include/linux/efi.h extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
md               1136 include/linux/efi.h #define for_each_efi_memory_desc_in_map(m, md)				   \
md               1137 include/linux/efi.h 	for ((md) = (m)->map;						   \
md               1138 include/linux/efi.h 	     (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end;	   \
md               1139 include/linux/efi.h 	     (md) = (void *)(md) + (m)->desc_size)
md               1147 include/linux/efi.h #define for_each_efi_memory_desc(md) \
md               1148 include/linux/efi.h 	for_each_efi_memory_desc_in_map(&efi.memmap, md)
md               1155 include/linux/efi.h 				     const efi_memory_desc_t *md);
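
The iterator defined above walks raw descriptors by desc_size rather than by array index, since firmware may use a larger stride than sizeof(efi_memory_desc_t). A hedged use of the global-map variant:

#include <linux/efi.h>

/* Total the pages firmware reports as conventional RAM. */
static u64 demo_count_conventional_pages(void)
{
	efi_memory_desc_t *md;
	u64 pages = 0;

	for_each_efi_memory_desc(md)
		if (md->type == EFI_CONVENTIONAL_MEMORY)
			pages += md->num_pages;

	return pages;
}
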
md               2824 include/net/cfg80211.h 	u16 md;
md                417 include/net/ip_tunnels.h struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
md                602 lib/inflate.c    unsigned ml, md;      /* masks for bl and bd bits */
md                614 lib/inflate.c    md = mask_bits[bd];
md                650 lib/inflate.c        if ((e = (t = td + ((unsigned)b & md))->e) > 16)
md               3935 net/core/filter.c 	struct metadata_dst *md = this_cpu_ptr(md_dst);
md               3963 net/core/filter.c 	dst_hold((struct dst_entry *) md);
md               3964 net/core/filter.c 	skb_dst_set(skb, (struct dst_entry *) md);
md               3966 net/core/filter.c 	info = &md->u.tun_info;
md               4009 net/core/filter.c 	const struct metadata_dst *md = this_cpu_ptr(md_dst);
md               4011 net/core/filter.c 	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
md                286 net/ipv4/ip_gre.c 			struct erspan_metadata *pkt_md, *md;
md                297 net/ipv4/ip_gre.c 						tun_id, sizeof(*md));
md                309 net/ipv4/ip_gre.c 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
md                310 net/ipv4/ip_gre.c 			md->version = ver;
md                311 net/ipv4/ip_gre.c 			md2 = &md->u.md2;
md                317 net/ipv4/ip_gre.c 			info->options_len = sizeof(*md);
md                496 net/ipv4/ip_gre.c 	struct erspan_metadata *md;
md                512 net/ipv4/ip_gre.c 	if (tun_info->options_len < sizeof(*md))
md                514 net/ipv4/ip_gre.c 	md = ip_tunnel_info_opts(tun_info);
md                517 net/ipv4/ip_gre.c 	version = md->version;
md                543 net/ipv4/ip_gre.c 				    ntohl(md->u.index), truncate, true);
md                548 net/ipv4/ip_gre.c 				       md->u.md2.dir,
md                549 net/ipv4/ip_gre.c 				       get_hwid(&md->u.md2),
md                488 net/ipv4/ip_tunnel.c 			    int tunnel_hlen, __be32 dst, bool md)
md                494 net/ipv4/ip_tunnel.c 	tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
md                522 net/ipv4/ip_tunnel.c 		daddr = md ? dst : tunnel->parms.iph.daddr;
md                649 net/ipv4/ip_tunnel.c 	bool md = false;
md                674 net/ipv4/ip_tunnel.c 			md = true;
md                715 net/ipv4/ip_tunnel.c 		if (!md)
md                738 net/ipv4/ip_tunnel.c 	if (connected && md) {
md                758 net/ipv4/ip_tunnel.c 		else if (!md && connected)
md                121 net/ipv4/ip_tunnel_core.c struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
md                127 net/ipv4/ip_tunnel_core.c 	if (!md || md->type != METADATA_IP_TUNNEL ||
md                128 net/ipv4/ip_tunnel_core.c 	    md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
md                137 net/ipv4/ip_tunnel_core.c 	src = &md->u.tun_info;
md                 18 net/ipv4/tcp_highspeed.c 	unsigned int md;
md                157 net/ipv4/tcp_highspeed.c 	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
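
In the hstcp table above, md is the multiplicative-decrease factor scaled by 256, so the expression at tcp_highspeed.c:157 computes roughly cwnd * (1 - md/256) with a floor of two segments. Restated in isolation:

#include <linux/kernel.h>	/* max() */

static u32 demo_hstcp_ssthresh(u32 cwnd, u32 md)
{
	/* fixed-point cwnd * (1 - md/256), never below 2 segments */
	return max(cwnd - ((cwnd * md) >> 8), 2U);
}
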
md                551 net/ipv6/ip6_gre.c 			struct erspan_metadata *pkt_md, *md;
md                563 net/ipv6/ip6_gre.c 						  sizeof(*md));
md                576 net/ipv6/ip6_gre.c 			md = ip_tunnel_info_opts(info);
md                577 net/ipv6/ip6_gre.c 			md->version = ver;
md                578 net/ipv6/ip6_gre.c 			md2 = &md->u.md2;
md                582 net/ipv6/ip6_gre.c 			info->options_len = sizeof(*md);
md                964 net/ipv6/ip6_gre.c 		struct erspan_metadata *md;
md                983 net/ipv6/ip6_gre.c 		if (tun_info->options_len < sizeof(*md))
md                985 net/ipv6/ip6_gre.c 		md = ip_tunnel_info_opts(tun_info);
md                988 net/ipv6/ip6_gre.c 		if (md->version == 1) {
md                991 net/ipv6/ip6_gre.c 					    ntohl(md->u.index), truncate,
md                993 net/ipv6/ip6_gre.c 		} else if (md->version == 2) {
md                996 net/ipv6/ip6_gre.c 					       md->u.md2.dir,
md                997 net/ipv6/ip6_gre.c 					       get_hwid(&md->u.md2),
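
Both GRE paths above branch on erspan_metadata.version: v1 carries a session index, v2 a direction bit plus a 6-bit hardware ID that get_hwid() (net/erspan.h) reassembles. A hedged decoding helper:

#include <linux/printk.h>
#include <net/erspan.h>

static void demo_describe_erspan(const struct erspan_metadata *md)
{
	if (md->version == 1)
		pr_info("erspan v1 index %u\n", ntohl(md->u.index));
	else if (md->version == 2)
		pr_info("erspan v2 dir %u hwid %u\n",
			md->u.md2.dir, get_hwid(&md->u.md2));
}
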
md                153 net/netfilter/nft_tunnel.c 	struct metadata_dst	*md;
md                353 net/netfilter/nft_tunnel.c 	struct metadata_dst *md;
md                411 net/netfilter/nft_tunnel.c 	md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
md                412 net/netfilter/nft_tunnel.c 	if (!md)
md                415 net/netfilter/nft_tunnel.c 	memcpy(&md->u.tun_info, &info, sizeof(info));
md                417 net/netfilter/nft_tunnel.c 	err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
md                419 net/netfilter/nft_tunnel.c 		metadata_dst_free(md);
md                423 net/netfilter/nft_tunnel.c 	ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
md                425 net/netfilter/nft_tunnel.c 	priv->md = md;
md                438 net/netfilter/nft_tunnel.c 	dst_hold((struct dst_entry *) priv->md);
md                439 net/netfilter/nft_tunnel.c 	skb_dst_set(skb, (struct dst_entry *) priv->md);
md                539 net/netfilter/nft_tunnel.c 	struct ip_tunnel_info *info = &priv->md->u.tun_info;
md                562 net/netfilter/nft_tunnel.c 	metadata_dst_free(priv->md);
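
The nft_tunnel lines above show the whole metadata_dst lifecycle: allocate with room for tunnel options, initialize the embedded dst_cache, and release with metadata_dst_free() on failure. A minimal sketch of the setup half:

#include <net/dst_cache.h>
#include <net/dst_metadata.h>

static struct metadata_dst *demo_md_alloc(u8 opts_len)
{
	struct metadata_dst *md;

	md = metadata_dst_alloc(opts_len, METADATA_IP_TUNNEL, GFP_KERNEL);
	if (!md)
		return NULL;

	if (dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL)) {
		metadata_dst_free(md);
		return NULL;
	}

	return md;
}
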
md               12815 net/wireless/nl80211.c 	ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]);
md               2124 net/wireless/trace.h 		__field(u16, md)
md               2130 net/wireless/trace.h 		__entry->md = ftie->md;
md               2134 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md)
md                225 scripts/mod/sumversion.c static inline void add_char(unsigned char c, struct md4_ctx *md)
md                227 scripts/mod/sumversion.c 	md4_update(md, &c, 1);
md                231 scripts/mod/sumversion.c 			struct md4_ctx *md)
md                235 scripts/mod/sumversion.c 	add_char(file[0], md);
md                237 scripts/mod/sumversion.c 		add_char(file[i], md);
md                256 scripts/mod/sumversion.c static int parse_file(const char *fname, struct md4_ctx *md)
md                278 scripts/mod/sumversion.c 			i += parse_string(file+i, len - i, md);
md                288 scripts/mod/sumversion.c 		add_char(file[i], md);
md                305 scripts/mod/sumversion.c static int parse_source_files(const char *objfile, struct md4_ctx *md)
md                344 scripts/mod/sumversion.c 			if (!parse_file(p, md)) {
md                372 scripts/mod/sumversion.c 			if (!parse_file(line, md)) {
md                397 scripts/mod/sumversion.c 	struct md4_ctx md;
md                419 scripts/mod/sumversion.c 	md4_init(&md);
md                424 scripts/mod/sumversion.c 				!parse_source_files(fname, &md))
md                428 scripts/mod/sumversion.c 	md4_final_ascii(&md, sum, sumlen);
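
The sumversion.c hits above trace one digest flow: init the MD4 context, feed it bytes (add_char() is a one-byte md4_update()), and emit the ASCII sum. A fragment in the same file's terms, assuming its local md4_* helpers:

struct md4_ctx md;
char sum[64];

md4_init(&md);
md4_update(&md, (const unsigned char *)"example", 7);
md4_final_ascii(&md, sum, sizeof(sum));
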
md                927 sound/oss/dmasound/dmasound_core.c static int shared_resources_are_mine(fmode_t md)
md                930 sound/oss/dmasound/dmasound_core.c 		return (shared_resource_owner & md) != 0;
md                932 sound/oss/dmasound/dmasound_core.c 		shared_resource_owner = md ;
md                  8 tools/build/feature/test-libcrypto.c 	unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
md                 13 tools/build/feature/test-libcrypto.c 	MD5_Final(&md[0], &context);
md                 15 tools/build/feature/test-libcrypto.c 	SHA1(&dat[0], sizeof(dat), &md[0]);
md                 69 tools/perf/arch/x86/tests/perf-time-to-tsc.c 	struct mmap *md;
md                119 tools/perf/arch/x86/tests/perf-time-to-tsc.c 		md = &evlist->mmap[i];
md                120 tools/perf/arch/x86/tests/perf-time-to-tsc.c 		if (perf_mmap__read_init(md) < 0)
md                123 tools/perf/arch/x86/tests/perf-time-to-tsc.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md                142 tools/perf/arch/x86/tests/perf-time-to-tsc.c 			perf_mmap__consume(md);
md                144 tools/perf/arch/x86/tests/perf-time-to-tsc.c 		perf_mmap__read_done(md);
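
The same four-call protocol recurs in every perf mmap consumer below: init the snapshot, drain events, consume each, mark the read done. A hedged skeleton (header path as in tools/perf/util/mmap.h above):

#include "util/mmap.h"

static void demo_drain(struct mmap *md)
{
	union perf_event *event;

	if (perf_mmap__read_init(md) < 0)
		return;			/* ring buffer is empty */

	while ((event = perf_mmap__read_event(md)) != NULL) {
		/* ... handle event ... */
		perf_mmap__consume(md);	/* advance the tail pointer */
	}

	perf_mmap__read_done(md);
}
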
md                755 tools/perf/builtin-kvm.c 	struct mmap *md;
md                761 tools/perf/builtin-kvm.c 	md = &evlist->mmap[idx];
md                762 tools/perf/builtin-kvm.c 	err = perf_mmap__read_init(md);
md                766 tools/perf/builtin-kvm.c 	while ((event = perf_mmap__read_event(md)) != NULL) {
md                769 tools/perf/builtin-kvm.c 			perf_mmap__consume(md);
md                779 tools/perf/builtin-kvm.c 		perf_mmap__consume(md);
md                796 tools/perf/builtin-kvm.c 	perf_mmap__read_done(md);
md                172 tools/perf/builtin-record.c static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
md                200 tools/perf/builtin-record.c 		perf_mmap__put(md);
md                218 tools/perf/builtin-record.c static int record__aio_sync(struct mmap *md, bool sync_all)
md                220 tools/perf/builtin-record.c 	struct aiocb **aiocb = md->aio.aiocb;
md                221 tools/perf/builtin-record.c 	struct aiocb *cblocks = md->aio.cblocks;
md                227 tools/perf/builtin-record.c 		for (i = 0; i < md->aio.nr_cblocks; ++i) {
md                228 tools/perf/builtin-record.c 			if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
md                246 tools/perf/builtin-record.c 		while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
md                868 tools/perf/builtin-top.c 	struct mmap *md;
md                871 tools/perf/builtin-top.c 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
md                872 tools/perf/builtin-top.c 	if (perf_mmap__read_init(md) < 0)
md                875 tools/perf/builtin-top.c 	while ((event = perf_mmap__read_event(md)) != NULL) {
md                886 tools/perf/builtin-top.c 		perf_mmap__consume(md);
md                896 tools/perf/builtin-top.c 	perf_mmap__read_done(md);
md               3450 tools/perf/builtin-trace.c 		struct mmap *md;
md               3452 tools/perf/builtin-trace.c 		md = &evlist->mmap[i];
md               3453 tools/perf/builtin-trace.c 		if (perf_mmap__read_init(md) < 0)
md               3456 tools/perf/builtin-trace.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md               3463 tools/perf/builtin-trace.c 			perf_mmap__consume(md);
md               3473 tools/perf/builtin-trace.c 		perf_mmap__read_done(md);
md                184 tools/perf/tests/bpf.c 		struct mmap *md;
md                186 tools/perf/tests/bpf.c 		md = &evlist->mmap[i];
md                187 tools/perf/tests/bpf.c 		if (perf_mmap__read_init(md) < 0)
md                190 tools/perf/tests/bpf.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md                196 tools/perf/tests/bpf.c 		perf_mmap__read_done(md);
md                423 tools/perf/tests/code-reading.c 	struct mmap *md;
md                427 tools/perf/tests/code-reading.c 		md = &evlist->mmap[i];
md                428 tools/perf/tests/code-reading.c 		if (perf_mmap__read_init(md) < 0)
md                431 tools/perf/tests/code-reading.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md                433 tools/perf/tests/code-reading.c 			perf_mmap__consume(md);
md                437 tools/perf/tests/code-reading.c 		perf_mmap__read_done(md);
md                 35 tools/perf/tests/keep-tracking.c 	struct mmap *md;
md                 40 tools/perf/tests/keep-tracking.c 		md = &evlist->mmap[i];
md                 41 tools/perf/tests/keep-tracking.c 		if (perf_mmap__read_init(md) < 0)
md                 43 tools/perf/tests/keep-tracking.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md                 49 tools/perf/tests/keep-tracking.c 			perf_mmap__consume(md);
md                 51 tools/perf/tests/keep-tracking.c 		perf_mmap__read_done(md);
md                 46 tools/perf/tests/mmap-basic.c 	struct mmap *md;
md                115 tools/perf/tests/mmap-basic.c 	md = &evlist->mmap[0];
md                116 tools/perf/tests/mmap-basic.c 	if (perf_mmap__read_init(md) < 0)
md                119 tools/perf/tests/mmap-basic.c 	while ((event = perf_mmap__read_event(md)) != NULL) {
md                142 tools/perf/tests/mmap-basic.c 		perf_mmap__consume(md);
md                144 tools/perf/tests/mmap-basic.c 	perf_mmap__read_done(md);
md                 92 tools/perf/tests/openat-syscall-tp-fields.c 			struct mmap *md;
md                 94 tools/perf/tests/openat-syscall-tp-fields.c 			md = &evlist->mmap[i];
md                 95 tools/perf/tests/openat-syscall-tp-fields.c 			if (perf_mmap__read_init(md) < 0)
md                 98 tools/perf/tests/openat-syscall-tp-fields.c 			while ((event = perf_mmap__read_event(md)) != NULL) {
md                106 tools/perf/tests/openat-syscall-tp-fields.c 					perf_mmap__consume(md);
md                126 tools/perf/tests/openat-syscall-tp-fields.c 			perf_mmap__read_done(md);
md                170 tools/perf/tests/perf-record.c 			struct mmap *md;
md                172 tools/perf/tests/perf-record.c 			md = &evlist->mmap[i];
md                173 tools/perf/tests/perf-record.c 			if (perf_mmap__read_init(md) < 0)
md                176 tools/perf/tests/perf-record.c 			while ((event = perf_mmap__read_event(md)) != NULL) {
md                279 tools/perf/tests/perf-record.c 				perf_mmap__consume(md);
md                281 tools/perf/tests/perf-record.c 			perf_mmap__read_done(md);
md                 46 tools/perf/tests/sw-clock.c 	struct mmap *md;
md                101 tools/perf/tests/sw-clock.c 	md = &evlist->mmap[0];
md                102 tools/perf/tests/sw-clock.c 	if (perf_mmap__read_init(md) < 0)
md                105 tools/perf/tests/sw-clock.c 	while ((event = perf_mmap__read_event(md)) != NULL) {
md                120 tools/perf/tests/sw-clock.c 		perf_mmap__consume(md);
md                122 tools/perf/tests/sw-clock.c 	perf_mmap__read_done(md);
md                267 tools/perf/tests/switch-tracking.c 	struct mmap *md;
md                271 tools/perf/tests/switch-tracking.c 		md = &evlist->mmap[i];
md                272 tools/perf/tests/switch-tracking.c 		if (perf_mmap__read_init(md) < 0)
md                275 tools/perf/tests/switch-tracking.c 		while ((event = perf_mmap__read_event(md)) != NULL) {
md                278 tools/perf/tests/switch-tracking.c 			 perf_mmap__consume(md);
md                282 tools/perf/tests/switch-tracking.c 		perf_mmap__read_done(md);
md                 55 tools/perf/tests/task-exit.c 	struct mmap *md;
md                121 tools/perf/tests/task-exit.c 	md = &evlist->mmap[0];
md                122 tools/perf/tests/task-exit.c 	if (perf_mmap__read_init(md) < 0)
md                125 tools/perf/tests/task-exit.c 	while ((event = perf_mmap__read_event(md)) != NULL) {
md                129 tools/perf/tests/task-exit.c 		perf_mmap__consume(md);
md                131 tools/perf/tests/task-exit.c 	perf_mmap__read_done(md);
md                444 tools/perf/util/mmap.c static int __perf_mmap__read_init(struct mmap *md)
md                446 tools/perf/util/mmap.c 	u64 head = perf_mmap__read_head(md);
md                447 tools/perf/util/mmap.c 	u64 old = md->core.prev;
md                448 tools/perf/util/mmap.c 	unsigned char *data = md->core.base + page_size;
md                451 tools/perf/util/mmap.c 	md->core.start = md->core.overwrite ? head : old;
md                452 tools/perf/util/mmap.c 	md->core.end = md->core.overwrite ? old : head;
md                454 tools/perf/util/mmap.c 	if ((md->core.end - md->core.start) < md->core.flush)
md                457 tools/perf/util/mmap.c 	size = md->core.end - md->core.start;
md                458 tools/perf/util/mmap.c 	if (size > (unsigned long)(md->core.mask) + 1) {
md                459 tools/perf/util/mmap.c 		if (!md->core.overwrite) {
md                462 tools/perf/util/mmap.c 			md->core.prev = head;
md                463 tools/perf/util/mmap.c 			perf_mmap__consume(md);
md                471 tools/perf/util/mmap.c 		if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
md                489 tools/perf/util/mmap.c int perf_mmap__push(struct mmap *md, void *to,
md                492 tools/perf/util/mmap.c 	u64 head = perf_mmap__read_head(md);
md                493 tools/perf/util/mmap.c 	unsigned char *data = md->core.base + page_size;
md                498 tools/perf/util/mmap.c 	rc = perf_mmap__read_init(md);
md                502 tools/perf/util/mmap.c 	size = md->core.end - md->core.start;
md                504 tools/perf/util/mmap.c 	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
md                505 tools/perf/util/mmap.c 		buf = &data[md->core.start & md->core.mask];
md                506 tools/perf/util/mmap.c 		size = md->core.mask + 1 - (md->core.start & md->core.mask);
md                507 tools/perf/util/mmap.c 		md->core.start += size;
md                509 tools/perf/util/mmap.c 		if (push(md, to, buf, size) < 0) {
md                515 tools/perf/util/mmap.c 	buf = &data[md->core.start & md->core.mask];
md                516 tools/perf/util/mmap.c 	size = md->core.end - md->core.start;
md                517 tools/perf/util/mmap.c 	md->core.start += size;
md                519 tools/perf/util/mmap.c 	if (push(md, to, buf, size) < 0) {
md                524 tools/perf/util/mmap.c 	md->core.prev = head;
md                525 tools/perf/util/mmap.c 	perf_mmap__consume(md);
md                 57 tools/perf/util/mmap.h static inline void perf_mmap__write_tail(struct mmap *md, u64 tail)
md                 59 tools/perf/util/mmap.h 	ring_buffer_write_tail(md->core.base, tail);
md                 66 tools/perf/util/mmap.h int perf_mmap__push(struct mmap *md, void *to,
md                 71 tools/perf/util/mmap.h int perf_mmap__read_init(struct mmap *md);
md                998 tools/perf/util/python.c 		struct mmap *md = &evlist->mmap[i];
md               1000 tools/perf/util/python.c 		if (md->core.cpu == cpu)
md               1001 tools/perf/util/python.c 			return md;
md               1014 tools/perf/util/python.c 	struct mmap *md;
md               1021 tools/perf/util/python.c 	md = get_md(evlist, cpu);
md               1022 tools/perf/util/python.c 	if (!md)
md               1025 tools/perf/util/python.c 	if (perf_mmap__read_init(md) < 0)
md               1028 tools/perf/util/python.c 	event = perf_mmap__read_event(md);
md               1048 tools/perf/util/python.c 		perf_mmap__consume(md);
md               1168 tools/perf/util/symbol.c 	struct kcore_mapfn_data *md = data;
md               1171 tools/perf/util/symbol.c 	map = map__new2(start, md->dso);
md               1178 tools/perf/util/symbol.c 	list_add(&map->node, &md->maps);
md               1266 tools/perf/util/symbol.c 	struct kcore_mapfn_data md;
md               1291 tools/perf/util/symbol.c 	md.dso = dso;
md               1292 tools/perf/util/symbol.c 	INIT_LIST_HEAD(&md.maps);
md               1302 tools/perf/util/symbol.c 	err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
md               1308 tools/perf/util/symbol.c 	if (list_empty(&md.maps)) {
md               1331 tools/perf/util/symbol.c 		list_for_each_entry(new_map, &md.maps, node) {
md               1340 tools/perf/util/symbol.c 		replacement_map = list_entry(md.maps.next, struct map, node);
md               1343 tools/perf/util/symbol.c 	while (!list_empty(&md.maps)) {
md               1344 tools/perf/util/symbol.c 		new_map = list_entry(md.maps.next, struct map, node);
md               1402 tools/perf/util/symbol.c 	while (!list_empty(&md.maps)) {
md               1403 tools/perf/util/symbol.c 		map = list_entry(md.maps.next, struct map, node);
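
The symbol.c hits show a build-then-drain intrusive list: kcore_mapfn() allocates one struct map per kcore segment and chains it onto md.maps (line 1178); dso__load_kcore() then inspects the list (lines 1331 and 1340) and pops it empty, both on success (line 1343) and on the error path (line 1402). The pattern, sketched with the kernel-style list API rather than copied from perf:

    struct kcore_mapfn_data md;
    struct map *map;

    INIT_LIST_HEAD(&md.maps);
    /* file__read_maps() invokes kcore_mapfn() once per segment, and each
     * invocation does list_add(&map->node, &md->maps) as at line 1178 */

    while (!list_empty(&md.maps)) {
        map = list_entry(md.maps.next, struct map, node);
        list_del_init(&map->node);   /* detach before handing it off */
        /* adopt the map into the real tree, or map__put() on error */
    }
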
md                 78 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
md                 80 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
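
Both bpf_helpers.h hits are cut off at the '=' because the declaration continues on the next source line, which the listing does not show. In selftests headers of this vintage the continuation assigns the helper's numeric ID, cast to the pointer type, so calls compile into helper invocations the verifier resolves at load time; a reconstruction on that assumption (the elided text itself is not in the listing):

    static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
            (void *) BPF_FUNC_skb_get_tunnel_opt;
    static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
            (void *) BPF_FUNC_skb_set_tunnel_opt;
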
md                133 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct erspan_metadata md;
md                149 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	__builtin_memset(&md, 0, sizeof(md));
md                151 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.version = 1;
md                152 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.index = bpf_htonl(123);
md                157 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.version = 2;
md                158 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.dir = direction;
md                159 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.hwid = hwid & 0xf;
md                160 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
md                163 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
md                177 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct erspan_metadata md;
md                187 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
md                194 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			key.tunnel_id, key.remote_ipv4, md.version);
md                199 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	index = bpf_ntohl(md.u.index);
md                205 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 md.u.md2.dir,
md                206 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
md                207 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 bpf_ntohl(md.u.md2.timestamp));
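
Across the set and get hits, the ERSPAN v2 hwid is a 6-bit value that struct erspan_metadata carries in two bitfields: the set side stores the low nibble in md.u.md2.hwid and the next two bits in md.u.md2.hwid_upper (lines 159-160), and the get side reassembles them (line 206). Worked example with a hypothetical hwid of 0x2b:

    unsigned int hwid = 0x2b;               /* 0b101011, six bits */

    unsigned int lo = hwid & 0xf;           /* 0xb -> md.u.md2.hwid       */
    unsigned int hi = (hwid >> 4) & 0x3;    /* 0x2 -> md.u.md2.hwid_upper */

    unsigned int back = (hi << 4) + lo;     /* get side: 0x2b again */

The version 1 index, by contrast, travels whole and only changes byte order (bpf_htonl() on set, bpf_ntohl() on get).
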
md                217 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct erspan_metadata md;
md                233 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	__builtin_memset(&md, 0, sizeof(md));
md                236 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.index = bpf_htonl(123);
md                237 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.version = 1;
md                242 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.version = 2;
md                243 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.dir = direction;
md                244 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.hwid = hwid & 0xf;
md                245 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.u.md2.hwid_upper = (hwid >> 4) & 0x3;
md                248 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
md                262 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct erspan_metadata md;
md                273 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
md                280 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			key.tunnel_id, key.remote_ipv4, md.version);
md                285 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	index = bpf_ntohl(md.u.index);
md                291 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 md.u.md2.dir,
md                292 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
md                293 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			 bpf_ntohl(md.u.md2.timestamp));
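
This second erspan pair is the IPv6 flavor of the same test: the metadata handling is byte-for-byte identical, and the difference lives outside these hits, in the tunnel-key setup, which passes BPF_F_TUNINFO_IPV6 so the peer address is carried in remote_ipv6. A hedged sketch of that call (the address and IDs here are illustrative, not quoted from the test):

    struct bpf_tunnel_key key = {};

    key.remote_ipv6[3] = bpf_htonl(0x11);   /* ::11, illustrative peer */
    key.tunnel_id = 2;
    key.tunnel_ttl = 64;

    ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                 BPF_F_TUNINFO_IPV6);
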
md                304 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct vxlan_metadata md;
md                319 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
md                320 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
md                334 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	struct vxlan_metadata md;
md                343 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
md                350 tools/testing/selftests/bpf/progs/test_tunnel_kern.c 			key.tunnel_id, key.remote_ipv4, md.gbp);
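
The VXLAN pair exercises the Group Policy extension: md.gbp is a single host-order word rather than a bitfield struct. Decoding the 0x800FF written at line 319, assuming the bit layout from include/net/vxlan.h (an assumption worth verifying against your tree: policy-applied at bit 19, group ID in the low 16 bits):

    unsigned int gbp = 0x800FF;

    int applied = !!(gbp & (1u << 19));     /* 1: policy-applied bit set */
    int id      = gbp & 0xffff;             /* 0xff: group policy ID 255 */
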
md               2999 tools/testing/selftests/seccomp/seccomp_bpf.c 	struct seccomp_metadata md;
md               3042 tools/testing/selftests/seccomp/seccomp_bpf.c 	md.filter_off = 0;
md               3044 tools/testing/selftests/seccomp/seccomp_bpf.c 	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
md               3045 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(sizeof(md), ret) {
md               3050 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
md               3051 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(md.filter_off, 0);
md               3053 tools/testing/selftests/seccomp/seccomp_bpf.c 	md.filter_off = 1;
md               3054 tools/testing/selftests/seccomp/seccomp_bpf.c 	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
md               3055 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(sizeof(md), ret);
md               3056 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(md.flags, 0);
md               3057 tools/testing/selftests/seccomp/seccomp_bpf.c 	EXPECT_EQ(md.filter_off, 1);
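
The seccomp hits drive PTRACE_SECCOMP_GET_METADATA: md.filter_off is an input index into the tracee's filter stack, and on success ptrace() returns the number of bytes it wrote, which is why the test compares ret against sizeof(md). The test then checks that one filter reports SECCOMP_FILTER_FLAG_LOG and the other reports no flags. A minimal tracer-side sketch on those assumptions:

    struct seccomp_metadata md;
    long ret;

    md.filter_off = 0;                      /* which filter to inspect */
    ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
    if (ret == sizeof(md) && (md.flags & SECCOMP_FILTER_FLAG_LOG))
        printf("filter %llu logs its actions\n",
               (unsigned long long)md.filter_off);
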