mdev              110 arch/powerpc/include/asm/macio.h static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
mdev              112 arch/powerpc/include/asm/macio.h 	return mdev->ofdev.dev.of_node;
mdev              116 arch/powerpc/include/asm/macio.h static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
mdev              118 arch/powerpc/include/asm/macio.h 	return mdev->bus->pdev;
mdev              232 drivers/ata/pata_macio.c 	struct macio_dev		*mdev;
mdev             1115 drivers/ata/pata_macio.c static int pata_macio_attach(struct macio_dev *mdev,
mdev             1124 drivers/ata/pata_macio.c 	if (macio_resource_count(mdev) == 0) {
mdev             1125 drivers/ata/pata_macio.c 		dev_err(&mdev->ofdev.dev,
mdev             1131 drivers/ata/pata_macio.c 	macio_enable_devres(mdev);
mdev             1134 drivers/ata/pata_macio.c 	priv = devm_kzalloc(&mdev->ofdev.dev,
mdev             1139 drivers/ata/pata_macio.c 	priv->node = of_node_get(mdev->ofdev.dev.of_node);
mdev             1140 drivers/ata/pata_macio.c 	priv->mdev = mdev;
mdev             1141 drivers/ata/pata_macio.c 	priv->dev = &mdev->ofdev.dev;
mdev             1144 drivers/ata/pata_macio.c 	if (macio_request_resource(mdev, 0, "pata-macio")) {
mdev             1145 drivers/ata/pata_macio.c 		dev_err(&mdev->ofdev.dev,
mdev             1149 drivers/ata/pata_macio.c 	tfregs = macio_resource_start(mdev, 0);
mdev             1152 drivers/ata/pata_macio.c 	if (macio_resource_count(mdev) >= 2) {
mdev             1153 drivers/ata/pata_macio.c 		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
mdev             1154 drivers/ata/pata_macio.c 			dev_err(&mdev->ofdev.dev,
mdev             1157 drivers/ata/pata_macio.c 			dmaregs = macio_resource_start(mdev, 1);
mdev             1169 drivers/ata/pata_macio.c 	if (macio_irq_count(mdev) == 0) {
mdev             1170 drivers/ata/pata_macio.c 		dev_warn(&mdev->ofdev.dev,
mdev             1174 drivers/ata/pata_macio.c 		irq = macio_irq(mdev, 0);
mdev             1177 drivers/ata/pata_macio.c 	lock_media_bay(priv->mdev->media_bay);
mdev             1185 drivers/ata/pata_macio.c 	unlock_media_bay(priv->mdev->media_bay);
mdev             1190 drivers/ata/pata_macio.c static int pata_macio_detach(struct macio_dev *mdev)
mdev             1192 drivers/ata/pata_macio.c 	struct ata_host *host = macio_get_drvdata(mdev);
mdev             1195 drivers/ata/pata_macio.c 	lock_media_bay(priv->mdev->media_bay);
mdev             1204 drivers/ata/pata_macio.c 	unlock_media_bay(priv->mdev->media_bay);
mdev             1210 drivers/ata/pata_macio.c static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
mdev             1212 drivers/ata/pata_macio.c 	struct ata_host *host = macio_get_drvdata(mdev);
mdev             1217 drivers/ata/pata_macio.c static int pata_macio_resume(struct macio_dev *mdev)
mdev             1219 drivers/ata/pata_macio.c 	struct ata_host *host = macio_get_drvdata(mdev);
mdev             1226 drivers/ata/pata_macio.c static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
mdev             1228 drivers/ata/pata_macio.c 	struct ata_host *host = macio_get_drvdata(mdev);
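
The pata_macio lines above exercise most of the macio_dev helper API (resource counting, resource request, IRQ lookup) together with the accessors from asm/macio.h. A minimal attach-shaped sketch, assuming a hypothetical driver name and error codes; it only reuses calls that appear in the listing:

#include <linux/device.h>
#include <linux/of.h>
#include <asm/macio.h>

/* Hypothetical attach handler: not pata_macio_attach() itself, just the
 * resource/IRQ discovery pattern shown above. */
static int my_macio_attach(struct macio_dev *mdev,
                           const struct of_device_id *match)
{
    resource_size_t regs;
    int irq = 0;

    /* Refuse to bind if the OF node describes no register window. */
    if (macio_resource_count(mdev) == 0) {
        dev_err(&mdev->ofdev.dev, "no register resource\n");
        return -ENXIO;
    }

    if (macio_request_resource(mdev, 0, "my-macio"))
        return -EBUSY;
    regs = macio_resource_start(mdev, 0);

    /* Fall back to polling when no interrupt is described. */
    if (macio_irq_count(mdev))
        irq = macio_irq(mdev, 0);

    dev_info(&mdev->ofdev.dev, "regs at 0x%llx, irq %d, node %pOF\n",
             (unsigned long long)regs, irq, macio_get_of_node(mdev));
    return 0;
}
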
mdev               85 drivers/bcma/driver_mips.c 	struct bcma_device *mdev = dev->bus->drv_mips.core;
mdev               94 drivers/bcma/driver_mips.c 		if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
mdev              105 drivers/bcma/driver_mips.c 	struct bcma_device *mdev = bus->drv_mips.core;
mdev              115 drivers/bcma/driver_mips.c 		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
mdev              116 drivers/bcma/driver_mips.c 			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
mdev              119 drivers/bcma/driver_mips.c 		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
mdev              123 drivers/bcma/driver_mips.c 		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
mdev              124 drivers/bcma/driver_mips.c 			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
mdev              127 drivers/bcma/driver_mips.c 		u32 irqinitmask = bcma_read32(mdev,
mdev              143 drivers/bcma/driver_mips.c 		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq),
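
The driver_mips.c lines above all follow one pattern: read a BCMA_MIPS_MIPS74K_INTMASK register through the MIPS core device, mask bits in or out, and write the result back. A small sketch of that read-modify-write, assuming a hypothetical helper name:

#include <linux/bcma/bcma.h>

/* Illustrative helper (not from driver_mips.c): route or unroute a device's
 * interrupt bit on shared line 0 with the same read-modify-write on
 * BCMA_MIPS_MIPS74K_INTMASK() used above. */
static void my_bcma_mips74k_route_irq0(struct bcma_device *mdev, u32 bit,
                                       bool enable)
{
    u32 mask = bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0));

    if (enable)
        mask |= bit;
    else
        mask &= ~bit;

    bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0), mask);
}
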
mdev              201 drivers/block/swim3.c 	struct macio_dev *mdev;
mdev              208 drivers/block/swim3.c #define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
mdev              209 drivers/block/swim3.c #define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
mdev              210 drivers/block/swim3.c #define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
mdev              213 drivers/block/swim3.c #define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
mdev              322 drivers/block/swim3.c 	if (fs->mdev->media_bay &&
mdev              323 drivers/block/swim3.c 	    check_media_bay(fs->mdev->media_bay) != MB_FD) {
mdev              877 drivers/block/swim3.c 	if (fs->mdev->media_bay &&
mdev              878 drivers/block/swim3.c 	    check_media_bay(fs->mdev->media_bay) != MB_FD)
mdev              915 drivers/block/swim3.c 		if (fs->mdev->media_bay &&
mdev              916 drivers/block/swim3.c 		    check_media_bay(fs->mdev->media_bay) != MB_FD)
mdev             1019 drivers/block/swim3.c 	if (fs->mdev->media_bay &&
mdev             1020 drivers/block/swim3.c 	    check_media_bay(fs->mdev->media_bay) != MB_FD)
mdev             1065 drivers/block/swim3.c static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
mdev             1067 drivers/block/swim3.c 	struct floppy_state *fs = macio_get_drvdata(mdev);
mdev             1084 drivers/block/swim3.c static int swim3_add_device(struct macio_dev *mdev, int index)
mdev             1086 drivers/block/swim3.c 	struct device_node *swim = mdev->ofdev.dev.of_node;
mdev             1090 drivers/block/swim3.c 	fs->mdev = mdev;
mdev             1094 drivers/block/swim3.c 	if (macio_resource_count(mdev) < 2) {
mdev             1098 drivers/block/swim3.c 	if (macio_irq_count(mdev) < 1) {
mdev             1102 drivers/block/swim3.c 	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
mdev             1106 drivers/block/swim3.c 	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
mdev             1108 drivers/block/swim3.c 		macio_release_resource(mdev, 0);
mdev             1111 drivers/block/swim3.c 	dev_set_drvdata(&mdev->ofdev.dev, fs);
mdev             1113 drivers/block/swim3.c 	if (mdev->media_bay == NULL)
mdev             1118 drivers/block/swim3.c 		ioremap(macio_resource_start(mdev, 0), 0x200);
mdev             1125 drivers/block/swim3.c 		ioremap(macio_resource_start(mdev, 1), 0x200);
mdev             1132 drivers/block/swim3.c 	fs->swim3_intr = macio_irq(mdev, 0);
mdev             1133 drivers/block/swim3.c 	fs->dma_intr = macio_irq(mdev, 1);
mdev             1145 drivers/block/swim3.c 	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
mdev             1146 drivers/block/swim3.c 		swim3_mb_event(mdev, MB_FD);
mdev             1157 drivers/block/swim3.c 		mdev->media_bay ? "in media bay" : "");
mdev             1166 drivers/block/swim3.c 	macio_release_resource(mdev, 0);
mdev             1167 drivers/block/swim3.c 	macio_release_resource(mdev, 1);
mdev             1172 drivers/block/swim3.c static int swim3_attach(struct macio_dev *mdev,
mdev             1207 drivers/block/swim3.c 	rc = swim3_add_device(mdev, floppy_count);
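
swim3.c repeats the same media-bay guard in several request paths above: if the controller sits in a media bay, check_media_bay() must report MB_FD before the drive is touched. A sketch of that guard factored into a hypothetical predicate; floppy_state and its mdev member follow the swim3.c layout shown above:

#include <asm/mediabay.h>

static bool my_swim3_fd_present(struct floppy_state *fs)
{
    /* Not in a media bay: the drive is always wired up. */
    if (!fs->mdev->media_bay)
        return true;

    /* In a media bay: only proceed if the bay currently holds a floppy. */
    return check_media_bay(fs->mdev->media_bay) == MB_FD;
}
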
mdev               59 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);		\
mdev               64 drivers/bus/moxtet.c DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
mdev               65 drivers/bus/moxtet.c DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
mdev               67 drivers/bus/moxtet.c 		mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
mdev               88 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev               99 drivers/bus/moxtet.c 		if (*t == mdev->id)
mdev              123 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev              126 drivers/bus/moxtet.c 	if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
mdev              127 drivers/bus/moxtet.c 	    mdev->idx == new_dev->idx)
mdev              134 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev              136 drivers/bus/moxtet.c 	put_device(mdev->moxtet->dev);
mdev              137 drivers/bus/moxtet.c 	kfree(mdev);
mdev              406 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev              407 drivers/bus/moxtet.c 	struct moxtet *moxtet = mdev->moxtet;
mdev              411 drivers/bus/moxtet.c 	if (mdev->idx >= moxtet->count)
mdev              418 drivers/bus/moxtet.c 	return buf[mdev->idx + 1] >> 4;
mdev              424 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev              425 drivers/bus/moxtet.c 	struct moxtet *moxtet = mdev->moxtet;
mdev              428 drivers/bus/moxtet.c 	if (mdev->idx >= moxtet->count)
mdev              433 drivers/bus/moxtet.c 	moxtet->tx[moxtet->count - mdev->idx] = val;
mdev              446 drivers/bus/moxtet.c 	struct moxtet_device *mdev = to_moxtet_device(dev);
mdev              447 drivers/bus/moxtet.c 	struct moxtet *moxtet = mdev->moxtet;
mdev              449 drivers/bus/moxtet.c 	if (mdev->idx >= moxtet->count)
mdev              452 drivers/bus/moxtet.c 	return moxtet->tx[moxtet->count - mdev->idx];
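
The moxtet.c accessors above always translate a module's bus position (mdev->idx) into an offset in the shared rx/tx shadow buffers, after checking the index against moxtet->count. A sketch of that pattern, mirroring the shape of moxtet_device_written_get(); the helper name is hypothetical:

/* Fetch the last value queued for this module from the bus-wide tx shadow
 * buffer. Fields used (moxtet->count, moxtet->tx, mdev->idx, mdev->moxtet)
 * are the ones exercised in the lines above. */
static int my_moxtet_cached_write(struct device *dev)
{
    struct moxtet_device *mdev = to_moxtet_device(dev);
    struct moxtet *moxtet = mdev->moxtet;

    /* Reject modules that fell off the bus since enumeration. */
    if (mdev->idx >= moxtet->count)
        return -EINVAL;

    return moxtet->tx[moxtet->count - mdev->idx];
}
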
mdev              208 drivers/dma/altera-msgdma.c static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
mdev              213 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              214 drivers/dma/altera-msgdma.c 	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
mdev              216 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              228 drivers/dma/altera-msgdma.c static void msgdma_free_descriptor(struct msgdma_device *mdev,
mdev              233 drivers/dma/altera-msgdma.c 	mdev->desc_free_cnt++;
mdev              234 drivers/dma/altera-msgdma.c 	list_add_tail(&desc->node, &mdev->free_list);
mdev              236 drivers/dma/altera-msgdma.c 		mdev->desc_free_cnt++;
mdev              237 drivers/dma/altera-msgdma.c 		list_move_tail(&child->node, &mdev->free_list);
mdev              246 drivers/dma/altera-msgdma.c static void msgdma_free_desc_list(struct msgdma_device *mdev,
mdev              252 drivers/dma/altera-msgdma.c 		msgdma_free_descriptor(mdev, desc);
mdev              303 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(tx->chan);
mdev              309 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              312 drivers/dma/altera-msgdma.c 	list_add_tail(&new->node, &mdev->pending_list);
mdev              313 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              332 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(dchan);
mdev              341 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, irqflags);
mdev              342 drivers/dma/altera-msgdma.c 	if (desc_cnt > mdev->desc_free_cnt) {
mdev              343 drivers/dma/altera-msgdma.c 		spin_unlock_irqrestore(&mdev->lock, irqflags);
mdev              344 drivers/dma/altera-msgdma.c 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
mdev              347 drivers/dma/altera-msgdma.c 	mdev->desc_free_cnt -= desc_cnt;
mdev              348 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, irqflags);
mdev              352 drivers/dma/altera-msgdma.c 		new = msgdma_get_descriptor(mdev);
mdev              390 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(dchan);
mdev              391 drivers/dma/altera-msgdma.c 	struct dma_slave_config *cfg = &mdev->slave_cfg;
mdev              404 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, irqflags);
mdev              405 drivers/dma/altera-msgdma.c 	if (desc_cnt > mdev->desc_free_cnt) {
mdev              406 drivers/dma/altera-msgdma.c 		spin_unlock_irqrestore(&mdev->lock, irqflags);
mdev              407 drivers/dma/altera-msgdma.c 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
mdev              410 drivers/dma/altera-msgdma.c 	mdev->desc_free_cnt -= desc_cnt;
mdev              411 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, irqflags);
mdev              418 drivers/dma/altera-msgdma.c 		new = msgdma_get_descriptor(mdev);
mdev              461 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(dchan);
mdev              463 drivers/dma/altera-msgdma.c 	memcpy(&mdev->slave_cfg, config, sizeof(*config));
mdev              468 drivers/dma/altera-msgdma.c static void msgdma_reset(struct msgdma_device *mdev)
mdev              474 drivers/dma/altera-msgdma.c 	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
mdev              475 drivers/dma/altera-msgdma.c 	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);
mdev              477 drivers/dma/altera-msgdma.c 	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
mdev              481 drivers/dma/altera-msgdma.c 		dev_err(mdev->dev, "DMA channel did not reset\n");
mdev              484 drivers/dma/altera-msgdma.c 	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
mdev              488 drivers/dma/altera-msgdma.c 		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);
mdev              490 drivers/dma/altera-msgdma.c 	mdev->idle = true;
mdev              493 drivers/dma/altera-msgdma.c static void msgdma_copy_one(struct msgdma_device *mdev,
mdev              496 drivers/dma/altera-msgdma.c 	void __iomem *hw_desc = mdev->desc;
mdev              502 drivers/dma/altera-msgdma.c 	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
mdev              519 drivers/dma/altera-msgdma.c 	mdev->idle = false;
mdev              531 drivers/dma/altera-msgdma.c static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
mdev              536 drivers/dma/altera-msgdma.c 	msgdma_copy_one(mdev, desc);
mdev              539 drivers/dma/altera-msgdma.c 		msgdma_copy_one(mdev, sdesc);
mdev              546 drivers/dma/altera-msgdma.c static void msgdma_start_transfer(struct msgdma_device *mdev)
mdev              550 drivers/dma/altera-msgdma.c 	if (!mdev->idle)
mdev              553 drivers/dma/altera-msgdma.c 	desc = list_first_entry_or_null(&mdev->pending_list,
mdev              558 drivers/dma/altera-msgdma.c 	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
mdev              559 drivers/dma/altera-msgdma.c 	msgdma_copy_desc_to_fifo(mdev, desc);
mdev              568 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(chan);
mdev              571 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              572 drivers/dma/altera-msgdma.c 	msgdma_start_transfer(mdev);
mdev              573 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              580 drivers/dma/altera-msgdma.c static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
mdev              584 drivers/dma/altera-msgdma.c 	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
mdev              593 drivers/dma/altera-msgdma.c 			spin_unlock(&mdev->lock);
mdev              595 drivers/dma/altera-msgdma.c 			spin_lock(&mdev->lock);
mdev              599 drivers/dma/altera-msgdma.c 		msgdma_free_descriptor(mdev, desc);
mdev              607 drivers/dma/altera-msgdma.c static void msgdma_complete_descriptor(struct msgdma_device *mdev)
mdev              611 drivers/dma/altera-msgdma.c 	desc = list_first_entry_or_null(&mdev->active_list,
mdev              617 drivers/dma/altera-msgdma.c 	list_add_tail(&desc->node, &mdev->done_list);
mdev              624 drivers/dma/altera-msgdma.c static void msgdma_free_descriptors(struct msgdma_device *mdev)
mdev              626 drivers/dma/altera-msgdma.c 	msgdma_free_desc_list(mdev, &mdev->active_list);
mdev              627 drivers/dma/altera-msgdma.c 	msgdma_free_desc_list(mdev, &mdev->pending_list);
mdev              628 drivers/dma/altera-msgdma.c 	msgdma_free_desc_list(mdev, &mdev->done_list);
mdev              637 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(dchan);
mdev              640 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              641 drivers/dma/altera-msgdma.c 	msgdma_free_descriptors(mdev);
mdev              642 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              643 drivers/dma/altera-msgdma.c 	kfree(mdev->sw_desq);
mdev              654 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = to_mdev(dchan);
mdev              658 drivers/dma/altera-msgdma.c 	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
mdev              659 drivers/dma/altera-msgdma.c 	if (!mdev->sw_desq)
mdev              662 drivers/dma/altera-msgdma.c 	mdev->idle = true;
mdev              663 drivers/dma/altera-msgdma.c 	mdev->desc_free_cnt = MSGDMA_DESC_NUM;
mdev              665 drivers/dma/altera-msgdma.c 	INIT_LIST_HEAD(&mdev->free_list);
mdev              668 drivers/dma/altera-msgdma.c 		desc = mdev->sw_desq + i;
mdev              669 drivers/dma/altera-msgdma.c 		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
mdev              671 drivers/dma/altera-msgdma.c 		list_add_tail(&desc->node, &mdev->free_list);
mdev              683 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = (struct msgdma_device *)data;
mdev              689 drivers/dma/altera-msgdma.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              692 drivers/dma/altera-msgdma.c 	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
mdev              693 drivers/dma/altera-msgdma.c 	dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
mdev              703 drivers/dma/altera-msgdma.c 		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
mdev              704 drivers/dma/altera-msgdma.c 		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
mdev              706 drivers/dma/altera-msgdma.c 		msgdma_complete_descriptor(mdev);
mdev              707 drivers/dma/altera-msgdma.c 		msgdma_chan_desc_cleanup(mdev);
mdev              710 drivers/dma/altera-msgdma.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              722 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = data;
mdev              725 drivers/dma/altera-msgdma.c 	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
mdev              728 drivers/dma/altera-msgdma.c 		spin_lock(&mdev->lock);
mdev              729 drivers/dma/altera-msgdma.c 		mdev->idle = true;
mdev              730 drivers/dma/altera-msgdma.c 		msgdma_start_transfer(mdev);
mdev              731 drivers/dma/altera-msgdma.c 		spin_unlock(&mdev->lock);
mdev              734 drivers/dma/altera-msgdma.c 	tasklet_schedule(&mdev->irq_tasklet);
mdev              737 drivers/dma/altera-msgdma.c 	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);
mdev              746 drivers/dma/altera-msgdma.c static void msgdma_dev_remove(struct msgdma_device *mdev)
mdev              748 drivers/dma/altera-msgdma.c 	if (!mdev)
mdev              751 drivers/dma/altera-msgdma.c 	devm_free_irq(mdev->dev, mdev->irq, mdev);
mdev              752 drivers/dma/altera-msgdma.c 	tasklet_kill(&mdev->irq_tasklet);
mdev              753 drivers/dma/altera-msgdma.c 	list_del(&mdev->dmachan.device_node);
mdev              793 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev;
mdev              798 drivers/dma/altera-msgdma.c 	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
mdev              799 drivers/dma/altera-msgdma.c 	if (!mdev)
mdev              802 drivers/dma/altera-msgdma.c 	mdev->dev = &pdev->dev;
mdev              805 drivers/dma/altera-msgdma.c 	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
mdev              810 drivers/dma/altera-msgdma.c 	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
mdev              815 drivers/dma/altera-msgdma.c 	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
mdev              819 drivers/dma/altera-msgdma.c 	platform_set_drvdata(pdev, mdev);
mdev              822 drivers/dma/altera-msgdma.c 	mdev->irq = platform_get_irq(pdev, 0);
mdev              823 drivers/dma/altera-msgdma.c 	if (mdev->irq < 0)
mdev              826 drivers/dma/altera-msgdma.c 	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
mdev              827 drivers/dma/altera-msgdma.c 			       0, dev_name(&pdev->dev), mdev);
mdev              831 drivers/dma/altera-msgdma.c 	tasklet_init(&mdev->irq_tasklet, msgdma_tasklet, (unsigned long)mdev);
mdev              833 drivers/dma/altera-msgdma.c 	dma_cookie_init(&mdev->dmachan);
mdev              835 drivers/dma/altera-msgdma.c 	spin_lock_init(&mdev->lock);
mdev              837 drivers/dma/altera-msgdma.c 	INIT_LIST_HEAD(&mdev->active_list);
mdev              838 drivers/dma/altera-msgdma.c 	INIT_LIST_HEAD(&mdev->pending_list);
mdev              839 drivers/dma/altera-msgdma.c 	INIT_LIST_HEAD(&mdev->done_list);
mdev              840 drivers/dma/altera-msgdma.c 	INIT_LIST_HEAD(&mdev->free_list);
mdev              842 drivers/dma/altera-msgdma.c 	dma_dev = &mdev->dmadev;
mdev              871 drivers/dma/altera-msgdma.c 	mdev->dmachan.device = dma_dev;
mdev              872 drivers/dma/altera-msgdma.c 	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);
mdev              883 drivers/dma/altera-msgdma.c 	msgdma_reset(mdev);
mdev              894 drivers/dma/altera-msgdma.c 	msgdma_dev_remove(mdev);
mdev              907 drivers/dma/altera-msgdma.c 	struct msgdma_device *mdev = platform_get_drvdata(pdev);
mdev              909 drivers/dma/altera-msgdma.c 	dma_async_device_unregister(&mdev->dmadev);
mdev              910 drivers/dma/altera-msgdma.c 	msgdma_dev_remove(mdev);
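
In altera-msgdma.c the software-descriptor pool (free_list plus desc_free_cnt) is only ever touched under mdev->lock, and the prep routines bail out when fewer descriptors are free than a transfer needs. A condensed sketch of that reservation step; the helper name is hypothetical and it folds the count check and list pop into one function for illustration:

#include <linux/spinlock.h>
#include <linux/list.h>

static struct msgdma_sw_desc *my_msgdma_reserve(struct msgdma_device *mdev,
                                                unsigned int need)
{
    struct msgdma_sw_desc *first;
    unsigned long flags;

    spin_lock_irqsave(&mdev->lock, flags);
    if (need > mdev->desc_free_cnt) {
        /* Not enough descriptors: let the caller retry later. */
        spin_unlock_irqrestore(&mdev->lock, flags);
        dev_dbg(mdev->dev, "descs are not available\n");
        return NULL;
    }
    mdev->desc_free_cnt -= need;
    first = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
    list_del(&first->node);
    spin_unlock_irqrestore(&mdev->lock, flags);

    return first;
}
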
mdev              622 drivers/dma/qcom/hidma.c 	struct hidma_dev *mdev = dev_get_drvdata(dev);
mdev              627 drivers/dma/qcom/hidma.c 		sprintf(buf, "%d\n", mdev->chidx);
mdev               14 drivers/dma/qcom/hidma_mgmt_sys.c 	struct hidma_mgmt_dev *mdev;
mdev               22 drivers/dma/qcom/hidma_mgmt_sys.c 	int (*get)(struct hidma_mgmt_dev *mdev);
mdev               23 drivers/dma/qcom/hidma_mgmt_sys.c 	int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
mdev               27 drivers/dma/qcom/hidma_mgmt_sys.c static int get_##name(struct hidma_mgmt_dev *mdev)		\
mdev               29 drivers/dma/qcom/hidma_mgmt_sys.c 	return mdev->name;					\
mdev               31 drivers/dma/qcom/hidma_mgmt_sys.c static int set_##name(struct hidma_mgmt_dev *mdev, u64 val)	\
mdev               36 drivers/dma/qcom/hidma_mgmt_sys.c 	tmp = mdev->name;					\
mdev               37 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev->name = val;					\
mdev               38 drivers/dma/qcom/hidma_mgmt_sys.c 	rc = hidma_mgmt_setup(mdev);				\
mdev               40 drivers/dma/qcom/hidma_mgmt_sys.c 		mdev->name = tmp;				\
mdev               56 drivers/dma/qcom/hidma_mgmt_sys.c static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
mdev               61 drivers/dma/qcom/hidma_mgmt_sys.c 	if (i >= mdev->dma_channels)
mdev               64 drivers/dma/qcom/hidma_mgmt_sys.c 	tmp = mdev->priority[i];
mdev               65 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev->priority[i] = val;
mdev               66 drivers/dma/qcom/hidma_mgmt_sys.c 	rc = hidma_mgmt_setup(mdev);
mdev               68 drivers/dma/qcom/hidma_mgmt_sys.c 		mdev->priority[i] = tmp;
mdev               72 drivers/dma/qcom/hidma_mgmt_sys.c static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
mdev               77 drivers/dma/qcom/hidma_mgmt_sys.c 	if (i >= mdev->dma_channels)
mdev               80 drivers/dma/qcom/hidma_mgmt_sys.c 	tmp = mdev->weight[i];
mdev               81 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev->weight[i] = val;
mdev               82 drivers/dma/qcom/hidma_mgmt_sys.c 	rc = hidma_mgmt_setup(mdev);
mdev               84 drivers/dma/qcom/hidma_mgmt_sys.c 		mdev->weight[i] = tmp;
mdev              102 drivers/dma/qcom/hidma_mgmt_sys.c 	struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
mdev              109 drivers/dma/qcom/hidma_mgmt_sys.c 			sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
mdev              119 drivers/dma/qcom/hidma_mgmt_sys.c 	struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
mdev              130 drivers/dma/qcom/hidma_mgmt_sys.c 			rc = hidma_mgmt_files[i].set(mdev, tmp);
mdev              144 drivers/dma/qcom/hidma_mgmt_sys.c 	struct hidma_mgmt_dev *mdev;
mdev              148 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev = chattr->mdev;
mdev              150 drivers/dma/qcom/hidma_mgmt_sys.c 		sprintf(buf, "%d\n", mdev->priority[chattr->index]);
mdev              152 drivers/dma/qcom/hidma_mgmt_sys.c 		sprintf(buf, "%d\n", mdev->weight[chattr->index]);
mdev              162 drivers/dma/qcom/hidma_mgmt_sys.c 	struct hidma_mgmt_dev *mdev;
mdev              167 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev = chattr->mdev;
mdev              174 drivers/dma/qcom/hidma_mgmt_sys.c 		rc = set_priority(mdev, chattr->index, tmp);
mdev              178 drivers/dma/qcom/hidma_mgmt_sys.c 		rc = set_weight(mdev, chattr->index, tmp);
mdev              208 drivers/dma/qcom/hidma_mgmt_sys.c static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
mdev              215 drivers/dma/qcom/hidma_mgmt_sys.c 	chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
mdev              219 drivers/dma/qcom/hidma_mgmt_sys.c 	name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
mdev              223 drivers/dma/qcom/hidma_mgmt_sys.c 	chattr->mdev = mdev;
mdev              234 drivers/dma/qcom/hidma_mgmt_sys.c int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
mdev              241 drivers/dma/qcom/hidma_mgmt_sys.c 	required = sizeof(*mdev->chroots) * mdev->dma_channels;
mdev              242 drivers/dma/qcom/hidma_mgmt_sys.c 	mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
mdev              243 drivers/dma/qcom/hidma_mgmt_sys.c 	if (!mdev->chroots)
mdev              246 drivers/dma/qcom/hidma_mgmt_sys.c 	chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
mdev              251 drivers/dma/qcom/hidma_mgmt_sys.c 	for (i = 0; i < mdev->dma_channels; i++) {
mdev              255 drivers/dma/qcom/hidma_mgmt_sys.c 		mdev->chroots[i] = kobject_create_and_add(name, chanops);
mdev              256 drivers/dma/qcom/hidma_mgmt_sys.c 		if (!mdev->chroots[i])
mdev              262 drivers/dma/qcom/hidma_mgmt_sys.c 		rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
mdev              269 drivers/dma/qcom/hidma_mgmt_sys.c 	for (i = 0; i < mdev->dma_channels; i++) {
mdev              270 drivers/dma/qcom/hidma_mgmt_sys.c 		rc = create_sysfs_entry_channel(mdev, "priority",
mdev              272 drivers/dma/qcom/hidma_mgmt_sys.c 						mdev->chroots[i]);
mdev              276 drivers/dma/qcom/hidma_mgmt_sys.c 		rc = create_sysfs_entry_channel(mdev, "weight",
mdev              278 drivers/dma/qcom/hidma_mgmt_sys.c 						mdev->chroots[i]);
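
set_priority() and set_weight() above share one shape: save the old value, write the new one, re-run hidma_mgmt_setup(), and roll back if programming the hardware fails. A generic sketch of that shape; set_chan_value() and its field pointer are illustrative, not part of the driver:

static int set_chan_value(struct hidma_mgmt_dev *mdev, unsigned int i,
                          u64 *field, u64 val)
{
    u64 old;
    int rc;

    if (i >= mdev->dma_channels)
        return -EINVAL;

    old = *field;
    *field = val;
    rc = hidma_mgmt_setup(mdev);    /* push the new config to hardware */
    if (rc)
        *field = old;               /* hardware rejected it: roll back */

    return rc;
}
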
mdev               61 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_device *mdev;
mdev              109 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_device *mdev = mc->mdev;
mdev              141 drivers/dma/uniphier-mdmac.c 	writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD);
mdev              157 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_device *mdev = mc->mdev;
mdev              165 drivers/dma/uniphier-mdmac.c 	       mdev->reg_base + UNIPHIER_MDMAC_CMD);
mdev              348 drivers/dma/uniphier-mdmac.c 				    struct uniphier_mdmac_device *mdev,
mdev              352 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id];
mdev              370 drivers/dma/uniphier-mdmac.c 	mc->mdev = mdev;
mdev              371 drivers/dma/uniphier-mdmac.c 	mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET +
mdev              375 drivers/dma/uniphier-mdmac.c 	vchan_init(&mc->vc, &mdev->ddev);
mdev              383 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_device *mdev;
mdev              396 drivers/dma/uniphier-mdmac.c 	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
mdev              398 drivers/dma/uniphier-mdmac.c 	if (!mdev)
mdev              402 drivers/dma/uniphier-mdmac.c 	mdev->reg_base = devm_ioremap_resource(dev, res);
mdev              403 drivers/dma/uniphier-mdmac.c 	if (IS_ERR(mdev->reg_base))
mdev              404 drivers/dma/uniphier-mdmac.c 		return PTR_ERR(mdev->reg_base);
mdev              406 drivers/dma/uniphier-mdmac.c 	mdev->clk = devm_clk_get(dev, NULL);
mdev              407 drivers/dma/uniphier-mdmac.c 	if (IS_ERR(mdev->clk)) {
mdev              409 drivers/dma/uniphier-mdmac.c 		return PTR_ERR(mdev->clk);
mdev              412 drivers/dma/uniphier-mdmac.c 	ret = clk_prepare_enable(mdev->clk);
mdev              416 drivers/dma/uniphier-mdmac.c 	ddev = &mdev->ddev;
mdev              432 drivers/dma/uniphier-mdmac.c 		ret = uniphier_mdmac_chan_init(pdev, mdev, i);
mdev              446 drivers/dma/uniphier-mdmac.c 	platform_set_drvdata(pdev, mdev);
mdev              453 drivers/dma/uniphier-mdmac.c 	clk_disable_unprepare(mdev->clk);
mdev              460 drivers/dma/uniphier-mdmac.c 	struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
mdev              471 drivers/dma/uniphier-mdmac.c 	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
mdev              479 drivers/dma/uniphier-mdmac.c 	dma_async_device_unregister(&mdev->ddev);
mdev              480 drivers/dma/uniphier-mdmac.c 	clk_disable_unprepare(mdev->clk);
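
The uniphier-mdmac probe lines above show the usual managed-resource sequence: devm_kzalloc() sized with struct_size() for the flexible channel array, devm_ioremap_resource() for the registers, then clock get/enable and platform_set_drvdata(). A probe-shaped sketch under those assumptions; the nr_chans parameter and the omitted channel/dmaengine setup are illustrative:

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_mdmac_probe(struct platform_device *pdev, unsigned int nr_chans)
{
    struct device *dev = &pdev->dev;
    struct uniphier_mdmac_device *mdev;
    struct resource *res;
    int ret;

    mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
                        GFP_KERNEL);
    if (!mdev)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    mdev->reg_base = devm_ioremap_resource(dev, res);
    if (IS_ERR(mdev->reg_base))
        return PTR_ERR(mdev->reg_base);

    mdev->clk = devm_clk_get(dev, NULL);
    if (IS_ERR(mdev->clk))
        return PTR_ERR(mdev->clk);

    ret = clk_prepare_enable(mdev->clk);
    if (ret)
        return ret;

    platform_set_drvdata(pdev, mdev);
    return 0;
}
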
mdev               63 drivers/edac/i10nm_base.c 	struct pci_dev *mdev;
mdev               90 drivers/edac/i10nm_base.c 			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
mdev               92 drivers/edac/i10nm_base.c 			if (i == 0 && !mdev) {
mdev               96 drivers/edac/i10nm_base.c 			if (!mdev)
mdev               99 drivers/edac/i10nm_base.c 			d->imc[i].mdev = mdev;
mdev              280 drivers/edac/i10nm_base.c 			if (!d->imc[i].mdev)
mdev              288 drivers/edac/i10nm_base.c 			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
mdev              634 drivers/edac/skx_common.c 			if (d->imc[i].mdev)
mdev              635 drivers/edac/skx_common.c 				pci_dev_put(d->imc[i].mdev);
mdev               60 drivers/edac/skx_common.h 		struct pci_dev *mdev; /* for i10nm CPU */
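
i10nm_base.c takes a reference on each memory-controller PCI device (d->imc[i].mdev) at probe time, and skx_common.c drops it with pci_dev_put() during teardown. A sketch of that release side; the num_imc parameter is an assumption, and struct skx_dev follows the mdev member declared in skx_common.h above:

static void my_put_all_imc_devs(struct skx_dev *d, int num_imc)
{
    int i;

    for (i = 0; i < num_imc; i++) {
        if (d->imc[i].mdev) {
            pci_dev_put(d->imc[i].mdev);
            d->imc[i].mdev = NULL;
        }
    }
}
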
mdev               45 drivers/fsi/fsi-occ.c 	struct miscdevice mdev;
mdev               49 drivers/fsi/fsi-occ.c #define to_occ(x)	container_of((x), struct occ, mdev)
mdev               74 drivers/fsi/fsi-occ.c 	struct miscdevice *mdev = file->private_data;
mdev               75 drivers/fsi/fsi-occ.c 	struct occ *occ = to_occ(mdev);
mdev              544 drivers/fsi/fsi-occ.c 	occ->mdev.fops = &occ_fops;
mdev              545 drivers/fsi/fsi-occ.c 	occ->mdev.minor = MISC_DYNAMIC_MINOR;
mdev              546 drivers/fsi/fsi-occ.c 	occ->mdev.name = occ->name;
mdev              547 drivers/fsi/fsi-occ.c 	occ->mdev.parent = dev;
mdev              549 drivers/fsi/fsi-occ.c 	rc = misc_register(&occ->mdev);
mdev              568 drivers/fsi/fsi-occ.c 	misc_deregister(&occ->mdev);
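
fsi-occ.c embeds a struct miscdevice in its driver state and recovers the outer struct occ with the to_occ() container_of() wrapper; the misc core hands the miscdevice to open() via file->private_data, which is what the lines around occ_open rely on. A small open()-shaped sketch of that recovery; repointing private_data at the occ is an illustrative choice, not necessarily what the driver itself does:

#include <linux/miscdevice.h>
#include <linux/fs.h>

static int my_occ_open(struct inode *inode, struct file *file)
{
    struct miscdevice *mdev = file->private_data;   /* set by the misc core */
    struct occ *occ = to_occ(mdev);

    /* Stash the driver object so read()/write() skip the lookup. */
    file->private_data = occ;
    return 0;
}
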
mdev              130 drivers/gpio/gpio-menz127.c static int men_z127_probe(struct mcb_device *mdev,
mdev              134 drivers/gpio/gpio-menz127.c 	struct device *dev = &mdev->dev;
mdev              142 drivers/gpio/gpio-menz127.c 	men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
mdev              155 drivers/gpio/gpio-menz127.c 	mcb_set_drvdata(mdev, men_z127_gpio);
mdev              157 drivers/gpio/gpio-menz127.c 	ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
mdev              185 drivers/gpio/gpio-menz127.c static void men_z127_remove(struct mcb_device *mdev)
mdev              187 drivers/gpio/gpio-menz127.c 	struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
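
gpio-menz127.c follows the standard MCB pairing: mcb_request_mem() in probe, driver data stashed with mcb_set_drvdata(), and mcb_get_drvdata() in remove. A sketch of that pairing with a made-up private struct; the mcb_release_mem() call in remove is assumed from the MCB core API rather than shown above:

#include <linux/mcb.h>

struct my_priv {
    struct resource *mem;
};

static int my_mcb_probe(struct mcb_device *mdev, const struct mcb_device_id *id)
{
    struct my_priv *priv;

    priv = devm_kzalloc(&mdev->dev, sizeof(*priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    priv->mem = mcb_request_mem(mdev, dev_name(&mdev->dev));
    if (IS_ERR(priv->mem))
        return PTR_ERR(priv->mem);

    mcb_set_drvdata(mdev, priv);
    return 0;
}

static void my_mcb_remove(struct mcb_device *mdev)
{
    struct my_priv *priv = mcb_get_drvdata(mdev);

    mcb_release_mem(priv->mem);
}
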
mdev              167 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
mdev              169 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              207 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_enable_irq(struct komeda_dev *mdev)
mdev              209 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              227 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_disable_irq(struct komeda_dev *mdev)
mdev              229 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              246 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
mdev              248 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              272 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
mdev              274 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              286 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static void d71_flush(struct komeda_dev *mdev,
mdev              289 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              327 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static void d71_cleanup(struct komeda_dev *mdev)
mdev              329 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              334 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	devm_kfree(mdev->dev, d71);
mdev              335 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	mdev->chip_data = NULL;
mdev              338 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_enum_resources(struct komeda_dev *mdev)
mdev              347 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
mdev              351 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	mdev->chip_data = d71;
mdev              352 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	d71->mdev = mdev;
mdev              353 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	d71->gcu_addr = mdev->reg_base;
mdev              354 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);
mdev              392 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
mdev              405 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 		blk_base = mdev->reg_base + (offset >> 2);
mdev              424 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	d71_cleanup(mdev);
mdev              507 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static void d71_init_fmt_tbl(struct komeda_dev *mdev)
mdev              509 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct komeda_format_caps_table *table = &mdev->fmt_tbl;
mdev              516 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_connect_iommu(struct komeda_dev *mdev)
mdev              518 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev              543 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c static int d71_disconnect_iommu(struct komeda_dev *mdev)
mdev              545 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c 	struct d71_dev *d71 = mdev->chip_data;
mdev               25 drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h 	struct komeda_dev *mdev;
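
Every d71_* callback above starts the same way: the chip-private struct d71_dev is recovered from mdev->chip_data (and released back through it in d71_cleanup()). A trivial callback-shaped sketch of that convention; the function itself is hypothetical:

static int my_d71_callback(struct komeda_dev *mdev)
{
    struct d71_dev *d71 = mdev->chip_data;

    if (!d71)
        return -ENODEV;

    /* ... operate on d71->gcu_addr / d71->periph_addr ... */
    return 0;
}
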
mdev               84 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
mdev               91 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mutex_lock(&mdev->lock);
mdev               93 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	new_mode = mdev->dpmode | BIT(master->id);
mdev               94 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	if (WARN_ON(new_mode == mdev->dpmode)) {
mdev               99 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	err = mdev->funcs->change_opmode(mdev, new_mode);
mdev              102 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 			  mdev->dpmode, new_mode);
mdev              106 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->dpmode = new_mode;
mdev              113 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 		err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
mdev              116 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 		err = clk_prepare_enable(mdev->aclk);
mdev              129 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mutex_unlock(&mdev->lock);
mdev              137 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
mdev              142 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mutex_lock(&mdev->lock);
mdev              144 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	new_mode = mdev->dpmode & (~BIT(master->id));
mdev              146 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	if (WARN_ON(new_mode == mdev->dpmode)) {
mdev              151 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	err = mdev->funcs->change_opmode(mdev, new_mode);
mdev              154 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 			  mdev->dpmode, new_mode);
mdev              158 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->dpmode = new_mode;
mdev              162 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 		clk_disable_unprepare(mdev->aclk);
mdev              165 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mutex_unlock(&mdev->lock);
mdev              222 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
mdev              244 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
mdev              263 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = crtc->dev->dev_private;
mdev              308 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->funcs->flush(mdev, master->id, 0);
mdev              357 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = crtc->dev->dev_private;
mdev              363 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	return clk_round_rate(mdev->aclk, min_aclk);
mdev              369 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = crtc->dev->dev_private;
mdev              388 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	if (clk_round_rate(mdev->aclk, min_aclk) < min_aclk) {
mdev              477 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = crtc->dev->dev_private;
mdev              480 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true);
mdev              486 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	struct komeda_dev *mdev = crtc->dev->dev_private;
mdev              489 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
mdev              505 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 			   struct komeda_dev *mdev)
mdev              514 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 	for (i = 0; i < mdev->n_pipelines; i++) {
mdev              516 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 		master = mdev->pipelines[i];
mdev              574 drivers/gpu/drm/arm/display/komeda/komeda_crtc.c int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
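
komeda_crtc_prepare()/unprepare() above both update mdev->dpmode under mdev->lock: compute the new pipeline bitmask, ask the backend via funcs->change_opmode(), and only commit the cached mode once the hardware accepted it. A sketch of that sequence folded into one hypothetical helper:

static int my_komeda_set_pipe_enabled(struct komeda_dev *mdev, int pipe_id,
                                      bool enable)
{
    u32 new_mode;
    int err;

    mutex_lock(&mdev->lock);

    new_mode = enable ? (mdev->dpmode | BIT(pipe_id))
                      : (mdev->dpmode & ~BIT(pipe_id));

    err = mdev->funcs->change_opmode(mdev, new_mode);
    if (!err)
        mdev->dpmode = new_mode;    /* only cache what the chip accepted */

    mutex_unlock(&mdev->lock);
    return err;
}
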
mdev               25 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct komeda_dev *mdev = sf->private;
mdev               28 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->funcs->dump_register)
mdev               29 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->funcs->dump_register(mdev, sf);
mdev               31 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	for (i = 0; i < mdev->n_pipelines; i++)
mdev               32 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		komeda_pipeline_dump_register(mdev->pipelines[i], sf);
mdev               51 drivers/gpu/drm/arm/display/komeda/komeda_dev.c static void komeda_debugfs_init(struct komeda_dev *mdev)
mdev               56 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
mdev               57 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	debugfs_create_file("register", 0444, mdev->debugfs_root,
mdev               58 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 			    mdev, &komeda_register_fops);
mdev               65 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct komeda_dev *mdev = dev_to_mdev(dev);
mdev               67 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
mdev               74 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct komeda_dev *mdev = dev_to_mdev(dev);
mdev               75 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct komeda_pipeline *pipe = mdev->pipelines[0];
mdev               82 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	config_id.n_pipelines = mdev->n_pipelines;
mdev              104 drivers/gpu/drm/arm/display/komeda/komeda_dev.c static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
mdev              112 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (ret != 0 || pipe_id >= mdev->n_pipelines)
mdev              115 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	pipe = mdev->pipelines[pipe_id];
mdev              138 drivers/gpu/drm/arm/display/komeda/komeda_dev.c static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
mdev              144 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->irq  = platform_get_irq(pdev, 0);
mdev              145 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->irq < 0) {
mdev              147 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		return mdev->irq;
mdev              158 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 			ret = komeda_parse_pipe_dt(mdev, child);
mdev              174 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct komeda_dev *mdev;
mdev              188 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev = devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL);
mdev              189 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (!mdev)
mdev              192 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mutex_init(&mdev->lock);
mdev              194 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->dev = dev;
mdev              195 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->reg_base = devm_ioremap_resource(dev, io_res);
mdev              196 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (IS_ERR(mdev->reg_base)) {
mdev              198 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		err = PTR_ERR(mdev->reg_base);
mdev              199 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->reg_base = NULL;
mdev              203 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->aclk = devm_clk_get(dev, "aclk");
mdev              204 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (IS_ERR(mdev->aclk)) {
mdev              206 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		err = PTR_ERR(mdev->aclk);
mdev              207 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->aclk = NULL;
mdev              211 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	clk_prepare_enable(mdev->aclk);
mdev              213 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
mdev              214 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (!komeda_product_match(mdev, product->product_id)) {
mdev              217 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 			  MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
mdev              223 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		 MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id),
mdev              224 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		 MALIDP_CORE_ID_MAJOR(mdev->chip.core_id),
mdev              225 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		 MALIDP_CORE_ID_MINOR(mdev->chip.core_id));
mdev              227 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->funcs->init_format_table(mdev);
mdev              229 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	err = mdev->funcs->enum_resources(mdev);
mdev              235 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	err = komeda_parse_dt(dev, mdev);
mdev              241 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	err = komeda_assemble_pipelines(mdev);
mdev              247 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	dev->dma_parms = &mdev->dma_parms;
mdev              250 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
mdev              251 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (!mdev->iommu)
mdev              254 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->iommu && mdev->funcs->connect_iommu) {
mdev              255 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		err = mdev->funcs->connect_iommu(mdev);
mdev              257 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 			mdev->iommu = NULL;
mdev              269 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	komeda_debugfs_init(mdev);
mdev              272 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	return mdev;
mdev              275 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	komeda_dev_destroy(mdev);
mdev              279 drivers/gpu/drm/arm/display/komeda/komeda_dev.c void komeda_dev_destroy(struct komeda_dev *mdev)
mdev              281 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	struct device *dev = mdev->dev;
mdev              282 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	const struct komeda_dev_funcs *funcs = mdev->funcs;
mdev              288 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	debugfs_remove_recursive(mdev->debugfs_root);
mdev              291 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->iommu && mdev->funcs->disconnect_iommu)
mdev              292 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->funcs->disconnect_iommu(mdev);
mdev              293 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->iommu = NULL;
mdev              295 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	for (i = 0; i < mdev->n_pipelines; i++) {
mdev              296 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev              297 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->pipelines[i] = NULL;
mdev              300 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	mdev->n_pipelines = 0;
mdev              305 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		funcs->cleanup(mdev);
mdev              307 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->reg_base) {
mdev              308 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		devm_iounmap(dev, mdev->reg_base);
mdev              309 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->reg_base = NULL;
mdev              312 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	if (mdev->aclk) {
mdev              313 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		clk_disable_unprepare(mdev->aclk);
mdev              314 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		devm_clk_put(dev, mdev->aclk);
mdev              315 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 		mdev->aclk = NULL;
mdev              318 drivers/gpu/drm/arm/display/komeda/komeda_dev.c 	devm_kfree(dev, mdev);
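
core_id_show() and config_id_show() above reach the komeda_dev from a sysfs callback through the dev_to_mdev() accessor. A sketch of an additional read-only attribute in the same style; exposing n_pipelines this way is purely illustrative:

static ssize_t
n_pipelines_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct komeda_dev *mdev = dev_to_mdev(dev);

    return snprintf(buf, PAGE_SIZE, "%d\n", mdev->n_pipelines);
}
static DEVICE_ATTR_RO(n_pipelines);
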
mdev               86 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	void (*init_format_table)(struct komeda_dev *mdev);
mdev               92 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*enum_resources)(struct komeda_dev *mdev);
mdev               94 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	void (*cleanup)(struct komeda_dev *mdev);
mdev               96 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*connect_iommu)(struct komeda_dev *mdev);
mdev               98 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*disconnect_iommu)(struct komeda_dev *mdev);
mdev              104 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	irqreturn_t (*irq_handler)(struct komeda_dev *mdev,
mdev              107 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*enable_irq)(struct komeda_dev *mdev);
mdev              109 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*disable_irq)(struct komeda_dev *mdev);
mdev              111 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	void (*on_off_vblank)(struct komeda_dev *mdev,
mdev              115 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq);
mdev              121 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	int (*change_opmode)(struct komeda_dev *mdev, int new_mode);
mdev              123 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	void (*flush)(struct komeda_dev *mdev,
mdev              197 drivers/gpu/drm/arm/display/komeda/komeda_dev.h komeda_product_match(struct komeda_dev *mdev, u32 target)
mdev              199 drivers/gpu/drm/arm/display/komeda/komeda_dev.h 	return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
mdev              206 drivers/gpu/drm/arm/display/komeda/komeda_dev.h void komeda_dev_destroy(struct komeda_dev *mdev);
mdev               16 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	struct komeda_dev *mdev;
mdev               24 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	return mdrv ? mdrv->mdev : NULL;
mdev               35 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	komeda_dev_destroy(mdrv->mdev);
mdev               50 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	mdrv->mdev = komeda_dev_create(dev);
mdev               51 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	if (IS_ERR(mdrv->mdev)) {
mdev               52 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 		err = PTR_ERR(mdrv->mdev);
mdev               56 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	mdrv->kms = komeda_kms_attach(mdrv->mdev);
mdev               67 drivers/gpu/drm/arm/display/komeda/komeda_drv.c 	komeda_dev_destroy(mdrv->mdev);
mdev              111 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
mdev              133 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 		if ((fb->pitches[i] * block_h) % mdev->chip.bus_width) {
mdev              135 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 				      i, fb->pitches[i], mdev->chip.bus_width);
mdev              162 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 	struct komeda_dev *mdev = dev->dev_private;
mdev              170 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 	kfb->format_caps = komeda_get_format_caps(&mdev->fmt_tbl,
mdev              185 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 		ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd);
mdev              196 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 	kfb->is_va = mdev->iommu ? true : false;
mdev              271 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 	struct komeda_dev *mdev = fb->dev->dev_private;
mdev              276 drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 	supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
mdev               30 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	struct komeda_dev *mdev = dev->dev_private;
mdev               33 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	args->pitch = ALIGN(pitch, mdev->chip.bus_width);
mdev               41 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	struct komeda_dev *mdev = drm->dev_private;
mdev               49 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	status = mdev->funcs->irq_handler(mdev, &evts);
mdev              240 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 					struct komeda_dev *mdev)
mdev              246 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	komeda_kms_setup_crtcs(kms, mdev);
mdev              259 drivers/gpu/drm/arm/display/komeda/komeda_kms.c struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
mdev              269 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
mdev              273 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	drm->dev_private = mdev;
mdev              275 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	komeda_kms_mode_config_init(kms, mdev);
mdev              277 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = komeda_kms_add_private_objs(kms, mdev);
mdev              281 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = komeda_kms_add_planes(kms, mdev);
mdev              289 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = komeda_kms_add_crtcs(kms, mdev);
mdev              293 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = komeda_kms_add_wb_connectors(kms, mdev);
mdev              297 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = component_bind_all(mdev->dev, kms);
mdev              303 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = devm_request_irq(drm->dev, mdev->irq,
mdev              309 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	err = mdev->funcs->enable_irq(mdev);
mdev              326 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	mdev->funcs->disable_irq(mdev);
mdev              328 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	component_unbind_all(mdev->dev, drm);
mdev              342 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	struct komeda_dev *mdev = drm->dev_private;
mdev              348 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	mdev->funcs->disable_irq(mdev);
mdev              349 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	component_unbind_all(mdev->dev, drm);
mdev              171 drivers/gpu/drm/arm/display/komeda/komeda_kms.h int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
mdev              173 drivers/gpu/drm/arm/display/komeda/komeda_kms.h int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
mdev              174 drivers/gpu/drm/arm/display/komeda/komeda_kms.h int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
mdev              176 drivers/gpu/drm/arm/display/komeda/komeda_kms.h 				struct komeda_dev *mdev);
mdev              178 drivers/gpu/drm/arm/display/komeda/komeda_kms.h 				 struct komeda_dev *mdev);
mdev              184 drivers/gpu/drm/arm/display/komeda/komeda_kms.h struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
mdev               14 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
mdev               19 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
mdev               30 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
mdev               34 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	pipe->mdev = mdev;
mdev               35 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	pipe->id   = mdev->n_pipelines;
mdev               38 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	mdev->pipelines[mdev->n_pipelines] = pipe;
mdev               39 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	mdev->n_pipelines++;
mdev               44 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c void komeda_pipeline_destroy(struct komeda_dev *mdev,
mdev               52 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 		komeda_component_destroy(mdev, c);
mdev               62 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	devm_kfree(mdev->dev, pipe);
mdev               68 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	struct komeda_dev *mdev = pipe->mdev;
mdev               84 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 		temp = mdev->pipelines[id - KOMEDA_COMPONENT_COMPIZ0];
mdev              103 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 		temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0];
mdev              196 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
mdev              226 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c void komeda_component_destroy(struct komeda_dev *mdev,
mdev              229 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	devm_kfree(mdev->dev, c);
mdev              337 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c int komeda_assemble_pipelines(struct komeda_dev *mdev)
mdev              342 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 	for (i = 0; i < mdev->n_pipelines; i++) {
mdev              343 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 		pipe = mdev->pipelines[i];
mdev              385 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h 	struct komeda_dev *mdev;
mdev              468 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
mdev              470 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h void komeda_pipeline_destroy(struct komeda_dev *mdev,
mdev              474 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h int komeda_assemble_pipelines(struct komeda_dev *mdev);
mdev              494 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h void komeda_component_destroy(struct komeda_dev *mdev,
mdev              187 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 	struct komeda_dev *mdev = plane->dev->dev_private;
mdev              191 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 	return komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
mdev              251 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 	struct komeda_dev *mdev = kms->base.dev_private;
mdev              265 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 	formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
mdev              321 drivers/gpu/drm/arm/display/komeda/komeda_plane.c int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev)
mdev              326 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 	for (i = 0; i < mdev->n_pipelines; i++) {
mdev              327 drivers/gpu/drm/arm/display/komeda/komeda_plane.c 		pipe = mdev->pipelines[i];
mdev              367 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c 				struct komeda_dev *mdev)
mdev              372 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c 	for (i = 0; i < mdev->n_pipelines; i++) {
mdev              373 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c 		pipe = mdev->pipelines[i];
mdev              141 drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c 	struct komeda_dev *mdev = kms->base.dev_private;
mdev              159 drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c 	formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
mdev              181 drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c 				 struct komeda_dev *mdev)
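Aside: in the komeda entries above, "mdev" is the driver-private struct komeda_dev, typically reached through drm->dev_private and then walked pipeline by pipeline (n_pipelines, pipelines[]). The fragment below is an illustrative sketch, not code from the tree; komeda_for_each_pipeline() and its fn callback are hypothetical, and only fields visible in the listing are used.

    /* Sketch only (assumes the driver-local komeda_pipeline.h); mirrors the
     * per-pipeline loops of komeda_kms_add_planes()/komeda_assemble_pipelines()
     * shown in the lines above. */
    static int komeda_for_each_pipeline(struct komeda_dev *mdev,
                                        int (*fn)(struct komeda_pipeline *pipe))
    {
            int i, err;

            for (i = 0; i < mdev->n_pipelines; i++) {
                    err = fn(mdev->pipelines[i]);
                    if (err)
                            return err;
            }
            return 0;
    }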
mdev              201 drivers/gpu/drm/i915/gvt/gvt.h 		struct mdev_device *mdev;
mdev              116 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_guest_init(struct mdev_device *mdev);
mdev              132 drivers/gpu/drm/i915/gvt/kvmgt.c 		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
mdev              155 drivers/gpu/drm/i915/gvt/kvmgt.c 		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
mdev              571 drivers/gpu/drm/i915/gvt/kvmgt.c 		mdev_dev(vgpu->vdev.mdev));
mdev              646 drivers/gpu/drm/i915/gvt/kvmgt.c static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              654 drivers/gpu/drm/i915/gvt/kvmgt.c 	pdev = mdev_parent_dev(mdev);
mdev              674 drivers/gpu/drm/i915/gvt/kvmgt.c 	vgpu->vdev.mdev = mdev;
mdev              675 drivers/gpu/drm/i915/gvt/kvmgt.c 	mdev_set_drvdata(mdev, vgpu);
mdev              678 drivers/gpu/drm/i915/gvt/kvmgt.c 		     dev_name(mdev_dev(mdev)));
mdev              685 drivers/gpu/drm/i915/gvt/kvmgt.c static int intel_vgpu_remove(struct mdev_device *mdev)
mdev              687 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev              745 drivers/gpu/drm/i915/gvt/kvmgt.c static int intel_vgpu_open(struct mdev_device *mdev)
mdev              747 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev              755 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
mdev              764 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
mdev              778 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = kvmgt_guest_init(mdev);
mdev              788 drivers/gpu/drm/i915/gvt/kvmgt.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
mdev              792 drivers/gpu/drm/i915/gvt/kvmgt.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev              822 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
mdev              826 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
mdev              842 drivers/gpu/drm/i915/gvt/kvmgt.c static void intel_vgpu_release(struct mdev_device *mdev)
mdev              844 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev              932 drivers/gpu/drm/i915/gvt/kvmgt.c static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
mdev              935 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev              981 drivers/gpu/drm/i915/gvt/kvmgt.c static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
mdev              983 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev             1000 drivers/gpu/drm/i915/gvt/kvmgt.c static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
mdev             1011 drivers/gpu/drm/i915/gvt/kvmgt.c 			gtt_entry(mdev, ppos)) {
mdev             1014 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
mdev             1026 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
mdev             1038 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
mdev             1050 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
mdev             1073 drivers/gpu/drm/i915/gvt/kvmgt.c static ssize_t intel_vgpu_write(struct mdev_device *mdev,
mdev             1085 drivers/gpu/drm/i915/gvt/kvmgt.c 			gtt_entry(mdev, ppos)) {
mdev             1091 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
mdev             1103 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
mdev             1115 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, (char *)&val,
mdev             1127 drivers/gpu/drm/i915/gvt/kvmgt.c 			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
mdev             1146 drivers/gpu/drm/i915/gvt/kvmgt.c static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
mdev             1152 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev             1275 drivers/gpu/drm/i915/gvt/kvmgt.c static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
mdev             1278 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
mdev             1557 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev             1559 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (mdev) {
mdev             1561 drivers/gpu/drm/i915/gvt/kvmgt.c 			mdev_get_drvdata(mdev);
mdev             1571 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev             1573 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (mdev) {
mdev             1575 drivers/gpu/drm/i915/gvt/kvmgt.c 			mdev_get_drvdata(mdev);
mdev             1758 drivers/gpu/drm/i915/gvt/kvmgt.c static int kvmgt_guest_init(struct mdev_device *mdev)
mdev             1764 drivers/gpu/drm/i915/gvt/kvmgt.c 	vgpu = mdev_get_drvdata(mdev);
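Aside: in the i915/gvt entries, "mdev" is a VFIO mediated device (struct mdev_device); the per-vGPU state is attached with mdev_set_drvdata() at create time and recovered with mdev_get_drvdata() in every other callback, as the kvmgt.c lines show. A minimal sketch of that round trip, with a hypothetical my_state type (needs <linux/mdev.h> and <linux/slab.h>):

    /* Sketch of the drvdata pattern above; my_state/my_create/my_remove are
     * hypothetical names, the signatures match the listed callbacks. */
    struct my_state {
            struct mdev_device *mdev;
    };

    static int my_create(struct kobject *kobj, struct mdev_device *mdev)
    {
            struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

            if (!s)
                    return -ENOMEM;
            s->mdev = mdev;
            mdev_set_drvdata(mdev, s);      /* stash per-device state */
            return 0;
    }

    static int my_remove(struct mdev_device *mdev)
    {
            struct my_state *s = mdev_get_drvdata(mdev);

            kfree(s);
            return 0;
    }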
mdev               19 drivers/gpu/drm/mgag200/mgag200_cursor.c static void mga_hide_cursor(struct mga_device *mdev)
mdev               23 drivers/gpu/drm/mgag200/mgag200_cursor.c 	if (mdev->cursor.pixels_current)
mdev               24 drivers/gpu/drm/mgag200/mgag200_cursor.c 		drm_gem_vram_unpin(mdev->cursor.pixels_current);
mdev               25 drivers/gpu/drm/mgag200/mgag200_cursor.c 	mdev->cursor.pixels_current = NULL;
mdev               35 drivers/gpu/drm/mgag200/mgag200_cursor.c 	struct mga_device *mdev = (struct mga_device *)dev->dev_private;
mdev               36 drivers/gpu/drm/mgag200/mgag200_cursor.c 	struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
mdev               37 drivers/gpu/drm/mgag200/mgag200_cursor.c 	struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
mdev               38 drivers/gpu/drm/mgag200/mgag200_cursor.c 	struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
mdev               69 drivers/gpu/drm/mgag200/mgag200_cursor.c 		mga_hide_cursor(mdev);
mdev              213 drivers/gpu/drm/mgag200/mgag200_cursor.c 	mdev->cursor.pixels_current = pixels_next;
mdev              237 drivers/gpu/drm/mgag200/mgag200_cursor.c 	struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
mdev               67 drivers/gpu/drm/mgag200/mgag200_drv.c static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
mdev               69 drivers/gpu/drm/mgag200/mgag200_drv.c 	return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
mdev               76 drivers/gpu/drm/mgag200/mgag200_drv.c 	struct mga_device *mdev = dev->dev_private;
mdev               89 drivers/gpu/drm/mgag200/mgag200_drv.c 	if (mgag200_pin_bo_at_0(mdev))
mdev               90 drivers/gpu/drm/mgag200/mgag200_drv.c 		pg_align = PFN_UP(mdev->mc.vram_size);
mdev               38 drivers/gpu/drm/mgag200/mgag200_drv.h #define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
mdev               39 drivers/gpu/drm/mgag200/mgag200_drv.h #define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
mdev               40 drivers/gpu/drm/mgag200/mgag200_drv.h #define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
mdev               41 drivers/gpu/drm/mgag200/mgag200_drv.h #define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
mdev              168 drivers/gpu/drm/mgag200/mgag200_drv.h #define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
mdev              210 drivers/gpu/drm/mgag200/mgag200_drv.h int mgag200_modeset_init(struct mga_device *mdev);
mdev              211 drivers/gpu/drm/mgag200/mgag200_drv.h void mgag200_modeset_fini(struct mga_device *mdev);
mdev              221 drivers/gpu/drm/mgag200/mgag200_drv.h int mgag200_mm_init(struct mga_device *mdev);
mdev              222 drivers/gpu/drm/mgag200/mgag200_drv.h void mgag200_mm_fini(struct mga_device *mdev);
mdev               37 drivers/gpu/drm/mgag200/mgag200_i2c.c static int mga_i2c_read_gpio(struct mga_device *mdev)
mdev               43 drivers/gpu/drm/mgag200/mgag200_i2c.c static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
mdev               53 drivers/gpu/drm/mgag200/mgag200_i2c.c static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
mdev               59 drivers/gpu/drm/mgag200/mgag200_i2c.c 	mga_i2c_set_gpio(mdev, ~mask, state);
mdev               65 drivers/gpu/drm/mgag200/mgag200_i2c.c 	struct mga_device *mdev = i2c->dev->dev_private;
mdev               66 drivers/gpu/drm/mgag200/mgag200_i2c.c 	mga_i2c_set(mdev, i2c->data, state);
mdev               72 drivers/gpu/drm/mgag200/mgag200_i2c.c 	struct mga_device *mdev = i2c->dev->dev_private;
mdev               73 drivers/gpu/drm/mgag200/mgag200_i2c.c 	mga_i2c_set(mdev, i2c->clock, state);
mdev               79 drivers/gpu/drm/mgag200/mgag200_i2c.c 	struct mga_device *mdev = i2c->dev->dev_private;
mdev               80 drivers/gpu/drm/mgag200/mgag200_i2c.c 	return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
mdev               86 drivers/gpu/drm/mgag200/mgag200_i2c.c 	struct mga_device *mdev = i2c->dev->dev_private;
mdev               87 drivers/gpu/drm/mgag200/mgag200_i2c.c 	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
mdev               92 drivers/gpu/drm/mgag200/mgag200_i2c.c 	struct mga_device *mdev = dev->dev_private;
mdev              101 drivers/gpu/drm/mgag200/mgag200_i2c.c 	switch (mdev->type) {
mdev               21 drivers/gpu/drm/mgag200/mgag200_main.c static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
mdev               33 drivers/gpu/drm/mgag200/mgag200_main.c 	vram_size = mdev->mc.vram_window;
mdev               35 drivers/gpu/drm/mgag200/mgag200_main.c 	if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
mdev               66 drivers/gpu/drm/mgag200/mgag200_main.c static int mga_vram_init(struct mga_device *mdev)
mdev               71 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
mdev               72 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
mdev               74 drivers/gpu/drm/mgag200/mgag200_main.c 	if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
mdev               80 drivers/gpu/drm/mgag200/mgag200_main.c 	mem = pci_iomap(mdev->dev->pdev, 0, 0);
mdev               84 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->mc.vram_size = mga_probe_vram(mdev, mem);
mdev               86 drivers/gpu/drm/mgag200/mgag200_main.c 	pci_iounmap(mdev->dev->pdev, mem);
mdev               94 drivers/gpu/drm/mgag200/mgag200_main.c 	struct mga_device *mdev = dev->dev_private;
mdev               97 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->flags = mgag200_flags_from_driver_data(flags);
mdev               98 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->type = mgag200_type_from_driver_data(flags);
mdev              101 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->num_crtc = 1;
mdev              104 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->has_sdram = !(option & (1 << 14));
mdev              107 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
mdev              108 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
mdev              110 drivers/gpu/drm/mgag200/mgag200_main.c 	if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
mdev              116 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
mdev              117 drivers/gpu/drm/mgag200/mgag200_main.c 	if (mdev->rmmio == NULL)
mdev              121 drivers/gpu/drm/mgag200/mgag200_main.c 	if (IS_G200_SE(mdev))
mdev              122 drivers/gpu/drm/mgag200/mgag200_main.c 		mdev->unique_rev_id = RREG32(0x1e24);
mdev              124 drivers/gpu/drm/mgag200/mgag200_main.c 	ret = mga_vram_init(mdev);
mdev              128 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->bpp_shifts[0] = 0;
mdev              129 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->bpp_shifts[1] = 1;
mdev              130 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->bpp_shifts[2] = 0;
mdev              131 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->bpp_shifts[3] = 2;
mdev              143 drivers/gpu/drm/mgag200/mgag200_main.c 	struct mga_device *mdev;
mdev              146 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
mdev              147 drivers/gpu/drm/mgag200/mgag200_main.c 	if (mdev == NULL)
mdev              149 drivers/gpu/drm/mgag200/mgag200_main.c 	dev->dev_private = (void *)mdev;
mdev              150 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->dev = dev;
mdev              157 drivers/gpu/drm/mgag200/mgag200_main.c 	r = mgag200_mm_init(mdev);
mdev              163 drivers/gpu/drm/mgag200/mgag200_main.c 	if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
mdev              169 drivers/gpu/drm/mgag200/mgag200_main.c 	r = mgag200_modeset_init(mdev);
mdev              176 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
mdev              179 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
mdev              182 drivers/gpu/drm/mgag200/mgag200_main.c 	if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
mdev              183 drivers/gpu/drm/mgag200/mgag200_main.c 		mdev->cursor.pixels_1 = NULL;
mdev              184 drivers/gpu/drm/mgag200/mgag200_main.c 		mdev->cursor.pixels_2 = NULL;
mdev              188 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->cursor.pixels_current = NULL;
mdev              190 drivers/gpu/drm/mgag200/mgag200_main.c 	r = drm_fbdev_generic_setup(mdev->dev, 0);
mdev              198 drivers/gpu/drm/mgag200/mgag200_main.c 	mgag200_mm_fini(mdev);
mdev              207 drivers/gpu/drm/mgag200/mgag200_main.c 	struct mga_device *mdev = dev->dev_private;
mdev              209 drivers/gpu/drm/mgag200/mgag200_main.c 	if (mdev == NULL)
mdev              211 drivers/gpu/drm/mgag200/mgag200_main.c 	mgag200_modeset_fini(mdev);
mdev              213 drivers/gpu/drm/mgag200/mgag200_main.c 	mgag200_mm_fini(mdev);
mdev               30 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev               76 drivers/gpu/drm/mgag200/mgag200_mode.c static inline void mga_wait_vsync(struct mga_device *mdev)
mdev               91 drivers/gpu/drm/mgag200/mgag200_mode.c static inline void mga_wait_busy(struct mga_device *mdev)
mdev              102 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
mdev              113 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->unique_rev_id <= 0x03) {
mdev              211 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->unique_rev_id >= 0x04) {
mdev              221 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
mdev              236 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_EW3) {
mdev              400 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
mdev              493 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
mdev              506 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_EH3) {
mdev              623 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
mdev              703 drivers/gpu/drm/mgag200/mgag200_mode.c static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
mdev              705 drivers/gpu/drm/mgag200/mgag200_mode.c 	switch(mdev->type) {
mdev              708 drivers/gpu/drm/mgag200/mgag200_mode.c 		return mga_g200se_set_plls(mdev, clock);
mdev              712 drivers/gpu/drm/mgag200/mgag200_mode.c 		return mga_g200wb_set_plls(mdev, clock);
mdev              715 drivers/gpu/drm/mgag200/mgag200_mode.c 		return mga_g200ev_set_plls(mdev, clock);
mdev              719 drivers/gpu/drm/mgag200/mgag200_mode.c 		return mga_g200eh_set_plls(mdev, clock);
mdev              722 drivers/gpu/drm/mgag200/mgag200_mode.c 		return mga_g200er_set_plls(mdev, clock);
mdev              730 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = crtc->dev->dev_private;
mdev              785 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = crtc->dev->dev_private;
mdev              835 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = crtc->dev->dev_private;
mdev              904 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev              928 drivers/gpu/drm/mgag200/mgag200_mode.c 	bppshift = mdev->bpp_shifts[fb->format->cpp[0] - 1];
mdev              930 drivers/gpu/drm/mgag200/mgag200_mode.c 	switch (mdev->type) {
mdev              938 drivers/gpu/drm/mgag200/mgag200_mode.c 		if (mdev->has_sdram)
mdev              999 drivers/gpu/drm/mgag200/mgag200_mode.c 		if (IS_G200_SE(mdev) &&
mdev             1002 drivers/gpu/drm/mgag200/mgag200_mode.c 		if ((mdev->type == G200_EV ||
mdev             1003 drivers/gpu/drm/mgag200/mgag200_mode.c 		    mdev->type == G200_WB ||
mdev             1004 drivers/gpu/drm/mgag200/mgag200_mode.c 		    mdev->type == G200_EH ||
mdev             1005 drivers/gpu/drm/mgag200/mgag200_mode.c 		    mdev->type == G200_EW3 ||
mdev             1006 drivers/gpu/drm/mgag200/mgag200_mode.c 		    mdev->type == G200_EH3) &&
mdev             1013 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_ER)
mdev             1108 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_WB || mdev->type == G200_EW3)
mdev             1115 drivers/gpu/drm/mgag200/mgag200_mode.c 	mga_crtc_set_plls(mdev, mode->clock);
mdev             1121 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_ER)
mdev             1124 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_EW3)
mdev             1127 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_EV) {
mdev             1138 drivers/gpu/drm/mgag200/mgag200_mode.c 		memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
mdev             1143 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_ER) {
mdev             1160 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (IS_G200_SE(mdev)) {
mdev             1161 drivers/gpu/drm/mgag200/mgag200_mode.c 		if  (mdev->unique_rev_id >= 0x04) {
mdev             1164 drivers/gpu/drm/mgag200/mgag200_mode.c 		} else if (mdev->unique_rev_id >= 0x02) {
mdev             1194 drivers/gpu/drm/mgag200/mgag200_mode.c 			if (mdev->unique_rev_id >= 0x01)
mdev             1208 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev             1212 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->suspended)
mdev             1231 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->suspended = true;
mdev             1240 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev             1244 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (!mdev->suspended)
mdev             1255 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->suspended = false;
mdev             1265 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev             1295 drivers/gpu/drm/mgag200/mgag200_mode.c 	mga_wait_vsync(mdev);
mdev             1296 drivers/gpu/drm/mgag200/mgag200_mode.c 	mga_wait_busy(mdev);
mdev             1304 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
mdev             1319 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev             1328 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
mdev             1342 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_WB || mdev->type == G200_EW3)
mdev             1355 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = dev->dev_private;
mdev             1359 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_WB || mdev->type == G200_EW3)
mdev             1362 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
mdev             1433 drivers/gpu/drm/mgag200/mgag200_mode.c static void mga_crtc_init(struct mga_device *mdev)
mdev             1444 drivers/gpu/drm/mgag200/mgag200_mode.c 	drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
mdev             1447 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->mode_info.crtc = mga_crtc;
mdev             1567 drivers/gpu/drm/mgag200/mgag200_mode.c 	struct mga_device *mdev = (struct mga_device*)dev->dev_private;
mdev             1570 drivers/gpu/drm/mgag200/mgag200_mode.c 	if (IS_G200_SE(mdev)) {
mdev             1571 drivers/gpu/drm/mgag200/mgag200_mode.c 		if (mdev->unique_rev_id == 0x01) {
mdev             1579 drivers/gpu/drm/mgag200/mgag200_mode.c 		} else if (mdev->unique_rev_id == 0x02) {
mdev             1592 drivers/gpu/drm/mgag200/mgag200_mode.c 	} else if (mdev->type == G200_WB) {
mdev             1600 drivers/gpu/drm/mgag200/mgag200_mode.c 	} else if (mdev->type == G200_EV &&
mdev             1604 drivers/gpu/drm/mgag200/mgag200_mode.c 	} else if (mdev->type == G200_EH &&
mdev             1608 drivers/gpu/drm/mgag200/mgag200_mode.c 	} else if (mdev->type == G200_ER &&
mdev             1632 drivers/gpu/drm/mgag200/mgag200_mode.c 	if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
mdev             1698 drivers/gpu/drm/mgag200/mgag200_mode.c int mgag200_modeset_init(struct mga_device *mdev)
mdev             1703 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->mode_info.mode_config_initialized = true;
mdev             1705 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
mdev             1706 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
mdev             1708 drivers/gpu/drm/mgag200/mgag200_mode.c 	mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
mdev             1710 drivers/gpu/drm/mgag200/mgag200_mode.c 	mga_crtc_init(mdev);
mdev             1712 drivers/gpu/drm/mgag200/mgag200_mode.c 	encoder = mga_encoder_init(mdev->dev);
mdev             1718 drivers/gpu/drm/mgag200/mgag200_mode.c 	connector = mga_vga_init(mdev->dev);
mdev             1729 drivers/gpu/drm/mgag200/mgag200_mode.c void mgag200_modeset_fini(struct mga_device *mdev)
mdev               33 drivers/gpu/drm/mgag200/mgag200_ttm.c int mgag200_mm_init(struct mga_device *mdev)
mdev               37 drivers/gpu/drm/mgag200/mgag200_ttm.c 	struct drm_device *dev = mdev->dev;
mdev               40 drivers/gpu/drm/mgag200/mgag200_ttm.c 				       mdev->mc.vram_size,
mdev               51 drivers/gpu/drm/mgag200/mgag200_ttm.c 	mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
mdev               57 drivers/gpu/drm/mgag200/mgag200_ttm.c void mgag200_mm_fini(struct mga_device *mdev)
mdev               59 drivers/gpu/drm/mgag200/mgag200_ttm.c 	struct drm_device *dev = mdev->dev;
mdev               65 drivers/gpu/drm/mgag200/mgag200_ttm.c 	arch_phys_wc_del(mdev->fb_mtrr);
mdev               66 drivers/gpu/drm/mgag200/mgag200_ttm.c 	mdev->fb_mtrr = 0;
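Aside: in the mgag200 entries, "mdev" is struct mga_device, and the RREG8/WREG8/RREG32/WREG32 macros in mgag200_drv.h expand against a local variable literally named mdev (they dereference mdev->rmmio), so every caller first pulls it out of dev->dev_private. A hedged sketch of that convention; mga_read_scratch() is a made-up example and assumes mgag200_drv.h is included:

    /* Sketch only: shows why a local "mdev" must be in scope for the register
     * macros listed above.  0x1e24 is the register the driver reads into
     * unique_rev_id; reused here purely as an example offset. */
    static u32 mga_read_scratch(struct drm_device *dev)
    {
            struct mga_device *mdev = dev->dev_private;

            return RREG32(0x1e24);
    }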
mdev              154 drivers/hv/hv_utils_transport.c 	misc_deregister(&hvt->mdev);
mdev              284 drivers/hv/hv_utils_transport.c 	hvt->mdev.minor = MISC_DYNAMIC_MINOR;
mdev              285 drivers/hv/hv_utils_transport.c 	hvt->mdev.name = name;
mdev              294 drivers/hv/hv_utils_transport.c 	hvt->mdev.fops = &hvt->fops;
mdev              307 drivers/hv/hv_utils_transport.c 	if (misc_register(&hvt->mdev))
mdev               24 drivers/hv/hv_utils_transport.h 	struct miscdevice mdev;             /* misc device */
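Aside: the hv_utils_transport entries use "mdev" for an embedded struct miscdevice: the driver fills in minor, name and fops, publishes the node with misc_register() and tears it down with misc_deregister(). A minimal sketch of the same pattern outside Hyper-V, with hypothetical names (my_transport, my_transport_init/exit; needs <linux/miscdevice.h> and <linux/fs.h>):

    /* Sketch of the embedded-miscdevice pattern above. */
    struct my_transport {
            struct miscdevice mdev;
    };

    static int my_transport_init(struct my_transport *t, const char *name,
                                 const struct file_operations *fops)
    {
            t->mdev.minor = MISC_DYNAMIC_MINOR;     /* let the core pick a minor */
            t->mdev.name  = name;
            t->mdev.fops  = fops;
            return misc_register(&t->mdev);
    }

    static void my_transport_exit(struct my_transport *t)
    {
            misc_deregister(&t->mdev);
    }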
mdev               58 drivers/ide/pmac.c 	struct macio_dev		*mdev;
mdev              848 drivers/ide/pmac.c 	return pmif->mdev && pmif->mdev->media_bay != NULL;
mdev              947 drivers/ide/pmac.c 		if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
mdev             1070 drivers/ide/pmac.c 	if (pmif->mdev)
mdev             1071 drivers/ide/pmac.c 		lock_media_bay(pmif->mdev->media_bay);
mdev             1101 drivers/ide/pmac.c 	       pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
mdev             1108 drivers/ide/pmac.c 	if (pmif->mdev)
mdev             1109 drivers/ide/pmac.c 		unlock_media_bay(pmif->mdev->media_bay);
mdev             1130 drivers/ide/pmac.c static int pmac_ide_macio_attach(struct macio_dev *mdev,
mdev             1143 drivers/ide/pmac.c 	if (macio_resource_count(mdev) == 0) {
mdev             1145 drivers/ide/pmac.c 				    mdev->ofdev.dev.of_node);
mdev             1151 drivers/ide/pmac.c 	if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
mdev             1153 drivers/ide/pmac.c 				"%pOF!\n", mdev->ofdev.dev.of_node);
mdev             1163 drivers/ide/pmac.c 	if (macio_irq_count(mdev) == 0) {
mdev             1165 drivers/ide/pmac.c 				    "13\n", mdev->ofdev.dev.of_node);
mdev             1168 drivers/ide/pmac.c 		irq = macio_irq(mdev, 0);
mdev             1170 drivers/ide/pmac.c 	base = ioremap(macio_resource_start(mdev, 0), 0x400);
mdev             1173 drivers/ide/pmac.c 	pmif->mdev = mdev;
mdev             1174 drivers/ide/pmac.c 	pmif->node = mdev->ofdev.dev.of_node;
mdev             1179 drivers/ide/pmac.c 	if (macio_resource_count(mdev) >= 2) {
mdev             1180 drivers/ide/pmac.c 		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
mdev             1183 drivers/ide/pmac.c 					    mdev->ofdev.dev.of_node);
mdev             1185 drivers/ide/pmac.c 			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
mdev             1189 drivers/ide/pmac.c 	dev_set_drvdata(&mdev->ofdev.dev, pmif);
mdev             1194 drivers/ide/pmac.c 	hw.dev = &mdev->bus->pdev->dev;
mdev             1195 drivers/ide/pmac.c 	hw.parent = &mdev->ofdev.dev;
mdev             1200 drivers/ide/pmac.c 		dev_set_drvdata(&mdev->ofdev.dev, NULL);
mdev             1204 drivers/ide/pmac.c 			macio_release_resource(mdev, 1);
mdev             1206 drivers/ide/pmac.c 		macio_release_resource(mdev, 0);
mdev             1218 drivers/ide/pmac.c pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
mdev             1220 drivers/ide/pmac.c 	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
mdev             1223 drivers/ide/pmac.c 	if (mesg.event != mdev->ofdev.dev.power.power_state.event
mdev             1227 drivers/ide/pmac.c 			mdev->ofdev.dev.power.power_state = mesg;
mdev             1234 drivers/ide/pmac.c pmac_ide_macio_resume(struct macio_dev *mdev)
mdev             1236 drivers/ide/pmac.c 	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
mdev             1239 drivers/ide/pmac.c 	if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
mdev             1242 drivers/ide/pmac.c 			mdev->ofdev.dev.power.power_state = PMSG_ON;
mdev             1286 drivers/ide/pmac.c 	pmif->mdev = NULL;
mdev             1352 drivers/ide/pmac.c static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
mdev             1354 drivers/ide/pmac.c 	pmac_ide_hwif_t *pmif = dev_get_drvdata(&mdev->ofdev.dev);
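Aside: in the ide/pmac entries, "mdev" is a struct macio_dev and the attach path probes it through the macio accessors (macio_resource_count(), macio_request_resource(), macio_irq_count(), macio_irq()). The sketch below is illustrative only and trims the error handling; my_attach() is hypothetical and assumes <asm/macio.h>:

    /* Sketch of the macio probe pattern in the pmac.c lines above. */
    static int my_attach(struct macio_dev *mdev)
    {
            if (macio_resource_count(mdev) == 0)
                    return -ENXIO;          /* no register window described */
            if (macio_request_resource(mdev, 0, "my-driver"))
                    return -EBUSY;          /* region already claimed */

            /* Only take an interrupt if the device node describes one;
             * the driver above warns and falls back otherwise. */
            if (macio_irq_count(mdev) > 0)
                    dev_info(&mdev->ofdev.dev, "irq %u\n", macio_irq(mdev, 0));

            return 0;
    }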
mdev              887 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
mdev              890 drivers/infiniband/hw/mlx4/cq.c 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mdev              757 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
mdev              758 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_iboe *iboe = &mdev->iboe;
mdev              763 drivers/infiniband/hw/mlx4/main.c 	int is_bonded = mlx4_is_bonded(mdev->dev);
mdev              765 drivers/infiniband/hw/mlx4/main.c 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
mdev              769 drivers/infiniband/hw/mlx4/main.c 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
mdev              782 drivers/infiniband/hw/mlx4/main.c 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
mdev              783 drivers/infiniband/hw/mlx4/main.c 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
mdev              809 drivers/infiniband/hw/mlx4/main.c 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev              939 drivers/infiniband/hw/mlx4/main.c static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
mdev              945 drivers/infiniband/hw/mlx4/main.c 	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
mdev              946 drivers/infiniband/hw/mlx4/main.c 		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
mdev              948 drivers/infiniband/hw/mlx4/main.c 		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
mdev              954 drivers/infiniband/hw/mlx4/main.c 		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
mdev             1061 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
mdev             1062 drivers/infiniband/hw/mlx4/main.c 	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
mdev             1074 drivers/infiniband/hw/mlx4/main.c 	mutex_lock(&mdev->cap_mask_mutex);
mdev             1083 drivers/infiniband/hw/mlx4/main.c 	err = mlx4_ib_SET_PORT(mdev, port,
mdev             1271 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
mdev             1279 drivers/infiniband/hw/mlx4/main.c 	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
mdev             1307 drivers/infiniband/hw/mlx4/main.c int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mdev             1316 drivers/infiniband/hw/mlx4/main.c 	spin_lock_bh(&mdev->iboe.lock);
mdev             1317 drivers/infiniband/hw/mlx4/main.c 	ndev = mdev->iboe.netdevs[mqp->port - 1];
mdev             1320 drivers/infiniband/hw/mlx4/main.c 	spin_unlock_bh(&mdev->iboe.lock);
mdev             1487 drivers/infiniband/hw/mlx4/main.c 		struct mlx4_ib_dev *mdev,
mdev             1512 drivers/infiniband/hw/mlx4/main.c 		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
mdev             1533 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
mdev             1555 drivers/infiniband/hw/mlx4/main.c 	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
mdev             1558 drivers/infiniband/hw/mlx4/main.c 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
mdev             1565 drivers/infiniband/hw/mlx4/main.c 	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
mdev             1575 drivers/infiniband/hw/mlx4/main.c 				mdev, qp, default_table + default_flow,
mdev             1578 drivers/infiniband/hw/mlx4/main.c 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev             1584 drivers/infiniband/hw/mlx4/main.c 		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
mdev             1587 drivers/infiniband/hw/mlx4/main.c 			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev             1594 drivers/infiniband/hw/mlx4/main.c 	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
mdev             1604 drivers/infiniband/hw/mlx4/main.c 	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
mdev             1614 drivers/infiniband/hw/mlx4/main.c 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev             1836 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
mdev             1840 drivers/infiniband/hw/mlx4/main.c 		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
mdev             1844 drivers/infiniband/hw/mlx4/main.c 			err = __mlx4_ib_destroy_flow(mdev->dev,
mdev             1859 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
mdev             1860 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_dev	*dev = mdev->dev;
mdev             1866 drivers/infiniband/hw/mlx4/main.c 	if (mdev->dev->caps.steering_mode ==
mdev             1873 drivers/infiniband/hw/mlx4/main.c 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
mdev             1884 drivers/infiniband/hw/mlx4/main.c 		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
mdev             1907 drivers/infiniband/hw/mlx4/main.c 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mdev             1910 drivers/infiniband/hw/mlx4/main.c 		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mdev             1937 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
mdev             1938 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_dev *dev = mdev->dev;
mdev             1945 drivers/infiniband/hw/mlx4/main.c 	if (mdev->dev->caps.steering_mode ==
mdev             1965 drivers/infiniband/hw/mlx4/main.c 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mdev             1971 drivers/infiniband/hw/mlx4/main.c 		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mdev             1980 drivers/infiniband/hw/mlx4/main.c 		spin_lock_bh(&mdev->iboe.lock);
mdev             1981 drivers/infiniband/hw/mlx4/main.c 		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
mdev             1984 drivers/infiniband/hw/mlx4/main.c 		spin_unlock_bh(&mdev->iboe.lock);
mdev             2488 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
mdev             2495 drivers/infiniband/hw/mlx4/main.c 		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
mdev             2497 drivers/infiniband/hw/mlx4/main.c 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
mdev             2979 drivers/infiniband/hw/mlx4/main.c int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mdev             3007 drivers/infiniband/hw/mlx4/main.c 		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
mdev             3200 drivers/infiniband/hw/mlx4/main.c void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
mdev             3205 drivers/infiniband/hw/mlx4/main.c 	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
mdev             3211 drivers/infiniband/hw/mlx4/main.c 	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
mdev             3217 drivers/infiniband/hw/mlx4/main.c 	struct mlx4_ib_dev *mdev = ew->ib_dev;
mdev             3220 drivers/infiniband/hw/mlx4/main.c 	mlx4_ib_sl2vl_update(mdev, port);
mdev              834 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mdev              892 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mdev              904 drivers/infiniband/hw/mlx4/mlx4_ib.h void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
mdev              749 drivers/infiniband/hw/mlx4/mr.c 	struct mlx4_dev *mdev = NULL;
mdev              752 drivers/infiniband/hw/mlx4/mr.c 		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
mdev              754 drivers/infiniband/hw/mlx4/mr.c 		mdev = to_mdev(ibfmr->device)->dev;
mdev              757 drivers/infiniband/hw/mlx4/mr.c 	if (!mdev)
mdev              763 drivers/infiniband/hw/mlx4/mr.c 		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
mdev              772 drivers/infiniband/hw/mlx4/mr.c 	err = mlx4_SYNC_TPT(mdev);
mdev             2884 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
mdev             2885 drivers/infiniband/hw/mlx4/qp.c 	struct ib_device *ib_dev = &mdev->ib_dev;
mdev             2937 drivers/infiniband/hw/mlx4/qp.c 			cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);
mdev             2940 drivers/infiniband/hw/mlx4/qp.c 	if (mlx4_is_master(mdev->dev)) {
mdev             2941 drivers/infiniband/hw/mlx4/qp.c 		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
mdev             2944 drivers/infiniband/hw/mlx4/qp.c 		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
mdev             3551 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
mdev             3561 drivers/infiniband/hw/mlx4/qp.c 			if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
mdev             3573 drivers/infiniband/hw/mlx4/qp.c 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
mdev             3866 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
mdev             3871 drivers/infiniband/hw/mlx4/qp.c 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR &&
mdev             4432 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_dev *mdev = dev->dev;
mdev             4440 drivers/infiniband/hw/mlx4/qp.c 	if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mdev             4492 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_dev *mdev = dev->dev;
mdev             4495 drivers/infiniband/hw/mlx4/qp.c 	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mdev             4521 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_dev *mdev = dev->dev;
mdev             4524 drivers/infiniband/hw/mlx4/qp.c 	if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mdev              310 drivers/infiniband/hw/mlx4/srq.c 	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
mdev              313 drivers/infiniband/hw/mlx4/srq.c 	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mdev               52 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_ib_dev *mdev = port->dev;
mdev               55 drivers/infiniband/hw/mlx4/sysfs.c 	sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
mdev               77 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_ib_dev *mdev = port->dev;
mdev               87 drivers/infiniband/hw/mlx4/sysfs.c 	spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
mdev               89 drivers/infiniband/hw/mlx4/sysfs.c 	*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
mdev               95 drivers/infiniband/hw/mlx4/sysfs.c 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
mdev               97 drivers/infiniband/hw/mlx4/sysfs.c 	mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
mdev              102 drivers/infiniband/hw/mlx4/sysfs.c 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
mdev              105 drivers/infiniband/hw/mlx4/sysfs.c 	spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
mdev              106 drivers/infiniband/hw/mlx4/sysfs.c 	mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
mdev              118 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_ib_dev *mdev = port->dev;
mdev              122 drivers/infiniband/hw/mlx4/sysfs.c 	ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
mdev              145 drivers/infiniband/hw/mlx4/sysfs.c 	struct mlx4_ib_dev *mdev = port->dev;
mdev              149 drivers/infiniband/hw/mlx4/sysfs.c 	ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num,
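Aside: across the mlx4 entries, "mdev" is the struct mlx4_ib_dev obtained via the driver's to_mdev() cast of an ib_device (or ibqp->device), and hardware work is funnelled through the core handle mdev->dev: command mailboxes, multicast attach/detach, and the persist->state check that short-circuits verbs when the device hit an internal error. A hedged sketch of that early-return idiom; my_post() is hypothetical and assumes the driver-local mlx4_ib.h:

    /* Sketch of the internal-error check performed by several verbs above;
     * the error code and the elided fast path are illustrative choices. */
    static int my_post(struct ib_qp *ibqp)
    {
            struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

            if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                    return -EIO;    /* device is in teardown/reset */

            /* ... normal post path would run here ... */
            return 0;
    }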
mdev               45 drivers/infiniband/hw/mlx5/cmd.h int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
mdev              258 drivers/infiniband/hw/mlx5/cong.c 	struct mlx5_core_dev *mdev;
mdev              261 drivers/infiniband/hw/mlx5/cong.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
mdev              262 drivers/infiniband/hw/mlx5/cong.c 	if (!mdev)
mdev              273 drivers/infiniband/hw/mlx5/cong.c 	err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
mdev              294 drivers/infiniband/hw/mlx5/cong.c 	struct mlx5_core_dev *mdev;
mdev              299 drivers/infiniband/hw/mlx5/cong.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
mdev              300 drivers/infiniband/hw/mlx5/cong.c 	if (!mdev)
mdev              322 drivers/infiniband/hw/mlx5/cong.c 	err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
mdev              395 drivers/infiniband/hw/mlx5/cong.c 	struct mlx5_core_dev *mdev;
mdev              402 drivers/infiniband/hw/mlx5/cong.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
mdev              403 drivers/infiniband/hw/mlx5/cong.c 	if (!mdev)
mdev              406 drivers/infiniband/hw/mlx5/cong.c 	if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
mdev              407 drivers/infiniband/hw/mlx5/cong.c 	    !MLX5_CAP_GEN(mdev, cc_modify_allowed))
mdev              417 drivers/infiniband/hw/mlx5/cong.c 						 mdev->priv.dbg_root);
mdev              271 drivers/infiniband/hw/mlx5/cq.c 	mlx5_dump_err_cqe(dev->mdev, cqe);
mdev              351 drivers/infiniband/hw/mlx5/cq.c 	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
mdev              490 drivers/infiniband/hw/mlx5/cq.c 		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
mdev              548 drivers/infiniband/hw/mlx5/cq.c 		xa_lock(&dev->mdev->priv.mkey_table);
mdev              549 drivers/infiniband/hw/mlx5/cq.c 		mmkey = xa_load(&dev->mdev->priv.mkey_table,
mdev              563 drivers/infiniband/hw/mlx5/cq.c 		xa_unlock(&dev->mdev->priv.mkey_table);
mdev              601 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              607 drivers/infiniband/hw/mlx5/cq.c 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mdev              635 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
mdev              637 drivers/infiniband/hw/mlx5/cq.c 	void __iomem *uar_page = mdev->priv.uar->map;
mdev              667 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_frag_buf_alloc_node(dev->mdev,
mdev              670 drivers/infiniband/hw/mlx5/cq.c 				       dev->mdev->priv.numa_node);
mdev              696 drivers/infiniband/hw/mlx5/cq.c 		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
mdev              772 drivers/infiniband/hw/mlx5/cq.c 		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
mdev              774 drivers/infiniband/hw/mlx5/cq.c 		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
mdev              797 drivers/infiniband/hw/mlx5/cq.c 		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
mdev              853 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_db_alloc(dev->mdev, &cq->db);
mdev              884 drivers/infiniband/hw/mlx5/cq.c 	*index = dev->mdev->priv.uar->index;
mdev              892 drivers/infiniband/hw/mlx5/cq.c 	mlx5_db_free(dev->mdev, &cq->db);
mdev              899 drivers/infiniband/hw/mlx5/cq.c 	mlx5_db_free(dev->mdev, &cq->db);
mdev              929 drivers/infiniband/hw/mlx5/cq.c 	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
mdev              936 drivers/infiniband/hw/mlx5/cq.c 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
mdev              963 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
mdev              981 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
mdev             1006 drivers/infiniband/hw/mlx5/cq.c 	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
mdev             1022 drivers/infiniband/hw/mlx5/cq.c 	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
mdev             1101 drivers/infiniband/hw/mlx5/cq.c 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
mdev             1107 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
mdev             1243 drivers/infiniband/hw/mlx5/cq.c 	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
mdev             1249 drivers/infiniband/hw/mlx5/cq.c 	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
mdev             1252 drivers/infiniband/hw/mlx5/cq.c 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
mdev             1257 drivers/infiniband/hw/mlx5/cq.c 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
mdev             1316 drivers/infiniband/hw/mlx5/cq.c 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
mdev               31 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_ib_dev *mdev;
mdev              108 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_core_dev		*mdev;
mdev              139 drivers/infiniband/hw/mlx5/devx.c 	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
mdev              144 drivers/infiniband/hw/mlx5/devx.c 	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
mdev              147 drivers/infiniband/hw/mlx5/devx.c 	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
mdev              154 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              170 drivers/infiniband/hw/mlx5/devx.c 	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              595 drivers/infiniband/hw/mlx5/devx.c 			if (!dev->mdev->issi)
mdev              919 drivers/infiniband/hw/mlx5/devx.c 	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
mdev              967 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
mdev             1057 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(dev->mdev, cmd_in,
mdev             1268 drivers/infiniband/hw/mlx5/devx.c 	return xa_err(xa_store(&dev->mdev->priv.mkey_table,
mdev             1348 drivers/infiniband/hw/mlx5/devx.c 		xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
mdev             1354 drivers/infiniband/hw/mlx5/devx.c 		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
mdev             1356 drivers/infiniband/hw/mlx5/devx.c 		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
mdev             1358 drivers/infiniband/hw/mlx5/devx.c 		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
mdev             1448 drivers/infiniband/hw/mlx5/devx.c 		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
mdev             1454 drivers/infiniband/hw/mlx5/devx.c 		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
mdev             1458 drivers/infiniband/hw/mlx5/devx.c 		err = mlx5_cmd_exec(dev->mdev, cmd_in,
mdev             1490 drivers/infiniband/hw/mlx5/devx.c 		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
mdev             1492 drivers/infiniband/hw/mlx5/devx.c 		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
mdev             1494 drivers/infiniband/hw/mlx5/devx.c 		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
mdev             1511 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
mdev             1536 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
mdev             1559 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
mdev             1579 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
mdev             1619 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
mdev             1624 drivers/infiniband/hw/mlx5/devx.c 	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
mdev             1697 drivers/infiniband/hw/mlx5/devx.c 	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
mdev             1746 drivers/infiniband/hw/mlx5/devx.c 	async_data->mdev = mdev;
mdev             1992 drivers/infiniband/hw/mlx5/devx.c 	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
mdev             2204 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
mdev             2209 drivers/infiniband/hw/mlx5/devx.c 	obj->mdev = dev->mdev;
mdev             2219 drivers/infiniband/hw/mlx5/devx.c 	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
mdev             2235 drivers/infiniband/hw/mlx5/devx.c 	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
mdev             2383 drivers/infiniband/hw/mlx5/devx.c 	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
mdev             2421 drivers/infiniband/hw/mlx5/devx.c 	mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
mdev             2432 drivers/infiniband/hw/mlx5/devx.c 	mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
mdev             2930 drivers/infiniband/hw/mlx5/devx.c 	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
mdev              320 drivers/infiniband/hw/mlx5/flow.c 	obj->mdev = dev->mdev;
mdev              333 drivers/infiniband/hw/mlx5/flow.c 		mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
mdev              337 drivers/infiniband/hw/mlx5/flow.c 		mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
mdev              365 drivers/infiniband/hw/mlx5/flow.c 		mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
mdev              381 drivers/infiniband/hw/mlx5/flow.c 	return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev              383 drivers/infiniband/hw/mlx5/flow.c 	       MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
mdev              391 drivers/infiniband/hw/mlx5/flow.c 	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
mdev              398 drivers/infiniband/hw/mlx5/flow.c 	if (!mlx5_ib_modify_header_supported(mdev))
mdev              414 drivers/infiniband/hw/mlx5/flow.c 	action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
mdev              418 drivers/infiniband/hw/mlx5/flow.c 	uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
mdev              431 drivers/infiniband/hw/mlx5/flow.c 			return MLX5_CAP_FLOWTABLE(ibdev->mdev,
mdev              436 drivers/infiniband/hw/mlx5/flow.c 			return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
mdev              441 drivers/infiniband/hw/mlx5/flow.c 			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
mdev              446 drivers/infiniband/hw/mlx5/flow.c 			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
mdev              493 drivers/infiniband/hw/mlx5/flow.c 		mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
mdev              512 drivers/infiniband/hw/mlx5/flow.c 	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
mdev              528 drivers/infiniband/hw/mlx5/flow.c 	if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
mdev              539 drivers/infiniband/hw/mlx5/flow.c 		maction->flow_action_raw.dev = mdev;
mdev              554 drivers/infiniband/hw/mlx5/flow.c 		ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
mdev              560 drivers/infiniband/hw/mlx5/flow.c 	uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
mdev               69 drivers/infiniband/hw/mlx5/gsi.c 	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
mdev               58 drivers/infiniband/hw/mlx5/ib_rep.c 	ibdev->mdev = dev;
mdev               97 drivers/infiniband/hw/mlx5/ib_rep.c void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
mdev               99 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
mdev              104 drivers/infiniband/hw/mlx5/ib_rep.c void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
mdev              106 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
mdev              143 drivers/infiniband/hw/mlx5/ib_rep.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
mdev               21 drivers/infiniband/hw/mlx5/ib_rep.h void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
mdev               22 drivers/infiniband/hw/mlx5/ib_rep.h void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
mdev               54 drivers/infiniband/hw/mlx5/ib_rep.h static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
mdev               55 drivers/infiniband/hw/mlx5/ib_rep.h static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
mdev               55 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev               63 drivers/infiniband/hw/mlx5/ib_virt.c 	err = mlx5_query_hca_vport_context(mdev, 1, 1,  vf + 1, rep);
mdev               97 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev               99 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
mdev              112 drivers/infiniband/hw/mlx5/ib_virt.c 	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
mdev              125 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_core_dev *mdev;
mdev              131 drivers/infiniband/hw/mlx5/ib_virt.c 	mdev = dev->mdev;
mdev              137 drivers/infiniband/hw/mlx5/ib_virt.c 	err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
mdev              155 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              157 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
mdev              166 drivers/infiniband/hw/mlx5/ib_virt.c 	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
mdev              176 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              178 drivers/infiniband/hw/mlx5/ib_virt.c 	struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
mdev              187 drivers/infiniband/hw/mlx5/ib_virt.c 	err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
mdev               52 drivers/infiniband/hw/mlx5/mad.c 	return dev->mdev->port_caps[port_num - 1].has_smi;
mdev               73 drivers/infiniband/hw/mlx5/mad.c 	return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
mdev              206 drivers/infiniband/hw/mlx5/mad.c 	struct mlx5_core_dev *mdev;
mdev              212 drivers/infiniband/hw/mlx5/mad.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
mdev              213 drivers/infiniband/hw/mlx5/mad.c 	if (!mdev) {
mdev              219 drivers/infiniband/hw/mlx5/mad.c 		mdev = dev->mdev;
mdev              243 drivers/infiniband/hw/mlx5/mad.c 		err = mlx5_core_query_vport_counter(mdev, 0, 0,
mdev              258 drivers/infiniband/hw/mlx5/mad.c 		err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
mdev              289 drivers/infiniband/hw/mlx5/mad.c 	if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
mdev              320 drivers/infiniband/hw/mlx5/mad.c 	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
mdev              538 drivers/infiniband/hw/mlx5/mad.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              569 drivers/infiniband/hw/mlx5/mad.c 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
mdev              570 drivers/infiniband/hw/mlx5/mad.c 	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
mdev              610 drivers/infiniband/hw/mlx5/mad.c 		if (mdev->port_caps[port - 1].ext_port_cap &
mdev              141 drivers/infiniband/hw/mlx5/main.c 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
mdev              164 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
mdev              194 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev              198 drivers/infiniband/hw/mlx5/main.c 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
mdev              199 drivers/infiniband/hw/mlx5/main.c 	if (!mdev)
mdev              208 drivers/infiniband/hw/mlx5/main.c 		if (ndev->dev.parent == mdev->device)
mdev              224 drivers/infiniband/hw/mlx5/main.c 		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
mdev              276 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev              278 drivers/infiniband/hw/mlx5/main.c 	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
mdev              279 drivers/infiniband/hw/mlx5/main.c 	if (!mdev)
mdev              282 drivers/infiniband/hw/mlx5/main.c 	ndev = mlx5_lag_get_roce_netdev(mdev);
mdev              305 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = NULL;
mdev              309 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
mdev              313 drivers/infiniband/hw/mlx5/main.c 		return ibdev->mdev;
mdev              326 drivers/infiniband/hw/mlx5/main.c 		mdev = mpi->mdev;
mdev              335 drivers/infiniband/hw/mlx5/main.c 	return mdev;
mdev              345 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
mdev              487 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev              497 drivers/infiniband/hw/mlx5/main.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
mdev              498 drivers/infiniband/hw/mlx5/main.c 	if (!mdev) {
mdev              503 drivers/infiniband/hw/mlx5/main.c 		mdev = dev->mdev;
mdev              513 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
mdev              516 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
mdev              520 drivers/infiniband/hw/mlx5/main.c 	ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
mdev              532 drivers/infiniband/hw/mlx5/main.c 	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
mdev              535 drivers/infiniband/hw/mlx5/main.c 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
mdev              540 drivers/infiniband/hw/mlx5/main.c 	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
mdev              612 drivers/infiniband/hw/mlx5/main.c 	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
mdev              638 drivers/infiniband/hw/mlx5/main.c 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
mdev              643 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
mdev              644 drivers/infiniband/hw/mlx5/main.c 		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
mdev              671 drivers/infiniband/hw/mlx5/main.c 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
mdev              673 drivers/infiniband/hw/mlx5/main.c 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
mdev              691 drivers/infiniband/hw/mlx5/main.c 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
mdev              699 drivers/infiniband/hw/mlx5/main.c 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
mdev              715 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              725 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
mdev              729 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
mdev              747 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              755 drivers/infiniband/hw/mlx5/main.c 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
mdev              775 drivers/infiniband/hw/mlx5/main.c 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
mdev              793 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
mdev              797 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
mdev              823 drivers/infiniband/hw/mlx5/main.c 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
mdev              834 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              839 drivers/infiniband/hw/mlx5/main.c 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
mdev              840 drivers/infiniband/hw/mlx5/main.c 	bool raw_support = !mlx5_core_mp_enabled(mdev);
mdev              868 drivers/infiniband/hw/mlx5/main.c 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
mdev              869 drivers/infiniband/hw/mlx5/main.c 		(fw_rev_min(dev->mdev) << 16) |
mdev              870 drivers/infiniband/hw/mlx5/main.c 		fw_rev_sub(dev->mdev);
mdev              876 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, pkv))
mdev              878 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, qkv))
mdev              880 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, apm))
mdev              882 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, xrc))
mdev              884 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, imaicl)) {
mdev              887 drivers/infiniband/hw/mlx5/main.c 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
mdev              892 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, sho)) {
mdev              901 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
mdev              904 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
mdev              905 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, csum_cap)) {
mdev              911 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
mdev              916 drivers/infiniband/hw/mlx5/main.c 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
mdev              938 drivers/infiniband/hw/mlx5/main.c 			if (mlx5_accel_ipsec_device_caps(dev->mdev) &
mdev              951 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
mdev              956 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
mdev              957 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
mdev              961 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
mdev              962 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
mdev              965 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
mdev              966 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
mdev              973 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
mdev              975 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
mdev              978 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
mdev              981 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, end_pad))
mdev              984 drivers/infiniband/hw/mlx5/main.c 	props->vendor_part_id	   = mdev->pdev->device;
mdev              985 drivers/infiniband/hw/mlx5/main.c 	props->hw_ver		   = mdev->pdev->revision;
mdev              989 drivers/infiniband/hw/mlx5/main.c 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
mdev              990 drivers/infiniband/hw/mlx5/main.c 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mdev              991 drivers/infiniband/hw/mlx5/main.c 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
mdev              993 drivers/infiniband/hw/mlx5/main.c 	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
mdev             1000 drivers/infiniband/hw/mlx5/main.c 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
mdev             1001 drivers/infiniband/hw/mlx5/main.c 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
mdev             1002 drivers/infiniband/hw/mlx5/main.c 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
mdev             1003 drivers/infiniband/hw/mlx5/main.c 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
mdev             1004 drivers/infiniband/hw/mlx5/main.c 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
mdev             1005 drivers/infiniband/hw/mlx5/main.c 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
mdev             1006 drivers/infiniband/hw/mlx5/main.c 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
mdev             1007 drivers/infiniband/hw/mlx5/main.c 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
mdev             1008 drivers/infiniband/hw/mlx5/main.c 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
mdev             1012 drivers/infiniband/hw/mlx5/main.c 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
mdev             1017 drivers/infiniband/hw/mlx5/main.c 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
mdev             1018 drivers/infiniband/hw/mlx5/main.c 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
mdev             1023 drivers/infiniband/hw/mlx5/main.c 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
mdev             1032 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, cd))
mdev             1035 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_is_pf(mdev))
mdev             1041 drivers/infiniband/hw/mlx5/main.c 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
mdev             1043 drivers/infiniband/hw/mlx5/main.c 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
mdev             1046 drivers/infiniband/hw/mlx5/main.c 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
mdev             1049 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, tag_matching)) {
mdev             1051 drivers/infiniband/hw/mlx5/main.c 			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
mdev             1053 drivers/infiniband/hw/mlx5/main.c 			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mdev             1057 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, tag_matching) &&
mdev             1058 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
mdev             1063 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
mdev             1073 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
mdev             1075 drivers/infiniband/hw/mlx5/main.c 				MLX5_CAP_GEN(dev->mdev,
mdev             1082 drivers/infiniband/hw/mlx5/main.c 			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
mdev             1090 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
mdev             1091 drivers/infiniband/hw/mlx5/main.c 		    MLX5_CAP_GEN(mdev, qos)) {
mdev             1093 drivers/infiniband/hw/mlx5/main.c 				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
mdev             1095 drivers/infiniband/hw/mlx5/main.c 				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
mdev             1098 drivers/infiniband/hw/mlx5/main.c 			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
mdev             1099 drivers/infiniband/hw/mlx5/main.c 			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
mdev             1108 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
mdev             1112 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
mdev             1123 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
mdev             1127 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(mdev, cqe_128_always))
mdev             1129 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(mdev, qp_packet_based))
mdev             1138 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, swp)) {
mdev             1142 drivers/infiniband/hw/mlx5/main.c 			if (MLX5_CAP_ETH(mdev, swp_csum))
mdev             1146 drivers/infiniband/hw/mlx5/main.c 			if (MLX5_CAP_ETH(mdev, swp_lso))
mdev             1159 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(mdev, striding_rq)) {
mdev             1175 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
mdev             1178 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
mdev             1181 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
mdev             1184 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
mdev             1187 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
mdev             1299 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1315 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
mdev             1326 drivers/infiniband/hw/mlx5/main.c 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
mdev             1327 drivers/infiniband/hw/mlx5/main.c 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
mdev             1328 drivers/infiniband/hw/mlx5/main.c 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
mdev             1337 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
mdev             1343 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
mdev             1347 drivers/infiniband/hw/mlx5/main.c 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
mdev             1351 drivers/infiniband/hw/mlx5/main.c 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
mdev             1355 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
mdev             1391 drivers/infiniband/hw/mlx5/main.c 		struct mlx5_core_dev *mdev;
mdev             1394 drivers/infiniband/hw/mlx5/main.c 		mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
mdev             1395 drivers/infiniband/hw/mlx5/main.c 		if (!mdev) {
mdev             1399 drivers/infiniband/hw/mlx5/main.c 			mdev = dev->mdev;
mdev             1403 drivers/infiniband/hw/mlx5/main.c 		count = mlx5_core_reserved_gids_count(mdev);
mdev             1433 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1440 drivers/infiniband/hw/mlx5/main.c 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
mdev             1452 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev             1457 drivers/infiniband/hw/mlx5/main.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
mdev             1458 drivers/infiniband/hw/mlx5/main.c 	if (!mdev) {
mdev             1463 drivers/infiniband/hw/mlx5/main.c 		mdev = dev->mdev;
mdev             1467 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
mdev             1509 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
mdev             1523 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev             1527 drivers/infiniband/hw/mlx5/main.c 	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
mdev             1528 drivers/infiniband/hw/mlx5/main.c 	if (!mdev)
mdev             1531 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
mdev             1544 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
mdev             1571 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
mdev             1586 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
mdev             1638 drivers/infiniband/hw/mlx5/main.c 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
mdev             1654 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
mdev             1668 drivers/infiniband/hw/mlx5/main.c 		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
mdev             1684 drivers/infiniband/hw/mlx5/main.c 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
mdev             1700 drivers/infiniband/hw/mlx5/main.c 			err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
mdev             1721 drivers/infiniband/hw/mlx5/main.c 			mlx5_nic_vport_update_local_lb(dev->mdev, false);
mdev             1734 drivers/infiniband/hw/mlx5/main.c 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mdev             1737 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
mdev             1741 drivers/infiniband/hw/mlx5/main.c 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
mdev             1742 drivers/infiniband/hw/mlx5/main.c 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
mdev             1743 drivers/infiniband/hw/mlx5/main.c 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
mdev             1752 drivers/infiniband/hw/mlx5/main.c 	if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
mdev             1755 drivers/infiniband/hw/mlx5/main.c 	mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
mdev             1757 drivers/infiniband/hw/mlx5/main.c 	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
mdev             1758 drivers/infiniband/hw/mlx5/main.c 	    (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
mdev             1759 drivers/infiniband/hw/mlx5/main.c 	     !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
mdev             1772 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1807 drivers/infiniband/hw/mlx5/main.c 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
mdev             1808 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
mdev             1809 drivers/infiniband/hw/mlx5/main.c 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
mdev             1811 drivers/infiniband/hw/mlx5/main.c 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
mdev             1812 drivers/infiniband/hw/mlx5/main.c 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
mdev             1813 drivers/infiniband/hw/mlx5/main.c 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
mdev             1814 drivers/infiniband/hw/mlx5/main.c 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
mdev             1815 drivers/infiniband/hw/mlx5/main.c 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
mdev             1817 drivers/infiniband/hw/mlx5/main.c 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
mdev             1819 drivers/infiniband/hw/mlx5/main.c 	resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
mdev             1821 drivers/infiniband/hw/mlx5/main.c 	resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
mdev             1822 drivers/infiniband/hw/mlx5/main.c 					MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
mdev             1826 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
mdev             1827 drivers/infiniband/hw/mlx5/main.c 		if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
mdev             1829 drivers/infiniband/hw/mlx5/main.c 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
mdev             1831 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
mdev             1833 drivers/infiniband/hw/mlx5/main.c 		if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
mdev             1879 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
mdev             1880 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
mdev             1902 drivers/infiniband/hw/mlx5/main.c 			mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
mdev             1909 drivers/infiniband/hw/mlx5/main.c 		if (mdev->clock_info)
mdev             1942 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
mdev             1961 drivers/infiniband/hw/mlx5/main.c 		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             2011 drivers/infiniband/hw/mlx5/main.c 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
mdev             2013 drivers/infiniband/hw/mlx5/main.c 	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
mdev             2073 drivers/infiniband/hw/mlx5/main.c 	if (!dev->mdev->clock_info)
mdev             2077 drivers/infiniband/hw/mlx5/main.c 			      virt_to_page(dev->mdev->clock_info));
mdev             2155 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
mdev             2184 drivers/infiniband/hw/mlx5/main.c 	mlx5_cmd_free_uar(dev->mdev, idx);
mdev             2205 drivers/infiniband/hw/mlx5/main.c 	pfn = ((dev->mdev->bar_addr +
mdev             2206 drivers/infiniband/hw/mlx5/main.c 	      MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
mdev             2243 drivers/infiniband/hw/mlx5/main.c 		pfn = (dev->mdev->iseg_base +
mdev             2267 drivers/infiniband/hw/mlx5/main.c 		if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
mdev             2276 drivers/infiniband/hw/mlx5/main.c 		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
mdev             2277 drivers/infiniband/hw/mlx5/main.c 		      MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
mdev             2336 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
mdev             2426 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
mdev             2482 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
mdev             2492 drivers/infiniband/hw/mlx5/main.c 			mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
mdev             2502 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
mdev             2505 drivers/infiniband/hw/mlx5/main.c 	mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
mdev             2677 drivers/infiniband/hw/mlx5/main.c static int parse_flow_attr(struct mlx5_core_dev *mdev,
mdev             2704 drivers/infiniband/hw/mlx5/main.c 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             2711 drivers/infiniband/hw/mlx5/main.c 		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             2926 drivers/infiniband/hw/mlx5/main.c 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             2941 drivers/infiniband/hw/mlx5/main.c 			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             2957 drivers/infiniband/hw/mlx5/main.c 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             2971 drivers/infiniband/hw/mlx5/main.c 				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             3079 drivers/infiniband/hw/mlx5/main.c is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
mdev             3104 drivers/infiniband/hw/mlx5/main.c static bool is_valid_spec(struct mlx5_core_dev *mdev,
mdev             3110 drivers/infiniband/hw/mlx5/main.c 	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
mdev             3113 drivers/infiniband/hw/mlx5/main.c static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
mdev             3119 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             3121 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev             3162 drivers/infiniband/hw/mlx5/main.c static bool is_valid_attr(struct mlx5_core_dev *mdev,
mdev             3165 drivers/infiniband/hw/mlx5/main.c 	return is_valid_ethertype(mdev, flow_attr, false) &&
mdev             3166 drivers/infiniband/hw/mlx5/main.c 	       is_valid_ethertype(mdev, flow_attr, true);
mdev             3272 drivers/infiniband/hw/mlx5/main.c 	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev             3274 drivers/infiniband/hw/mlx5/main.c 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
mdev             3289 drivers/infiniband/hw/mlx5/main.c 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
mdev             3292 drivers/infiniband/hw/mlx5/main.c 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev             3297 drivers/infiniband/hw/mlx5/main.c 				BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
mdev             3302 drivers/infiniband/hw/mlx5/main.c 			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
mdev             3305 drivers/infiniband/hw/mlx5/main.c 		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
mdev             3310 drivers/infiniband/hw/mlx5/main.c 		ns = mlx5_get_flow_namespace(dev->mdev,
mdev             3317 drivers/infiniband/hw/mlx5/main.c 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
mdev             3321 drivers/infiniband/hw/mlx5/main.c 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
mdev             3355 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev             3370 drivers/infiniband/hw/mlx5/main.c 	return mlx5_fc_query(dev->mdev, fc,
mdev             3442 drivers/infiniband/hw/mlx5/main.c 			to_mdev(ibcounters->device)->mdev, false);
mdev             3474 drivers/infiniband/hw/mlx5/main.c 		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
mdev             3487 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
mdev             3534 drivers/infiniband/hw/mlx5/main.c 	if (!is_valid_attr(dev->mdev, flow_attr))
mdev             3550 drivers/infiniband/hw/mlx5/main.c 		err = parse_flow_attr(dev->mdev, spec,
mdev             3583 drivers/infiniband/hw/mlx5/main.c 	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
mdev             3945 drivers/infiniband/hw/mlx5/main.c 	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
mdev             3948 drivers/infiniband/hw/mlx5/main.c 		max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev             3950 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
mdev             3952 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
mdev             3958 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
mdev             3959 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
mdev             3963 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
mdev             3964 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
mdev             3966 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
mdev             3972 drivers/infiniband/hw/mlx5/main.c 			BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
mdev             3979 drivers/infiniband/hw/mlx5/main.c 	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
mdev             4190 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_ib_dev *mdev = to_mdev(device);
mdev             4253 drivers/infiniband/hw/mlx5/main.c 		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
mdev             4355 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
mdev             4371 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
mdev             4387 drivers/infiniband/hw/mlx5/main.c 	dev->mdev->rev_id = dev->mdev->pdev->revision;
mdev             4398 drivers/infiniband/hw/mlx5/main.c 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
mdev             4408 drivers/infiniband/hw/mlx5/main.c 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
mdev             4418 drivers/infiniband/hw/mlx5/main.c 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
mdev             4428 drivers/infiniband/hw/mlx5/main.c 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
mdev             4439 drivers/infiniband/hw/mlx5/main.c 		       dev->mdev->board_id);
mdev             4536 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
mdev             4702 drivers/infiniband/hw/mlx5/main.c 	for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
mdev             4703 drivers/infiniband/hw/mlx5/main.c 		dev->mdev->port_caps[port - 1].has_smi = false;
mdev             4704 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
mdev             4706 drivers/infiniband/hw/mlx5/main.c 			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
mdev             4707 drivers/infiniband/hw/mlx5/main.c 				err = mlx5_query_hca_vport_context(dev->mdev, 0,
mdev             4715 drivers/infiniband/hw/mlx5/main.c 				dev->mdev->port_caps[port - 1].has_smi =
mdev             4718 drivers/infiniband/hw/mlx5/main.c 				dev->mdev->port_caps[port - 1].has_smi = true;
mdev             4760 drivers/infiniband/hw/mlx5/main.c 	dev->mdev->port_caps[port - 1].pkey_table_len =
mdev             4762 drivers/infiniband/hw/mlx5/main.c 	dev->mdev->port_caps[port - 1].gid_table_len =
mdev             5091 drivers/infiniband/hw/mlx5/main.c 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
mdev             5092 drivers/infiniband/hw/mlx5/main.c 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
mdev             5093 drivers/infiniband/hw/mlx5/main.c 	bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
mdev             5134 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
mdev             5143 drivers/infiniband/hw/mlx5/main.c 	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
mdev             5173 drivers/infiniband/hw/mlx5/main.c 		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
mdev             5174 drivers/infiniband/hw/mlx5/main.c 		 fw_rev_sub(dev->mdev));
mdev             5179 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             5180 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
mdev             5185 drivers/infiniband/hw/mlx5/main.c 	if (!ns || !mlx5_lag_is_roce(mdev))
mdev             5188 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_cmd_create_vport_lag(mdev);
mdev             5203 drivers/infiniband/hw/mlx5/main.c 	mlx5_cmd_destroy_vport_lag(mdev);
mdev             5209 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             5217 drivers/infiniband/hw/mlx5/main.c 		mlx5_cmd_destroy_vport_lag(mdev);
mdev             5247 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, roce)) {
mdev             5248 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_nic_vport_enable_roce(dev->mdev);
mdev             5260 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, roce))
mdev             5261 drivers/infiniband/hw/mlx5/main.c 		mlx5_nic_vport_disable_roce(dev->mdev);
mdev             5269 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, roce))
mdev             5270 drivers/infiniband/hw/mlx5/main.c 		mlx5_nic_vport_disable_roce(dev->mdev);
mdev             5331 drivers/infiniband/hw/mlx5/main.c static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
mdev             5333 drivers/infiniband/hw/mlx5/main.c 	return MLX5_ESWITCH_MANAGER(mdev) &&
mdev             5334 drivers/infiniband/hw/mlx5/main.c 	       mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
mdev             5343 drivers/infiniband/hw/mlx5/main.c 	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
mdev             5347 drivers/infiniband/hw/mlx5/main.c 			mlx5_core_dealloc_q_counter(dev->mdev,
mdev             5361 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
mdev             5364 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
mdev             5367 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
mdev             5372 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev             5376 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
mdev             5409 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
mdev             5416 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
mdev             5423 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
mdev             5430 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev             5437 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
mdev             5452 drivers/infiniband/hw/mlx5/main.c 	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
mdev             5453 drivers/infiniband/hw/mlx5/main.c 	num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
mdev             5463 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
mdev             5485 drivers/infiniband/hw/mlx5/main.c 	return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
mdev             5510 drivers/infiniband/hw/mlx5/main.c 	bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
mdev             5524 drivers/infiniband/hw/mlx5/main.c static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
mdev             5538 drivers/infiniband/hw/mlx5/main.c 	ret = mlx5_core_query_q_counter(mdev, set_id, 0, out, outlen);
mdev             5565 drivers/infiniband/hw/mlx5/main.c 	ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
mdev             5584 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev;
mdev             5596 drivers/infiniband/hw/mlx5/main.c 	ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
mdev             5600 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
mdev             5606 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
mdev             5607 drivers/infiniband/hw/mlx5/main.c 		mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
mdev             5609 drivers/infiniband/hw/mlx5/main.c 		if (!mdev) {
mdev             5616 drivers/infiniband/hw/mlx5/main.c 		ret = mlx5_lag_query_cong_counters(dev->mdev,
mdev             5652 drivers/infiniband/hw/mlx5/main.c 	return mlx5_ib_query_q_counters(dev->mdev, cnts,
mdev             5664 drivers/infiniband/hw/mlx5/main.c 		err = mlx5_cmd_alloc_q_counter(dev->mdev,
mdev             5679 drivers/infiniband/hw/mlx5/main.c 	mlx5_core_dealloc_q_counter(dev->mdev, cnt_set_id);
mdev             5694 drivers/infiniband/hw/mlx5/main.c 	return mlx5_core_dealloc_q_counter(dev->mdev, counter->id);
mdev             5704 drivers/infiniband/hw/mlx5/main.c 	return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
mdev             5779 drivers/infiniband/hw/mlx5/main.c 				   dev->mdev->priv.dbg_root);
mdev             5832 drivers/infiniband/hw/mlx5/main.c 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
mdev             5852 drivers/infiniband/hw/mlx5/main.c 		mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
mdev             5876 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
mdev             5892 drivers/infiniband/hw/mlx5/main.c 	u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
mdev             5910 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
mdev             5914 drivers/infiniband/hw/mlx5/main.c 	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
mdev             5926 drivers/infiniband/hw/mlx5/main.c 	mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
mdev             5939 drivers/infiniband/hw/mlx5/main.c 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             5946 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
mdev             5949 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
mdev             5954 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_nic_vport_enable_roce(dev->mdev);
mdev             5967 drivers/infiniband/hw/mlx5/main.c 				mlx5_nic_vport_disable_roce(dev->mdev);
mdev             5972 drivers/infiniband/hw/mlx5/main.c 			mpi->mdev = dev->mdev;
mdev             5983 drivers/infiniband/hw/mlx5/main.c 			    (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
mdev             5988 drivers/infiniband/hw/mlx5/main.c 				dev_dbg(mpi->mdev->device,
mdev             6009 drivers/infiniband/hw/mlx5/main.c 	int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             6014 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
mdev             6035 drivers/infiniband/hw/mlx5/main.c 	mlx5_nic_vport_disable_roce(dev->mdev);
mdev             6119 drivers/infiniband/hw/mlx5/main.c 		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
mdev             6154 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6176 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_core_mp_enabled(mdev)) {
mdev             6183 drivers/infiniband/hw/mlx5/main.c 		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
mdev             6194 drivers/infiniband/hw/mlx5/main.c 	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
mdev             6195 drivers/infiniband/hw/mlx5/main.c 	dev->ib_dev.dev.parent		= mdev->device;
mdev             6202 drivers/infiniband/hw/mlx5/main.c 	dev->dm.dev = mdev;
mdev             6337 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6376 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
mdev             6381 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_core_is_pf(mdev))
mdev             6384 drivers/infiniband/hw/mlx5/main.c 	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
mdev             6386 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, imaicl)) {
mdev             6393 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(mdev, xrc)) {
mdev             6400 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_DEV_MEM(mdev, memic) ||
mdev             6401 drivers/infiniband/hw/mlx5/main.c 	    MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
mdev             6405 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
mdev             6417 drivers/infiniband/hw/mlx5/main.c 	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
mdev             6418 drivers/infiniband/hw/mlx5/main.c 	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
mdev             6419 drivers/infiniband/hw/mlx5/main.c 	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
mdev             6470 drivers/infiniband/hw/mlx5/main.c 	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             6478 drivers/infiniband/hw/mlx5/main.c 	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             6485 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6490 drivers/infiniband/hw/mlx5/main.c 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
mdev             6506 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6511 drivers/infiniband/hw/mlx5/main.c 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
mdev             6533 drivers/infiniband/hw/mlx5/main.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6537 drivers/infiniband/hw/mlx5/main.c 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
mdev             6578 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
mdev             6589 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mdev             6596 drivers/infiniband/hw/mlx5/main.c 				  mlx5_core_native_port_num(dev->mdev) - 1);
mdev             6603 drivers/infiniband/hw/mlx5/main.c 				     mlx5_core_native_port_num(dev->mdev) - 1);
mdev             6608 drivers/infiniband/hw/mlx5/main.c 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
mdev             6609 drivers/infiniband/hw/mlx5/main.c 	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
mdev             6614 drivers/infiniband/hw/mlx5/main.c 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
mdev             6621 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
mdev             6625 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
mdev             6627 drivers/infiniband/hw/mlx5/main.c 		mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mdev             6634 drivers/infiniband/hw/mlx5/main.c 	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mdev             6635 drivers/infiniband/hw/mlx5/main.c 	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
mdev             6643 drivers/infiniband/hw/mlx5/main.c 	if (!mlx5_lag_is_roce(dev->mdev))
mdev             6680 drivers/infiniband/hw/mlx5/main.c 	mlx5_notifier_register(dev->mdev, &dev->mdev_events);
mdev             6686 drivers/infiniband/hw/mlx5/main.c 	mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
mdev             6854 drivers/infiniband/hw/mlx5/main.c static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
mdev             6865 drivers/infiniband/hw/mlx5/main.c 	mpi->mdev = mdev;
mdev             6867 drivers/infiniband/hw/mlx5/main.c 	err = mlx5_query_nic_vport_system_image_guid(mdev,
mdev             6887 drivers/infiniband/hw/mlx5/main.c 		dev_dbg(mdev->device,
mdev             6895 drivers/infiniband/hw/mlx5/main.c static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
mdev             6904 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_ESWITCH_MANAGER(mdev) &&
mdev             6905 drivers/infiniband/hw/mlx5/main.c 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mdev             6906 drivers/infiniband/hw/mlx5/main.c 		if (!mlx5_core_mp_enabled(mdev))
mdev             6907 drivers/infiniband/hw/mlx5/main.c 			mlx5_ib_register_vport_reps(mdev);
mdev             6908 drivers/infiniband/hw/mlx5/main.c 		return mdev;
mdev             6911 drivers/infiniband/hw/mlx5/main.c 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
mdev             6914 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
mdev             6915 drivers/infiniband/hw/mlx5/main.c 		return mlx5_ib_add_slave_port(mdev);
mdev             6917 drivers/infiniband/hw/mlx5/main.c 	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
mdev             6918 drivers/infiniband/hw/mlx5/main.c 			MLX5_CAP_GEN(mdev, num_vhca_ports));
mdev             6929 drivers/infiniband/hw/mlx5/main.c 	dev->mdev = mdev;
mdev             6935 drivers/infiniband/hw/mlx5/main.c static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
mdev             6940 drivers/infiniband/hw/mlx5/main.c 	if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
mdev             6941 drivers/infiniband/hw/mlx5/main.c 		mlx5_ib_unregister_vport_reps(mdev);
mdev             6945 drivers/infiniband/hw/mlx5/main.c 	if (mlx5_core_is_mp_slave(mdev)) {
mdev              192 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_core_dev	*mdev;
mdev              853 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_core_dev *mdev;
mdev              955 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_core_dev		*mdev;
mdev             1462 drivers/infiniband/hw/mlx5/mlx5_ib.h 	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
mdev             1485 drivers/infiniband/hw/mlx5/mlx5_ib.h 	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
mdev             1489 drivers/infiniband/hw/mlx5/mlx5_ib.h 	    MLX5_CAP_GEN(dev->mdev, atomic) &&
mdev             1490 drivers/infiniband/hw/mlx5/mlx5_ib.h 	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
mdev               57 drivers/infiniband/hw/mlx5/mr.c 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
mdev               62 drivers/infiniband/hw/mlx5/mr.c 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
mdev               97 drivers/infiniband/hw/mlx5/mr.c 	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
mdev              112 drivers/infiniband/hw/mlx5/mr.c 	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
mdev              113 drivers/infiniband/hw/mlx5/mr.c 	key = dev->mdev->priv.mkey_key++;
mdev              114 drivers/infiniband/hw/mlx5/mr.c 	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
mdev              180 drivers/infiniband/hw/mlx5/mr.c 		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
mdev              218 drivers/infiniband/hw/mlx5/mr.c 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
mdev              555 drivers/infiniband/hw/mlx5/mr.c 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
mdev              587 drivers/infiniband/hw/mlx5/mr.c 	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
mdev              620 drivers/infiniband/hw/mlx5/mr.c 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
mdev              646 drivers/infiniband/hw/mlx5/mr.c 		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
mdev              648 drivers/infiniband/hw/mlx5/mr.c 		    mlx5_core_is_pf(dev->mdev))
mdev              649 drivers/infiniband/hw/mlx5/mr.c 			ent->limit = dev->mdev->profile->mr_cache[i].limit;
mdev              686 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              716 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
mdev              750 drivers/infiniband/hw/mlx5/mr.c 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
mdev             1063 drivers/infiniband/hw/mlx5/mr.c 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
mdev             1112 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
mdev             1140 drivers/infiniband/hw/mlx5/mr.c 	atomic_add(npages, &dev->mdev->priv.reg_pages);
mdev             1152 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1183 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
mdev             1223 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
mdev             1297 drivers/infiniband/hw/mlx5/mr.c 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
mdev             1353 drivers/infiniband/hw/mlx5/mr.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1356 drivers/infiniband/hw/mlx5/mr.c 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
mdev             1413 drivers/infiniband/hw/mlx5/mr.c 	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
mdev             1555 drivers/infiniband/hw/mlx5/mr.c 		if (mlx5_core_destroy_psv(dev->mdev,
mdev             1559 drivers/infiniband/hw/mlx5/mr.c 		if (mlx5_core_destroy_psv(dev->mdev,
mdev             1607 drivers/infiniband/hw/mlx5/mr.c 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
mdev             1622 drivers/infiniband/hw/mlx5/mr.c 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
mdev             1676 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
mdev             1764 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
mdev             1809 drivers/infiniband/hw/mlx5/mr.c 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
mdev             1812 drivers/infiniband/hw/mlx5/mr.c 	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
mdev             1936 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
mdev             1949 drivers/infiniband/hw/mlx5/mr.c 			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
mdev             1970 drivers/infiniband/hw/mlx5/mr.c 		xa_erase_irq(&dev->mdev->priv.mkey_table,
mdev             1979 drivers/infiniband/hw/mlx5/mr.c 	err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
mdev              335 drivers/infiniband/hw/mlx5/odp.c 	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
mdev              341 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
mdev              346 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
mdev              349 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
mdev              352 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
mdev              355 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
mdev              358 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
mdev              361 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
mdev              364 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
mdev              367 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
mdev              370 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
mdev              373 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
mdev              376 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
mdev              379 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
mdev              382 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
mdev              385 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
mdev              388 drivers/infiniband/hw/mlx5/odp.c 	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
mdev              389 drivers/infiniband/hw/mlx5/odp.c 	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
mdev              390 drivers/infiniband/hw/mlx5/odp.c 	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
mdev              391 drivers/infiniband/hw/mlx5/odp.c 	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
mdev              413 drivers/infiniband/hw/mlx5/odp.c 	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              464 drivers/infiniband/hw/mlx5/odp.c 		    mr->mmkey.key, dev->mdev, mr);
mdev              796 drivers/infiniband/hw/mlx5/odp.c 	mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
mdev              850 drivers/infiniband/hw/mlx5/odp.c 		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mdev              872 drivers/infiniband/hw/mlx5/odp.c 		ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
mdev             1141 drivers/infiniband/hw/mlx5/odp.c 		common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
mdev             1521 drivers/infiniband/hw/mlx5/odp.c 	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
mdev             1526 drivers/infiniband/hw/mlx5/odp.c 	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
mdev             1534 drivers/infiniband/hw/mlx5/odp.c 	mlx5_eq_destroy_generic(dev->mdev, eq->core);
mdev             1547 drivers/infiniband/hw/mlx5/odp.c 	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
mdev             1548 drivers/infiniband/hw/mlx5/odp.c 	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
mdev             1597 drivers/infiniband/hw/mlx5/odp.c 		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
mdev             1646 drivers/infiniband/hw/mlx5/odp.c 		mmkey = xa_load(&dev->mdev->priv.mkey_table,
mdev             1666 drivers/infiniband/hw/mlx5/odp.c 		mmkey = xa_load(&dev->mdev->priv.mkey_table,
mdev              337 drivers/infiniband/hw/mlx5/qp.c 	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
mdev              363 drivers/infiniband/hw/mlx5/qp.c 			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
mdev              366 drivers/infiniband/hw/mlx5/qp.c 					    MLX5_CAP_GEN(dev->mdev,
mdev              490 drivers/infiniband/hw/mlx5/qp.c 	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mdev              492 drivers/infiniband/hw/mlx5/qp.c 			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
mdev              502 drivers/infiniband/hw/mlx5/qp.c 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mdev              506 drivers/infiniband/hw/mlx5/qp.c 			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
mdev              529 drivers/infiniband/hw/mlx5/qp.c 	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mdev              531 drivers/infiniband/hw/mlx5/qp.c 			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
mdev              543 drivers/infiniband/hw/mlx5/qp.c 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mdev              546 drivers/infiniband/hw/mlx5/qp.c 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
mdev             1055 drivers/infiniband/hw/mlx5/qp.c 	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
mdev             1068 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
mdev             1069 drivers/infiniband/hw/mlx5/qp.c 				       &qp->buf, dev->mdev->priv.numa_node);
mdev             1116 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_db_alloc(dev->mdev, &qp->db);
mdev             1148 drivers/infiniband/hw/mlx5/qp.c 	mlx5_db_free(dev->mdev, &qp->db);
mdev             1154 drivers/infiniband/hw/mlx5/qp.c 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
mdev             1165 drivers/infiniband/hw/mlx5/qp.c 	mlx5_db_free(dev->mdev, &qp->db);
mdev             1166 drivers/infiniband/hw/mlx5/qp.c 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
mdev             1203 drivers/infiniband/hw/mlx5/qp.c 	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
mdev             1209 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
mdev             1253 drivers/infiniband/hw/mlx5/qp.c 	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
mdev             1260 drivers/infiniband/hw/mlx5/qp.c 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
mdev             1261 drivers/infiniband/hw/mlx5/qp.c 	    MLX5_CAP_ETH(dev->mdev, swp))
mdev             1277 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
mdev             1297 drivers/infiniband/hw/mlx5/qp.c 	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
mdev             1367 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
mdev             1377 drivers/infiniband/hw/mlx5/qp.c 	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
mdev             1395 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
mdev             1436 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
mdev             1508 drivers/infiniband/hw/mlx5/qp.c 			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
mdev             1582 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
mdev             1650 drivers/infiniband/hw/mlx5/qp.c 	    !tunnel_offload_supported(dev->mdev)) {
mdev             1794 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
mdev             1801 drivers/infiniband/hw/mlx5/qp.c 			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
mdev             1811 drivers/infiniband/hw/mlx5/qp.c 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
mdev             1836 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
mdev             1889 drivers/infiniband/hw/mlx5/qp.c 	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
mdev             1915 drivers/infiniband/hw/mlx5/qp.c 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
mdev             1916 drivers/infiniband/hw/mlx5/qp.c 	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
mdev             1924 drivers/infiniband/hw/mlx5/qp.c 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
mdev             1926 drivers/infiniband/hw/mlx5/qp.c 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
mdev             1951 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             1983 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
mdev             1995 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(mdev, cd)) {
mdev             2009 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
mdev             2019 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
mdev             2020 drivers/infiniband/hw/mlx5/qp.c 		    !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
mdev             2031 drivers/infiniband/hw/mlx5/qp.c 		if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
mdev             2032 drivers/infiniband/hw/mlx5/qp.c 		      MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
mdev             2062 drivers/infiniband/hw/mlx5/qp.c 		if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
mdev             2066 drivers/infiniband/hw/mlx5/qp.c 			    !tunnel_offload_supported(mdev)) {
mdev             2091 drivers/infiniband/hw/mlx5/qp.c 				!MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
mdev             2100 drivers/infiniband/hw/mlx5/qp.c 			    (MLX5_CAP_GEN(dev->mdev, port_type) !=
mdev             2102 drivers/infiniband/hw/mlx5/qp.c 			    !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
mdev             2130 drivers/infiniband/hw/mlx5/qp.c 				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mdev             2252 drivers/infiniband/hw/mlx5/qp.c 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
mdev             2263 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
mdev             2287 drivers/infiniband/hw/mlx5/qp.c 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
mdev             2453 drivers/infiniband/hw/mlx5/qp.c 			err = mlx5_core_qp_modify(dev->mdev,
mdev             2495 drivers/infiniband/hw/mlx5/qp.c 		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
mdev             2619 drivers/infiniband/hw/mlx5/qp.c 	if (!MLX5_CAP_GEN(dev->mdev, dct)) {
mdev             2686 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
mdev             2758 drivers/infiniband/hw/mlx5/qp.c 		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
mdev             2847 drivers/infiniband/hw/mlx5/qp.c 		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
mdev             2925 drivers/infiniband/hw/mlx5/qp.c 		    dev->mdev->port_caps[port - 1].gid_table_len) {
mdev             2928 drivers/infiniband/hw/mlx5/qp.c 			       dev->mdev->port_caps[port - 1].gid_table_len);
mdev             2978 drivers/infiniband/hw/mlx5/qp.c 		return modify_raw_packet_eth_prio(dev->mdev,
mdev             3170 drivers/infiniband/hw/mlx5/qp.c 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
mdev             3180 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
mdev             3325 drivers/infiniband/hw/mlx5/qp.c 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
mdev             3337 drivers/infiniband/hw/mlx5/qp.c 		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
mdev             3402 drivers/infiniband/hw/mlx5/qp.c 	return mlx5_core_qp_modify(dev->mdev,
mdev             3503 drivers/infiniband/hw/mlx5/qp.c 				u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
mdev             3525 drivers/infiniband/hw/mlx5/qp.c 				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
mdev             3667 drivers/infiniband/hw/mlx5/qp.c 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
mdev             3678 drivers/infiniband/hw/mlx5/qp.c 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
mdev             3692 drivers/infiniband/hw/mlx5/qp.c 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
mdev             3822 drivers/infiniband/hw/mlx5/qp.c 		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
mdev             3867 drivers/infiniband/hw/mlx5/qp.c 		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
mdev             3875 drivers/infiniband/hw/mlx5/qp.c 			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
mdev             3978 drivers/infiniband/hw/mlx5/qp.c 		    dev->mdev->port_caps[port - 1].pkey_table_len) {
mdev             3987 drivers/infiniband/hw/mlx5/qp.c 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
mdev             3995 drivers/infiniband/hw/mlx5/qp.c 	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
mdev             4283 drivers/infiniband/hw/mlx5/qp.c 	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
mdev             4285 drivers/infiniband/hw/mlx5/qp.c 	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
mdev             4824 drivers/infiniband/hw/mlx5/qp.c 			     (MLX5_CAP_GEN(dev->mdev,
mdev             4967 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             4988 drivers/infiniband/hw/mlx5/qp.c 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
mdev             5230 drivers/infiniband/hw/mlx5/qp.c 			if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
mdev             5271 drivers/infiniband/hw/mlx5/qp.c 			err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
mdev             5362 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             5369 drivers/infiniband/hw/mlx5/qp.c 	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
mdev             5519 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
mdev             5542 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
mdev             5640 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
mdev             5719 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
mdev             5845 drivers/infiniband/hw/mlx5/qp.c 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
mdev             5852 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
mdev             5867 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
mdev             5905 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
mdev             5950 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
mdev             5973 drivers/infiniband/hw/mlx5/qp.c 	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
mdev             5975 drivers/infiniband/hw/mlx5/qp.c 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
mdev             5984 drivers/infiniband/hw/mlx5/qp.c 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
mdev             6002 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
mdev             6008 drivers/infiniband/hw/mlx5/qp.c 			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
mdev             6024 drivers/infiniband/hw/mlx5/qp.c 	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
mdev             6073 drivers/infiniband/hw/mlx5/qp.c 		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
mdev             6175 drivers/infiniband/hw/mlx5/qp.c 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
mdev             6188 drivers/infiniband/hw/mlx5/qp.c 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
mdev             6214 drivers/infiniband/hw/mlx5/qp.c 	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
mdev             6217 drivers/infiniband/hw/mlx5/qp.c 			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
mdev             6247 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
mdev             6265 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
mdev             6276 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
mdev             6332 drivers/infiniband/hw/mlx5/qp.c 			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
mdev             6333 drivers/infiniband/hw/mlx5/qp.c 			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
mdev             6356 drivers/infiniband/hw/mlx5/qp.c 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
mdev             6366 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
mdev             6394 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6402 drivers/infiniband/hw/mlx5/qp.c 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mdev             6454 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6457 drivers/infiniband/hw/mlx5/qp.c 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mdev             6483 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev             6486 drivers/infiniband/hw/mlx5/qp.c 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mdev             6520 drivers/infiniband/hw/mlx5/qp.c 	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
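Nearly every qp.c hit above is a capability gate: a requested size or feature is validated against MLX5_CAP_GEN()/MLX5_CAP_ETH() fields of the core device before any firmware command is built (see qp.c:337, 490 and 502 above). A condensed sketch of that pattern; the helper name is illustrative, the two capability fields are the ones visible above.

#include <linux/mlx5/driver.h>

static int demo_check_sq_size(struct mlx5_core_dev *mdev, int wqe_size,
			      int wqe_cnt)
{
	/* Per-WQE size is bounded by max_wqe_sz_sq... */
	if (wqe_size > MLX5_CAP_GEN(mdev, max_wqe_sz_sq))
		return -EINVAL;

	/* ...and the ring depth by 2^log_max_qp_sz. */
	if (wqe_cnt > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
		return -EINVAL;

	return 0;
}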
mdev              116 drivers/infiniband/hw/mlx5/srq.c 	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
mdev              138 drivers/infiniband/hw/mlx5/srq.c 	err = mlx5_db_alloc(dev->mdev, &srq->db);
mdev              144 drivers/infiniband/hw/mlx5/srq.c 	if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
mdev              145 drivers/infiniband/hw/mlx5/srq.c 				     dev->mdev->priv.numa_node)) {
mdev              180 drivers/infiniband/hw/mlx5/srq.c 	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
mdev              190 drivers/infiniband/hw/mlx5/srq.c 	mlx5_frag_buf_free(dev->mdev, &srq->buf);
mdev              193 drivers/infiniband/hw/mlx5/srq.c 	mlx5_db_free(dev->mdev, &srq->db);
mdev              213 drivers/infiniband/hw/mlx5/srq.c 	mlx5_frag_buf_free(dev->mdev, &srq->buf);
mdev              214 drivers/infiniband/hw/mlx5/srq.c 	mlx5_db_free(dev->mdev, &srq->db);
mdev              227 drivers/infiniband/hw/mlx5/srq.c 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
mdev              286 drivers/infiniband/hw/mlx5/srq.c 		    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
mdev              427 drivers/infiniband/hw/mlx5/srq.c 	struct mlx5_core_dev *mdev = dev->mdev;
mdev              435 drivers/infiniband/hw/mlx5/srq.c 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
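The srq.c hits (like the earlier qp.c ones around qp.c:1116-1166) show the usual doorbell/buffer pairing: mlx5_db_alloc() plus mlx5_frag_buf_alloc_node() on the way in, mlx5_frag_buf_free() plus mlx5_db_free() on the way out, with the buffer placed on the device's NUMA node. A minimal sketch with an illustrative container struct:

#include <linux/mlx5/driver.h>

struct demo_wq {
	struct mlx5_frag_buf buf;
	struct mlx5_db db;
};

static int demo_wq_alloc(struct mlx5_core_dev *mdev, struct demo_wq *wq,
			 int buf_size)
{
	int err;

	err = mlx5_db_alloc(mdev, &wq->db);
	if (err)
		return err;

	/* Fragmented buffer on the device's NUMA node, as srq.c:144-145 does. */
	err = mlx5_frag_buf_alloc_node(mdev, buf_size, &wq->buf,
				       mdev->priv.numa_node);
	if (err) {
		mlx5_db_free(mdev, &wq->db);
		return err;
	}
	return 0;
}

static void demo_wq_free(struct mlx5_core_dev *mdev, struct demo_wq *wq)
{
	/* Reverse order, as in srq.c:213-214. */
	mlx5_frag_buf_free(mdev, &wq->buf);
	mlx5_db_free(mdev, &wq->db);
}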
mdev              122 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
mdev              143 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
mdev              159 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
mdev              178 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
mdev              222 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
mdev              245 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
mdev              261 drivers/infiniband/hw/mlx5/srq_cmd.c 	return  mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
mdev              283 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
mdev              330 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
mdev              350 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              387 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
mdev              417 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
mdev              467 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
mdev              487 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              503 drivers/infiniband/hw/mlx5/srq_cmd.c 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
mdev              522 drivers/infiniband/hw/mlx5/srq_cmd.c 	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
mdev              548 drivers/infiniband/hw/mlx5/srq_cmd.c 	if (!dev->mdev->issi)
mdev              562 drivers/infiniband/hw/mlx5/srq_cmd.c 	if (!dev->mdev->issi)
mdev              631 drivers/infiniband/hw/mlx5/srq_cmd.c 	if (!dev->mdev->issi)
mdev              646 drivers/infiniband/hw/mlx5/srq_cmd.c 	if (!dev->mdev->issi)
mdev              699 drivers/infiniband/hw/mlx5/srq_cmd.c 	mlx5_notifier_register(dev->mdev, &table->nb);
mdev              708 drivers/infiniband/hw/mlx5/srq_cmd.c 	mlx5_notifier_unregister(dev->mdev, &table->nb);
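Every helper in srq_cmd.c above has the same shape: build a firmware command in an `in` buffer, run it with mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)), and parse the `out` buffer. Only the mlx5_cmd_exec() calls themselves are visible in the listing, so the sketch below fills in the rest with the usual MLX5_ST_SZ_DW/MLX5_SET helpers and assumed mlx5_ifc field names (opcode, srqn); treat that layout as an assumption.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>

static int demo_destroy_srq(struct mlx5_core_dev *mdev, u32 srqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_srq_out)] = {};

	/* Field names assumed from the mlx5_ifc command layouts. */
	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, in, srqn, srqn);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}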
mdev              366 drivers/infiniband/hw/mthca/mthca_dev.h #define mthca_dbg(mdev, format, arg...)					\
mdev              369 drivers/infiniband/hw/mthca/mthca_dev.h 			dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
mdev              374 drivers/infiniband/hw/mthca/mthca_dev.h #define mthca_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
mdev              378 drivers/infiniband/hw/mthca/mthca_dev.h #define mthca_err(mdev, format, arg...) \
mdev              379 drivers/infiniband/hw/mthca/mthca_dev.h 	dev_err(&mdev->pdev->dev, format, ## arg)
mdev              380 drivers/infiniband/hw/mthca/mthca_dev.h #define mthca_info(mdev, format, arg...) \
mdev              381 drivers/infiniband/hw/mthca/mthca_dev.h 	dev_info(&mdev->pdev->dev, format, ## arg)
mdev              382 drivers/infiniband/hw/mthca/mthca_dev.h #define mthca_warn(mdev, format, arg...) \
mdev              383 drivers/infiniband/hw/mthca/mthca_dev.h 	dev_warn(&mdev->pdev->dev, format, ## arg)
mdev              412 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_reset(struct mthca_dev *mdev);
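The mthca_dev.h block above defines the driver's logging wrappers: mthca_dbg() compiles to nothing unless debugging is enabled, while mthca_err()/mthca_info()/mthca_warn() forward to dev_err()/dev_info()/dev_warn() on &mdev->pdev->dev, so every message carries the PCI device name. Typical usage, adapted (and slightly condensed) from the QUERY_FW check in mthca_init_tavor() below; the header names are the driver's own local ones.

#include "mthca_dev.h"	/* struct mthca_dev, mthca_err() */
#include "mthca_cmd.h"	/* mthca_QUERY_FW() */

static int demo_query_fw(struct mthca_dev *mdev)
{
	int err;

	err = mthca_QUERY_FW(mdev);
	if (err) {
		/* dev_err() against &mdev->pdev->dev, so the PCI name is in the log. */
		mthca_err(mdev, "QUERY_FW command returned %d, aborting.\n", err);
		return err;
	}
	return 0;
}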
mdev              136 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_tune_pci(struct mthca_dev *mdev)
mdev              142 drivers/infiniband/hw/mthca/mthca_main.c 	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
mdev              143 drivers/infiniband/hw/mthca/mthca_main.c 		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
mdev              144 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_err(mdev, "Couldn't set PCI-X max read count, "
mdev              148 drivers/infiniband/hw/mthca/mthca_main.c 	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
mdev              149 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
mdev              151 drivers/infiniband/hw/mthca/mthca_main.c 	if (pci_is_pcie(mdev->pdev)) {
mdev              152 drivers/infiniband/hw/mthca/mthca_main.c 		if (pcie_set_readrq(mdev->pdev, 4096)) {
mdev              153 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_err(mdev, "Couldn't write PCI Express read request, "
mdev              157 drivers/infiniband/hw/mthca/mthca_main.c 	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
mdev              158 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_info(mdev, "No PCI Express capability, "
mdev              164 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
mdev              168 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
mdev              169 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
mdev              171 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
mdev              176 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "HCA minimum page size of %d bigger than "
mdev              182 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
mdev              188 drivers/infiniband/hw/mthca/mthca_main.c 	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
mdev              189 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
mdev              192 drivers/infiniband/hw/mthca/mthca_main.c 			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
mdev              196 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.num_ports      	= dev_lim->num_ports;
mdev              197 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.vl_cap             = dev_lim->max_vl;
mdev              198 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.mtu_cap            = dev_lim->max_mtu;
mdev              199 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.gid_table_len  	= dev_lim->max_gids;
mdev              200 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
mdev              201 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
mdev              207 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
mdev              210 drivers/infiniband/hw/mthca/mthca_main.c 					       (mthca_is_memfree(mdev) ?
mdev              214 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
mdev              215 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
mdev              216 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
mdev              217 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
mdev              218 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
mdev              219 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
mdev              220 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
mdev              221 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
mdev              227 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
mdev              228 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
mdev              229 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
mdev              230 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
mdev              231 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
mdev              232 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
mdev              233 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
mdev              234 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.port_width_cap     = dev_lim->max_port_width;
mdev              235 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
mdev              236 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.flags              = dev_lim->flags;
mdev              244 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
mdev              245 drivers/infiniband/hw/mthca/mthca_main.c 	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
mdev              246 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->limits.stat_rate_support = 0xf;
mdev              248 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->limits.stat_rate_support = 0x3;
mdev              257 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
mdev              263 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
mdev              266 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
mdev              269 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;
mdev              272 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
mdev              275 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
mdev              278 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->mthca_flags |= MTHCA_FLAG_SRQ;
mdev              280 drivers/infiniband/hw/mthca/mthca_main.c 	if (mthca_is_memfree(mdev))
mdev              282 drivers/infiniband/hw/mthca/mthca_main.c 			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
mdev              287 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_init_tavor(struct mthca_dev *mdev)
mdev              295 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_SYS_EN(mdev);
mdev              297 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
mdev              301 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_QUERY_FW(mdev);
mdev              303 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_FW command returned %d,"
mdev              307 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_QUERY_DDR(mdev);
mdev              309 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
mdev              313 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_dev_lim(mdev, &dev_lim);
mdev              315 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
mdev              322 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mdev              325 drivers/infiniband/hw/mthca/mthca_main.c 	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
mdev              331 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_INIT_HCA(mdev, &init_hca);
mdev              333 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
mdev              340 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_SYS_DIS(mdev);
mdev              345 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_load_fw(struct mthca_dev *mdev)
mdev              351 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->fw.arbel.fw_icm =
mdev              352 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
mdev              354 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->fw.arbel.fw_icm) {
mdev              355 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
mdev              359 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
mdev              361 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
mdev              364 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_RUN_FW(mdev);
mdev              366 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
mdev              373 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_UNMAP_FA(mdev);
mdev              376 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
mdev              380 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_init_icm(struct mthca_dev *mdev,
mdev              388 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
mdev              390 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
mdev              394 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
mdev              398 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
mdev              400 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->fw.arbel.aux_icm) {
mdev              401 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
mdev              405 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
mdev              407 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
mdev              411 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
mdev              413 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
mdev              418 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
mdev              419 drivers/infiniband/hw/mthca/mthca_main.c 					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
mdev              421 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
mdev              422 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.mtt_seg_size,
mdev              423 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.num_mtt_segs,
mdev              424 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.reserved_mtts,
mdev              426 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->mr_table.mtt_table) {
mdev              427 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
mdev              432 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
mdev              434 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.num_mpts,
mdev              435 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.reserved_mrws,
mdev              437 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->mr_table.mpt_table) {
mdev              438 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
mdev              443 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
mdev              445 drivers/infiniband/hw/mthca/mthca_main.c 							mdev->limits.num_qps,
mdev              446 drivers/infiniband/hw/mthca/mthca_main.c 							mdev->limits.reserved_qps,
mdev              448 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->qp_table.qp_table) {
mdev              449 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
mdev              454 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
mdev              456 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.num_qps,
mdev              457 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.reserved_qps,
mdev              459 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->qp_table.eqp_table) {
mdev              460 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
mdev              465 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
mdev              467 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->limits.num_qps <<
mdev              468 drivers/infiniband/hw/mthca/mthca_main.c 							 mdev->qp_table.rdb_shift, 0,
mdev              470 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->qp_table.rdb_table) {
mdev              471 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
mdev              476 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
mdev              478 drivers/infiniband/hw/mthca/mthca_main.c 						     mdev->limits.num_cqs,
mdev              479 drivers/infiniband/hw/mthca/mthca_main.c 						     mdev->limits.reserved_cqs,
mdev              481 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->cq_table.table) {
mdev              482 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
mdev              487 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
mdev              488 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->srq_table.table =
mdev              489 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
mdev              491 drivers/infiniband/hw/mthca/mthca_main.c 					      mdev->limits.num_srqs,
mdev              492 drivers/infiniband/hw/mthca/mthca_main.c 					      mdev->limits.reserved_srqs,
mdev              494 drivers/infiniband/hw/mthca/mthca_main.c 		if (!mdev->srq_table.table) {
mdev              495 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_err(mdev, "Failed to map SRQ context memory, "
mdev              507 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
mdev              509 drivers/infiniband/hw/mthca/mthca_main.c 						      mdev->limits.num_mgms +
mdev              510 drivers/infiniband/hw/mthca/mthca_main.c 						      mdev->limits.num_amgms,
mdev              511 drivers/infiniband/hw/mthca/mthca_main.c 						      mdev->limits.num_mgms +
mdev              512 drivers/infiniband/hw/mthca/mthca_main.c 						      mdev->limits.num_amgms,
mdev              514 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev->mcg_table.table) {
mdev              515 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
mdev              523 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mdev              524 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_free_icm_table(mdev, mdev->srq_table.table);
mdev              527 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->cq_table.table);
mdev              530 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mdev              533 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
mdev              536 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
mdev              539 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
mdev              542 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
mdev              545 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_unmap_eq_icm(mdev);
mdev              548 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_UNMAP_ICM_AUX(mdev);
mdev              551 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
mdev              556 drivers/infiniband/hw/mthca/mthca_main.c static void mthca_free_icms(struct mthca_dev *mdev)
mdev              559 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->mcg_table.table);
mdev              560 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mdev              561 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_free_icm_table(mdev, mdev->srq_table.table);
mdev              562 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->cq_table.table);
mdev              563 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mdev              564 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
mdev              565 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
mdev              566 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
mdev              567 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
mdev              568 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_unmap_eq_icm(mdev);
mdev              570 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_UNMAP_ICM_AUX(mdev);
mdev              571 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
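mthca_init_icm() above allocates the ICM context tables strictly in order and unwinds them through a chain of error labels in reverse, and mthca_free_icms() repeats the same reverse order for normal teardown. A condensed sketch of that pattern with just two tables; the entry-size and trailing use-lowmem arguments of mthca_alloc_icm_table() are truncated in the listing, so the qpc_entry_sz/cqc_entry_sz parameters and the final 0 below are assumptions rather than the driver's values.

#include "mthca_dev.h"		/* struct mthca_dev, mthca_err() */
#include "mthca_memfree.h"	/* mthca_alloc_icm_table(), mthca_free_icm_table() */

static int demo_init_two_tables(struct mthca_dev *mdev,
				u64 qpc_base, int qpc_entry_sz,
				u64 cqc_base, int cqc_entry_sz)
{
	int err = -ENOMEM;

	mdev->qp_table.qp_table =
		mthca_alloc_icm_table(mdev, qpc_base, qpc_entry_sz,
				      mdev->limits.num_qps,
				      mdev->limits.reserved_qps, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		return err;
	}

	mdev->cq_table.table =
		mthca_alloc_icm_table(mdev, cqc_base, cqc_entry_sz,
				      mdev->limits.num_cqs,
				      mdev->limits.reserved_cqs, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_qp;
	}

	return 0;

err_unmap_qp:
	/* Unwind in reverse allocation order, as the real error labels do. */
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	return err;
}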
mdev              574 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_init_arbel(struct mthca_dev *mdev)
mdev              582 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_QUERY_FW(mdev);
mdev              584 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
mdev              588 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_ENABLE_LAM(mdev);
mdev              590 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
mdev              591 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
mdev              593 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
mdev              597 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_load_fw(mdev);
mdev              599 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
mdev              603 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_dev_lim(mdev, &dev_lim);
mdev              605 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
mdev              612 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
mdev              615 drivers/infiniband/hw/mthca/mthca_main.c 	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
mdev              621 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
mdev              625 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_INIT_HCA(mdev, &init_hca);
mdev              627 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
mdev              634 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icms(mdev);
mdev              637 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_UNMAP_FA(mdev);
mdev              638 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
mdev              641 drivers/infiniband/hw/mthca/mthca_main.c 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
mdev              642 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_DISABLE_LAM(mdev);
mdev              647 drivers/infiniband/hw/mthca/mthca_main.c static void mthca_close_hca(struct mthca_dev *mdev)
mdev              649 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_CLOSE_HCA(mdev, 0);
mdev              651 drivers/infiniband/hw/mthca/mthca_main.c 	if (mthca_is_memfree(mdev)) {
mdev              652 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_free_icms(mdev);
mdev              654 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_UNMAP_FA(mdev);
mdev              655 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
mdev              657 drivers/infiniband/hw/mthca/mthca_main.c 		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
mdev              658 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_DISABLE_LAM(mdev);
mdev              660 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_SYS_DIS(mdev);
mdev              663 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_init_hca(struct mthca_dev *mdev)
mdev              668 drivers/infiniband/hw/mthca/mthca_main.c 	if (mthca_is_memfree(mdev))
mdev              669 drivers/infiniband/hw/mthca/mthca_main.c 		err = mthca_init_arbel(mdev);
mdev              671 drivers/infiniband/hw/mthca/mthca_main.c 		err = mthca_init_tavor(mdev);
mdev              676 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_QUERY_ADAPTER(mdev, &adapter);
mdev              678 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
mdev              682 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->eq_table.inta_pin = adapter.inta_pin;
mdev              683 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mthca_is_memfree(mdev))
mdev              684 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->rev_id = adapter.revision_id;
mdev              685 drivers/infiniband/hw/mthca/mthca_main.c 	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
mdev              690 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_close_hca(mdev);
mdev              851 drivers/infiniband/hw/mthca/mthca_main.c static int mthca_enable_msi_x(struct mthca_dev *mdev)
mdev              855 drivers/infiniband/hw/mthca/mthca_main.c 	err = pci_alloc_irq_vectors(mdev->pdev, 3, 3, PCI_IRQ_MSIX);
mdev              859 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector =
mdev              860 drivers/infiniband/hw/mthca/mthca_main.c 			pci_irq_vector(mdev->pdev, 0);
mdev              861 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector =
mdev              862 drivers/infiniband/hw/mthca/mthca_main.c 			pci_irq_vector(mdev->pdev, 1);
mdev              863 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector =
mdev              864 drivers/infiniband/hw/mthca/mthca_main.c 			pci_irq_vector(mdev->pdev, 2);
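mthca_enable_msi_x() above asks the PCI core for exactly three MSI-X vectors (min_vecs == max_vecs == 3) and records the Linux IRQ number of each via pci_irq_vector(). A minimal standalone sketch of the same pattern; the irqs[] array is illustrative, the two PCI calls are the ones used above. Note how the probe path simply keeps going without MSI-X when this fails, and retries mthca_setup_hca() without it on -EBUSY (mthca_main.c:1013-1021).

#include <linux/pci.h>

static int demo_enable_msi_x(struct pci_dev *pdev, int irqs[3])
{
	int err, i;

	/* All three vectors or nothing. */
	err = pci_alloc_irq_vectors(pdev, 3, 3, PCI_IRQ_MSIX);
	if (err < 0)
		return err;

	for (i = 0; i < 3; i++)
		irqs[i] = pci_irq_vector(pdev, i);

	return 0;
}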
mdev              901 drivers/infiniband/hw/mthca/mthca_main.c 	struct mthca_dev *mdev;
mdev              964 drivers/infiniband/hw/mthca/mthca_main.c 	mdev = ib_alloc_device(mthca_dev, ib_dev);
mdev              965 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev) {
mdev              972 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->pdev = pdev;
mdev              974 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
mdev              976 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
mdev              983 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_reset(mdev);
mdev              985 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
mdev              989 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_cmd_init(mdev);
mdev              991 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_err(mdev, "Failed to init command interface, aborting.\n");
mdev              995 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_tune_pci(mdev);
mdev              999 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_init_hca(mdev);
mdev             1003 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
mdev             1004 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
mdev             1005 drivers/infiniband/hw/mthca/mthca_main.c 			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
mdev             1006 drivers/infiniband/hw/mthca/mthca_main.c 			   (int) (mdev->fw_ver & 0xffff),
mdev             1010 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
mdev             1013 drivers/infiniband/hw/mthca/mthca_main.c 	if (msi_x && !mthca_enable_msi_x(mdev))
mdev             1014 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
mdev             1016 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_setup_hca(mdev);
mdev             1017 drivers/infiniband/hw/mthca/mthca_main.c 	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
mdev             1019 drivers/infiniband/hw/mthca/mthca_main.c 		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
mdev             1021 drivers/infiniband/hw/mthca/mthca_main.c 		err = mthca_setup_hca(mdev);
mdev             1027 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_register_device(mdev);
mdev             1031 drivers/infiniband/hw/mthca/mthca_main.c 	err = mthca_create_agents(mdev);
mdev             1035 drivers/infiniband/hw/mthca/mthca_main.c 	pci_set_drvdata(pdev, mdev);
mdev             1036 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->hca_type = hca_type;
mdev             1038 drivers/infiniband/hw/mthca/mthca_main.c 	mdev->active = true;
mdev             1043 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_unregister_device(mdev);
mdev             1046 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_mcg_table(mdev);
mdev             1047 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_av_table(mdev);
mdev             1048 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_qp_table(mdev);
mdev             1049 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_srq_table(mdev);
mdev             1050 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_cq_table(mdev);
mdev             1051 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cmd_use_polling(mdev);
mdev             1052 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_eq_table(mdev);
mdev             1054 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_pd_free(mdev, &mdev->driver_pd);
mdev             1056 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_mr_table(mdev);
mdev             1057 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_pd_table(mdev);
mdev             1058 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cleanup_uar_table(mdev);
mdev             1061 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
mdev             1064 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_close_hca(mdev);
mdev             1067 drivers/infiniband/hw/mthca/mthca_main.c 	mthca_cmd_cleanup(mdev);
mdev             1070 drivers/infiniband/hw/mthca/mthca_main.c 	ib_dealloc_device(&mdev->ib_dev);
mdev             1083 drivers/infiniband/hw/mthca/mthca_main.c 	struct mthca_dev *mdev = pci_get_drvdata(pdev);
mdev             1086 drivers/infiniband/hw/mthca/mthca_main.c 	if (mdev) {
mdev             1087 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_free_agents(mdev);
mdev             1088 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_unregister_device(mdev);
mdev             1090 drivers/infiniband/hw/mthca/mthca_main.c 		for (p = 1; p <= mdev->limits.num_ports; ++p)
mdev             1091 drivers/infiniband/hw/mthca/mthca_main.c 			mthca_CLOSE_IB(mdev, p);
mdev             1093 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_mcg_table(mdev);
mdev             1094 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_av_table(mdev);
mdev             1095 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_qp_table(mdev);
mdev             1096 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_srq_table(mdev);
mdev             1097 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_cq_table(mdev);
mdev             1098 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cmd_use_polling(mdev);
mdev             1099 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_eq_table(mdev);
mdev             1101 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_pd_free(mdev, &mdev->driver_pd);
mdev             1103 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_mr_table(mdev);
mdev             1104 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_pd_table(mdev);
mdev             1106 drivers/infiniband/hw/mthca/mthca_main.c 		iounmap(mdev->kar);
mdev             1107 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_uar_free(mdev, &mdev->driver_uar);
mdev             1108 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cleanup_uar_table(mdev);
mdev             1109 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_close_hca(mdev);
mdev             1110 drivers/infiniband/hw/mthca/mthca_main.c 		mthca_cmd_cleanup(mdev);
mdev             1112 drivers/infiniband/hw/mthca/mthca_main.c 		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
mdev             1115 drivers/infiniband/hw/mthca/mthca_main.c 		ib_dealloc_device(&mdev->ib_dev);
mdev             1124 drivers/infiniband/hw/mthca/mthca_main.c 	struct mthca_dev *mdev;
mdev             1127 drivers/infiniband/hw/mthca/mthca_main.c 	mdev = pci_get_drvdata(pdev);
mdev             1128 drivers/infiniband/hw/mthca/mthca_main.c 	if (!mdev)
mdev             1130 drivers/infiniband/hw/mthca/mthca_main.c 	hca_type = mdev->hca_type;
mdev               54 drivers/infiniband/hw/mthca/mthca_profile.h s64 mthca_make_profile(struct mthca_dev *mdev,
mdev               67 drivers/infiniband/hw/mthca/mthca_provider.c 	struct mthca_dev *mdev = to_mdev(ibdev);
mdev               79 drivers/infiniband/hw/mthca/mthca_provider.c 	props->fw_ver              = mdev->fw_ver;
mdev               84 drivers/infiniband/hw/mthca/mthca_provider.c 	err = mthca_MAD_IFC(mdev, 1, 1,
mdev               89 drivers/infiniband/hw/mthca/mthca_provider.c 	props->device_cap_flags    = mdev->device_cap_flags;
mdev               97 drivers/infiniband/hw/mthca/mthca_provider.c 	props->page_size_cap       = mdev->limits.page_size_cap;
mdev               98 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
mdev               99 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_qp_wr           = mdev->limits.max_wqes;
mdev              100 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_send_sge        = mdev->limits.max_sg;
mdev              101 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_recv_sge        = mdev->limits.max_sg;
mdev              102 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_sge_rd          = mdev->limits.max_sg;
mdev              103 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
mdev              104 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_cqe             = mdev->limits.max_cqes;
mdev              105 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
mdev              106 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
mdev              107 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
mdev              108 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
mdev              110 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
mdev              111 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_srq_wr          = mdev->limits.max_srq_wqes;
mdev              112 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_srq_sge         = mdev->limits.max_srq_sge;
mdev              113 drivers/infiniband/hw/mthca/mthca_provider.c 	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
mdev              114 drivers/infiniband/hw/mthca/mthca_provider.c 	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
mdev              116 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_pkeys           = mdev->limits.pkey_table_len;
mdev              117 drivers/infiniband/hw/mthca/mthca_provider.c 	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
mdev              126 drivers/infiniband/hw/mthca/mthca_provider.c 	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
mdev              130 drivers/infiniband/hw/mthca/mthca_provider.c 			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
mdev             1001 drivers/infiniband/hw/mthca/mthca_provider.c 	struct mthca_dev *mdev = NULL;
mdev             1004 drivers/infiniband/hw/mthca/mthca_provider.c 		if (mdev && to_mdev(fmr->device) != mdev)
mdev             1006 drivers/infiniband/hw/mthca/mthca_provider.c 		mdev = to_mdev(fmr->device);
mdev             1009 drivers/infiniband/hw/mthca/mthca_provider.c 	if (!mdev)
mdev             1012 drivers/infiniband/hw/mthca/mthca_provider.c 	if (mthca_is_memfree(mdev)) {
mdev             1014 drivers/infiniband/hw/mthca/mthca_provider.c 			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
mdev             1019 drivers/infiniband/hw/mthca/mthca_provider.c 			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
mdev             1021 drivers/infiniband/hw/mthca/mthca_provider.c 	err = mthca_SYNC_TPT(mdev);
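mthca_provider.c recovers the driver structure from the core's ib_device with to_mdev() and then answers device-attribute queries straight out of mdev->limits. Since the probe path allocates the device with ib_alloc_device(mthca_dev, ib_dev) (mthca_main.c:964 above), to_mdev() is presumably the usual container_of() downcast; the sketch below shows that assumption plus one limit-derived property, using its own demo_ names so as not to claim the driver's exact helpers.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>
#include "mthca_dev.h"

/* Assumed shape of the helper used at mthca_provider.c:67 above. */
static inline struct mthca_dev *demo_to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mthca_dev, ib_dev);
}

static int demo_query_max_qp_wr(struct ib_device *ibdev)
{
	struct mthca_dev *mdev = demo_to_mdev(ibdev);

	return mdev->limits.max_wqes;	/* cf. props->max_qp_wr at mthca_provider.c:99 */
}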
mdev               41 drivers/infiniband/hw/mthca/mthca_reset.c int mthca_reset(struct mthca_dev *mdev)
mdev               69 drivers/infiniband/hw/mthca/mthca_reset.c 	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
mdev               72 drivers/infiniband/hw/mthca/mthca_reset.c 		while ((bridge = pci_get_device(mdev->pdev->vendor,
mdev               73 drivers/infiniband/hw/mthca/mthca_reset.c 						mdev->pdev->device + 2,
mdev               76 drivers/infiniband/hw/mthca/mthca_reset.c 			    bridge->subordinate == mdev->pdev->bus) {
mdev               77 drivers/infiniband/hw/mthca/mthca_reset.c 				mthca_dbg(mdev, "Found bridge: %s\n",
mdev               89 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_warn(mdev, "No bridge found for %s\n",
mdev               90 drivers/infiniband/hw/mthca/mthca_reset.c 				  pci_name(mdev->pdev));
mdev              105 drivers/infiniband/hw/mthca/mthca_reset.c 		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
mdev              107 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't save HCA "
mdev              113 drivers/infiniband/hw/mthca/mthca_reset.c 	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
mdev              114 drivers/infiniband/hw/mthca/mthca_reset.c 	hca_pcie_cap = pci_pcie_cap(mdev->pdev);
mdev              128 drivers/infiniband/hw/mthca/mthca_reset.c 				mthca_err(mdev, "Couldn't save HCA bridge "
mdev              136 drivers/infiniband/hw/mthca/mthca_reset.c 				mthca_err(mdev, "Couldn't locate HCA bridge "
mdev              144 drivers/infiniband/hw/mthca/mthca_reset.c 		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
mdev              149 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't map HCA reset register, "
mdev              167 drivers/infiniband/hw/mthca/mthca_reset.c 			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
mdev              169 drivers/infiniband/hw/mthca/mthca_reset.c 				mthca_err(mdev, "Couldn't access HCA after reset, "
mdev              181 drivers/infiniband/hw/mthca/mthca_reset.c 		mthca_err(mdev, "PCI device did not come back after reset, "
mdev              192 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
mdev              199 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
mdev              213 drivers/infiniband/hw/mthca/mthca_reset.c 				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
mdev              222 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
mdev              229 drivers/infiniband/hw/mthca/mthca_reset.c 		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
mdev              232 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA PCI-X "
mdev              240 drivers/infiniband/hw/mthca/mthca_reset.c 		if (pcie_capability_write_word(mdev->pdev, PCI_EXP_DEVCTL,
mdev              243 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA PCI Express "
mdev              248 drivers/infiniband/hw/mthca/mthca_reset.c 		if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
mdev              251 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA PCI Express "
mdev              261 drivers/infiniband/hw/mthca/mthca_reset.c 		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
mdev              263 drivers/infiniband/hw/mthca/mthca_reset.c 			mthca_err(mdev, "Couldn't restore HCA reg %x, "
mdev              269 drivers/infiniband/hw/mthca/mthca_reset.c 	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
mdev              272 drivers/infiniband/hw/mthca/mthca_reset.c 		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
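mthca_reset() above saves the HCA's PCI config space (and, on PCI-X, the hidden bridge's) before hitting the reset register, then restores it once the device answers config cycles again, writing PCI_COMMAND last. The sketch below condenses only the save/restore bookkeeping for the standard 64-byte header; the bridge handling and the PCI-X/PCIe capability restores shown above are left out, and the loop bounds are an assumption.

#include <linux/pci.h>

static int demo_save_restore_config(struct pci_dev *pdev)
{
	u32 hdr[16];	/* the standard 64-byte config header */
	int i;

	/* Save before the reset wipes the device's config space. */
	for (i = 0; i < 16; ++i)
		if (pci_read_config_dword(pdev, i * 4, &hdr[i]))
			return -ENODEV;

	/* ... trigger the device-specific reset and poll until the device
	 * answers config reads again (mthca_reset.c:144-181 above) ... */

	/* Restore everything except PCI_COMMAND, which goes last so the
	 * device only starts decoding memory/IO once its BARs are back. */
	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;
		if (pci_write_config_dword(pdev, i * 4, hdr[i]))
			return -ENODEV;
	}
	if (pci_write_config_dword(pdev, PCI_COMMAND, hdr[PCI_COMMAND / 4]))
		return -ENODEV;
	return 0;
}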
mdev               24 drivers/input/joystick/maplecontrol.c 	struct maple_device *mdev;
mdev               67 drivers/input/joystick/maplecontrol.c 	maple_getcond_callback(pad->mdev, dc_pad_callback, HZ/20,
mdev               77 drivers/input/joystick/maplecontrol.c 	maple_getcond_callback(pad->mdev, dc_pad_callback, 0,
mdev               98 drivers/input/joystick/maplecontrol.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              103 drivers/input/joystick/maplecontrol.c 	unsigned long data = be32_to_cpu(mdev->devinfo.function_data[0]);
mdev              113 drivers/input/joystick/maplecontrol.c 	pad->mdev = mdev;
mdev              140 drivers/input/joystick/maplecontrol.c 	idev->dev.parent = &mdev->dev;
mdev              141 drivers/input/joystick/maplecontrol.c 	idev->name = mdev->product_name;
mdev              148 drivers/input/joystick/maplecontrol.c 	mdev->driver = mdrv;
mdev              149 drivers/input/joystick/maplecontrol.c 	maple_set_drvdata(mdev, pad);
mdev              156 drivers/input/joystick/maplecontrol.c 	maple_set_drvdata(mdev, NULL);
mdev              162 drivers/input/joystick/maplecontrol.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              163 drivers/input/joystick/maplecontrol.c 	struct dc_pad *pad = maple_get_drvdata(mdev);
mdev              165 drivers/input/joystick/maplecontrol.c 	mdev->callback = NULL;
mdev              167 drivers/input/joystick/maplecontrol.c 	maple_set_drvdata(mdev, NULL);
mdev              148 drivers/input/keyboard/maple_keyb.c 	struct maple_device *mdev;
mdev              154 drivers/input/keyboard/maple_keyb.c 	mdev = to_maple_dev(dev);
mdev              172 drivers/input/keyboard/maple_keyb.c 	idev->name = mdev->product_name;
mdev              178 drivers/input/keyboard/maple_keyb.c 	idev->dev.parent = &mdev->dev;
mdev              191 drivers/input/keyboard/maple_keyb.c 	maple_getcond_callback(mdev, dc_kbd_callback, HZ/50,
mdev              194 drivers/input/keyboard/maple_keyb.c 	mdev->driver = mdrv;
mdev              196 drivers/input/keyboard/maple_keyb.c 	maple_set_drvdata(mdev, kbd);
mdev              201 drivers/input/keyboard/maple_keyb.c 	maple_set_drvdata(mdev, NULL);
mdev              211 drivers/input/keyboard/maple_keyb.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              212 drivers/input/keyboard/maple_keyb.c 	struct dc_kbd *kbd = maple_get_drvdata(mdev);
mdev              219 drivers/input/keyboard/maple_keyb.c 	maple_set_drvdata(mdev, NULL);
mdev               24 drivers/input/mouse/maplemouse.c 	struct maple_device *mdev;
mdev               53 drivers/input/mouse/maplemouse.c 	maple_getcond_callback(mse->mdev, dc_mouse_callback, HZ/50,
mdev               63 drivers/input/mouse/maplemouse.c 	maple_getcond_callback(mse->mdev, dc_mouse_callback, 0,
mdev               70 drivers/input/mouse/maplemouse.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev               89 drivers/input/mouse/maplemouse.c 	mse->mdev = mdev;
mdev               98 drivers/input/mouse/maplemouse.c 	input_dev->name = mdev->product_name;
mdev              104 drivers/input/mouse/maplemouse.c 	mdev->driver = mdrv;
mdev              105 drivers/input/mouse/maplemouse.c 	maple_set_drvdata(mdev, mse);
mdev              119 drivers/input/mouse/maplemouse.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              120 drivers/input/mouse/maplemouse.c 	struct dc_mouse *mse = maple_get_drvdata(mdev);
mdev              122 drivers/input/mouse/maplemouse.c 	mdev->callback = NULL;
mdev              124 drivers/input/mouse/maplemouse.c 	maple_set_drvdata(mdev, NULL);
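Note: the three Dreamcast input drivers above (maplecontrol, maple_keyb, maplemouse) share one shape: the maple bus probe path recovers the maple_device with to_maple_dev(), per-device state is stashed via maple_set_drvdata(), and the remove path clears the condition callback and the drvdata again. A trimmed sketch of that shape; demo_pad, demo_attach and demo_detach are made-up names and the input-device setup is omitted:

#include <linux/maple.h>
#include <linux/slab.h>

struct demo_pad {
        struct maple_device *mdev;
        /* input device, button state, ... */
};

/* Called from the driver's probe path once the maple_device is known. */
static int demo_attach(struct maple_device *mdev)
{
        struct demo_pad *pad = kzalloc(sizeof(*pad), GFP_KERNEL);

        if (!pad)
                return -ENOMEM;

        pad->mdev = mdev;
        maple_set_drvdata(mdev, pad);
        return 0;
}

/* Called from the driver's remove path; undoes the attach. */
static void demo_detach(struct maple_device *mdev)
{
        struct demo_pad *pad = maple_get_drvdata(mdev);

        mdev->callback = NULL;          /* stop periodic condition queries */
        maple_set_drvdata(mdev, NULL);
        kfree(pad);
}
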
mdev               34 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev               36 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev               38 drivers/isdn/mISDN/core.c 	return sprintf(buf, "%d\n", mdev->id);
mdev               45 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev               47 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev               49 drivers/isdn/mISDN/core.c 	return sprintf(buf, "%d\n", mdev->nrbchan);
mdev               56 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev               58 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev               60 drivers/isdn/mISDN/core.c 	return sprintf(buf, "%d\n", mdev->Dprotocols);
mdev               67 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev               69 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev               71 drivers/isdn/mISDN/core.c 	return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
mdev               78 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev               80 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev               82 drivers/isdn/mISDN/core.c 	return sprintf(buf, "%d\n", mdev->D.protocol);
mdev              119 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev              123 drivers/isdn/mISDN/core.c 	for (i = 0; i <= mdev->nrbchan; i++)
mdev              124 drivers/isdn/mISDN/core.c 		*bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0';
mdev              144 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev              146 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev              149 drivers/isdn/mISDN/core.c 	if (add_uevent_var(env, "nchans=%d", mdev->nrbchan))
mdev              172 drivers/isdn/mISDN/core.c 	struct mISDNdevice *mdev = dev_to_mISDN(dev);
mdev              174 drivers/isdn/mISDN/core.c 	if (!mdev)
mdev              176 drivers/isdn/mISDN/core.c 	if (mdev->id != *(const u_int *)id)
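Note: the mISDN core.c entries above are a family of sysfs show() callbacks that all follow the same pattern: map the struct device back to its mISDNdevice with dev_to_mISDN(), bail out if it is gone, and print a single field. A sketch of one such attribute (the attribute would still need to be added to the device's attribute group, which is not shown):

#include <linux/device.h>
#include <linux/mISDNif.h>

static ssize_t id_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct mISDNdevice *mdev = dev_to_mISDN(dev);

        if (!mdev)
                return -ENODEV;
        return sprintf(buf, "%d\n", mdev->id);
}
static DEVICE_ATTR_RO(id);
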
mdev              166 drivers/macintosh/macio_asic.c 	struct macio_dev *mdev;
mdev              168 drivers/macintosh/macio_asic.c         mdev = to_macio_device(dev);
mdev              169 drivers/macintosh/macio_asic.c 	kfree(mdev);
mdev              455 drivers/macintosh/macio_asic.c 	struct macio_dev *rdev, *mdev, *mbdev = NULL, *sdev = NULL;
mdev              481 drivers/macintosh/macio_asic.c 		mdev = macio_add_one_device(chip, &rdev->ofdev.dev, np, NULL,
mdev              483 drivers/macintosh/macio_asic.c 		if (mdev == NULL)
mdev              486 drivers/macintosh/macio_asic.c 			mbdev = mdev;
mdev              488 drivers/macintosh/macio_asic.c 			sdev = mdev;
mdev               61 drivers/macintosh/mediabay.c 	struct macio_dev		*mdev;
mdev              448 drivers/macintosh/mediabay.c 	struct macio_dev *mdev;
mdev              458 drivers/macintosh/mediabay.c 	mdev = to_macio_device(dev);
mdev              461 drivers/macintosh/mediabay.c 		drv->mediabay_event(mdev, state);
mdev              505 drivers/macintosh/mediabay.c 			device_for_each_child(&bay->mdev->ofdev.dev,
mdev              519 drivers/macintosh/mediabay.c 		device_for_each_child(&bay->mdev->ofdev.dev,
mdev              525 drivers/macintosh/mediabay.c 		device_for_each_child(&bay->mdev->ofdev.dev,
mdev              555 drivers/macintosh/mediabay.c static int media_bay_attach(struct macio_dev *mdev,
mdev              564 drivers/macintosh/mediabay.c 	ofnode = mdev->ofdev.dev.of_node;
mdev              566 drivers/macintosh/mediabay.c 	if (macio_resource_count(mdev) < 1)
mdev              568 drivers/macintosh/mediabay.c 	if (macio_request_resources(mdev, "media-bay"))
mdev              574 drivers/macintosh/mediabay.c 	base = macio_resource_start(mdev, 0) & 0xffff0000u;
mdev              577 drivers/macintosh/mediabay.c 		macio_release_resources(mdev);
mdev              583 drivers/macintosh/mediabay.c 	bay->mdev = mdev;
mdev              605 drivers/macintosh/mediabay.c 	macio_set_drvdata(mdev, bay);
mdev              615 drivers/macintosh/mediabay.c static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
mdev              617 drivers/macintosh/mediabay.c 	struct media_bay_info	*bay = macio_get_drvdata(mdev);
mdev              619 drivers/macintosh/mediabay.c 	if (state.event != mdev->ofdev.dev.power.power_state.event
mdev              626 drivers/macintosh/mediabay.c 		mdev->ofdev.dev.power.power_state = state;
mdev              631 drivers/macintosh/mediabay.c static int media_bay_resume(struct macio_dev *mdev)
mdev              633 drivers/macintosh/mediabay.c 	struct media_bay_info	*bay = macio_get_drvdata(mdev);
mdev              635 drivers/macintosh/mediabay.c 	if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
mdev              636 drivers/macintosh/mediabay.c 		mdev->ofdev.dev.power.power_state = PMSG_ON;
mdev               59 drivers/macintosh/rack-meter.c 	struct macio_dev		*mdev;
mdev               97 drivers/macintosh/rack-meter.c 	struct macio_chip *macio = rm->mdev->bus->chip;
mdev              366 drivers/macintosh/rack-meter.c static int rackmeter_probe(struct macio_dev* mdev,
mdev              377 drivers/macintosh/rack-meter.c 	for_each_child_of_node(mdev->ofdev.dev.of_node, i2s)
mdev              405 drivers/macintosh/rack-meter.c 	rm->mdev = mdev;
mdev              408 drivers/macintosh/rack-meter.c 	dev_set_drvdata(&mdev->ofdev.dev, rm);
mdev              411 drivers/macintosh/rack-meter.c 	if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
mdev              415 drivers/macintosh/rack-meter.c 		       mdev->ofdev.dev.of_node);
mdev              419 drivers/macintosh/rack-meter.c 	if (macio_request_resources(mdev, "rackmeter")) {
mdev              422 drivers/macintosh/rack-meter.c 		       mdev->ofdev.dev.of_node);
mdev              426 drivers/macintosh/rack-meter.c 	rm->irq = macio_irq(mdev, 1);
mdev              434 drivers/macintosh/rack-meter.c 		       mdev->ofdev.dev.of_node);
mdev              452 drivers/macintosh/rack-meter.c 	rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
mdev              462 drivers/macintosh/rack-meter.c 	rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
mdev              473 drivers/macintosh/rack-meter.c 	rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
mdev              508 drivers/macintosh/rack-meter.c 	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
mdev              515 drivers/macintosh/rack-meter.c 	macio_release_resources(mdev);
mdev              522 drivers/macintosh/rack-meter.c 	dev_set_drvdata(&mdev->ofdev.dev, NULL);
mdev              526 drivers/macintosh/rack-meter.c static int rackmeter_remove(struct macio_dev* mdev)
mdev              528 drivers/macintosh/rack-meter.c 	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
mdev              534 drivers/macintosh/rack-meter.c 	dev_set_drvdata(&mdev->ofdev.dev, NULL);
mdev              547 drivers/macintosh/rack-meter.c 	dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
mdev              556 drivers/macintosh/rack-meter.c 	macio_release_resources(mdev);
mdev              565 drivers/macintosh/rack-meter.c static int rackmeter_shutdown(struct macio_dev* mdev)
mdev              567 drivers/macintosh/rack-meter.c 	struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
mdev               36 drivers/mailbox/mailbox-sti.c #define MBOX_BASE(mdev, inst)   ((mdev)->base + ((inst) * 4))
mdev               81 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device	*mdev;
mdev               89 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev               93 drivers/mailbox/mailbox-sti.c 	return mdev->enabled[instance] & BIT(channel);
mdev              122 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev              126 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              128 drivers/mailbox/mailbox-sti.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              129 drivers/mailbox/mailbox-sti.c 	mdev->enabled[instance] |= BIT(channel);
mdev              131 drivers/mailbox/mailbox-sti.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              137 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev              141 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              143 drivers/mailbox/mailbox-sti.c 	spin_lock_irqsave(&mdev->lock, flags);
mdev              144 drivers/mailbox/mailbox-sti.c 	mdev->enabled[instance] &= ~BIT(channel);
mdev              146 drivers/mailbox/mailbox-sti.c 	spin_unlock_irqrestore(&mdev->lock, flags);
mdev              152 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev              155 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              160 drivers/mailbox/mailbox-sti.c static struct mbox_chan *sti_mbox_irq_to_channel(struct sti_mbox_device *mdev,
mdev              163 drivers/mailbox/mailbox-sti.c 	struct mbox_controller *mbox = mdev->mbox;
mdev              167 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              193 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = data;
mdev              194 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
mdev              200 drivers/mailbox/mailbox-sti.c 		chan = sti_mbox_irq_to_channel(mdev, instance);
mdev              215 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = data;
mdev              216 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
mdev              223 drivers/mailbox/mailbox-sti.c 		chan = sti_mbox_irq_to_channel(mdev, instance);
mdev              229 drivers/mailbox/mailbox-sti.c 			dev_warn(mdev->dev,
mdev              232 drivers/mailbox/mailbox-sti.c 				 mdev->name, chan_info->instance,
mdev              233 drivers/mailbox/mailbox-sti.c 				 chan_info->channel, mdev->enabled[instance]);
mdev              246 drivers/mailbox/mailbox-sti.c 		dev_err(mdev->dev, "Spurious IRQ - was a channel requested?\n");
mdev              254 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev              257 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              260 drivers/mailbox/mailbox-sti.c 		dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d disabled\n",
mdev              261 drivers/mailbox/mailbox-sti.c 			mdev->name, instance, channel);
mdev              266 drivers/mailbox/mailbox-sti.c 		dev_dbg(mdev->dev, "Mbox: %s: inst: %d, chan: %d not ready\n",
mdev              267 drivers/mailbox/mailbox-sti.c 			mdev->name, instance, channel);
mdev              277 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = chan_info->mdev;
mdev              280 drivers/mailbox/mailbox-sti.c 	void __iomem *base = MBOX_BASE(mdev, instance);
mdev              285 drivers/mailbox/mailbox-sti.c 	dev_dbg(mdev->dev,
mdev              287 drivers/mailbox/mailbox-sti.c 		mdev->name, instance, channel);
mdev              303 drivers/mailbox/mailbox-sti.c 	struct mbox_controller *mbox = chan_info->mdev->mbox;
mdev              324 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev = dev_get_drvdata(mbox->dev);
mdev              325 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_pdata *pdata = dev_get_platdata(mdev->dev);
mdev              345 drivers/mailbox/mailbox-sti.c 		    mbox->dev == chan_info->mdev->dev &&
mdev              370 drivers/mailbox/mailbox-sti.c 	chan_info->mdev		= mdev;
mdev              378 drivers/mailbox/mailbox-sti.c 		 mdev->name, instance, channel);
mdev              408 drivers/mailbox/mailbox-sti.c 	struct sti_mbox_device *mdev;
mdev              422 drivers/mailbox/mailbox-sti.c 	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
mdev              423 drivers/mailbox/mailbox-sti.c 	if (!mdev)
mdev              426 drivers/mailbox/mailbox-sti.c 	platform_set_drvdata(pdev, mdev);
mdev              429 drivers/mailbox/mailbox-sti.c 	mdev->base = devm_ioremap_resource(&pdev->dev, res);
mdev              430 drivers/mailbox/mailbox-sti.c 	if (IS_ERR(mdev->base))
mdev              431 drivers/mailbox/mailbox-sti.c 		return PTR_ERR(mdev->base);
mdev              433 drivers/mailbox/mailbox-sti.c 	ret = of_property_read_string(np, "mbox-name", &mdev->name);
mdev              435 drivers/mailbox/mailbox-sti.c 		mdev->name = np->full_name;
mdev              446 drivers/mailbox/mailbox-sti.c 	mdev->dev		= &pdev->dev;
mdev              447 drivers/mailbox/mailbox-sti.c 	mdev->mbox		= mbox;
mdev              449 drivers/mailbox/mailbox-sti.c 	spin_lock_init(&mdev->lock);
mdev              456 drivers/mailbox/mailbox-sti.c 	mbox->dev		= mdev->dev;
mdev              469 drivers/mailbox/mailbox-sti.c 			 "%s: Registered Tx only Mailbox\n", mdev->name);
mdev              476 drivers/mailbox/mailbox-sti.c 					IRQF_ONESHOT, mdev->name, mdev);
mdev              482 drivers/mailbox/mailbox-sti.c 	dev_info(&pdev->dev, "%s: Registered Tx/Rx Mailbox\n", mdev->name);
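Note: the mailbox-sti.c entries above revolve around two pieces of state: a per-instance register slot computed by MBOX_BASE() and a per-instance bitmask of enabled channels updated under mdev->lock. A sketch of just that bookkeeping, with the struct trimmed to the fields it needs; the real driver also writes a hardware enable register at that base, which is not visible in the listing:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Each mailbox instance occupies one 32-bit register slot after the
 * ioremapped base, mirroring the MBOX_BASE() macro shown above. */
#define DEMO_MBOX_BASE(mdev, inst)      ((mdev)->base + ((inst) * 4))

struct demo_sti_mbox_device {
        void __iomem    *base;
        spinlock_t      lock;
        u32             enabled[4];     /* per-instance mask of enabled channels */
};

static void demo_mbox_enable_channel(struct demo_sti_mbox_device *mdev,
                                     unsigned int instance,
                                     unsigned int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        mdev->enabled[instance] |= BIT(channel);
        spin_unlock_irqrestore(&mdev->lock, flags);
}
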
mdev              132 drivers/mailbox/omap-mailbox.c unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
mdev              134 drivers/mailbox/omap-mailbox.c 	return __raw_readl(mdev->mbox_base + ofs);
mdev              138 drivers/mailbox/omap-mailbox.c void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
mdev              140 drivers/mailbox/omap-mailbox.c 	__raw_writel(val, mdev->mbox_base + ofs);
mdev              395 drivers/mailbox/omap-mailbox.c static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
mdev              399 drivers/mailbox/omap-mailbox.c 	struct omap_mbox **mboxes = mdev->mboxes;
mdev              419 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev;
mdev              434 drivers/mailbox/omap-mailbox.c 	list_for_each_entry(mdev, &omap_mbox_devices, elem) {
mdev              435 drivers/mailbox/omap-mailbox.c 		mbox = omap_mbox_device_find(mdev, chan_name);
mdev              466 drivers/mailbox/omap-mailbox.c static int omap_mbox_register(struct omap_mbox_device *mdev)
mdev              472 drivers/mailbox/omap-mailbox.c 	if (!mdev || !mdev->mboxes)
mdev              475 drivers/mailbox/omap-mailbox.c 	mboxes = mdev->mboxes;
mdev              479 drivers/mailbox/omap-mailbox.c 		mbox->dev = device_create(&omap_mbox_class, mdev->dev,
mdev              488 drivers/mailbox/omap-mailbox.c 	list_add(&mdev->elem, &omap_mbox_devices);
mdev              491 drivers/mailbox/omap-mailbox.c 	ret = devm_mbox_controller_register(mdev->dev, &mdev->controller);
mdev              501 drivers/mailbox/omap-mailbox.c static int omap_mbox_unregister(struct omap_mbox_device *mdev)
mdev              506 drivers/mailbox/omap-mailbox.c 	if (!mdev || !mdev->mboxes)
mdev              510 drivers/mailbox/omap-mailbox.c 	list_del(&mdev->elem);
mdev              513 drivers/mailbox/omap-mailbox.c 	mboxes = mdev->mboxes;
mdev              522 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev = mbox->parent;
mdev              525 drivers/mailbox/omap-mailbox.c 	mutex_lock(&mdev->cfg_lock);
mdev              526 drivers/mailbox/omap-mailbox.c 	pm_runtime_get_sync(mdev->dev);
mdev              529 drivers/mailbox/omap-mailbox.c 		pm_runtime_put_sync(mdev->dev);
mdev              530 drivers/mailbox/omap-mailbox.c 	mutex_unlock(&mdev->cfg_lock);
mdev              537 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev = mbox->parent;
mdev              539 drivers/mailbox/omap-mailbox.c 	mutex_lock(&mdev->cfg_lock);
mdev              541 drivers/mailbox/omap-mailbox.c 	pm_runtime_put_sync(mdev->dev);
mdev              542 drivers/mailbox/omap-mailbox.c 	mutex_unlock(&mdev->cfg_lock);
mdev              603 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
mdev              609 drivers/mailbox/omap-mailbox.c 	for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
mdev              610 drivers/mailbox/omap-mailbox.c 		if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
mdev              611 drivers/mailbox/omap-mailbox.c 			dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
mdev              617 drivers/mailbox/omap-mailbox.c 	for (usr = 0; usr < mdev->num_users; usr++) {
mdev              618 drivers/mailbox/omap-mailbox.c 		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
mdev              619 drivers/mailbox/omap-mailbox.c 		mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
mdev              627 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev = dev_get_drvdata(dev);
mdev              633 drivers/mailbox/omap-mailbox.c 	for (usr = 0; usr < mdev->num_users; usr++) {
mdev              634 drivers/mailbox/omap-mailbox.c 		reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
mdev              635 drivers/mailbox/omap-mailbox.c 		mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
mdev              677 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev;
mdev              680 drivers/mailbox/omap-mailbox.c 	mdev = container_of(controller, struct omap_mbox_device, controller);
mdev              681 drivers/mailbox/omap-mailbox.c 	if (WARN_ON(!mdev))
mdev              691 drivers/mailbox/omap-mailbox.c 	mbox = omap_mbox_device_find(mdev, node->name);
mdev              703 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev;
mdev              771 drivers/mailbox/omap-mailbox.c 	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
mdev              772 drivers/mailbox/omap-mailbox.c 	if (!mdev)
mdev              776 drivers/mailbox/omap-mailbox.c 	mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
mdev              777 drivers/mailbox/omap-mailbox.c 	if (IS_ERR(mdev->mbox_base))
mdev              778 drivers/mailbox/omap-mailbox.c 		return PTR_ERR(mdev->mbox_base);
mdev              780 drivers/mailbox/omap-mailbox.c 	mdev->irq_ctx = devm_kcalloc(&pdev->dev, num_users, sizeof(u32),
mdev              782 drivers/mailbox/omap-mailbox.c 	if (!mdev->irq_ctx)
mdev              823 drivers/mailbox/omap-mailbox.c 		mbox->parent = mdev;
mdev              833 drivers/mailbox/omap-mailbox.c 	mutex_init(&mdev->cfg_lock);
mdev              834 drivers/mailbox/omap-mailbox.c 	mdev->dev = &pdev->dev;
mdev              835 drivers/mailbox/omap-mailbox.c 	mdev->num_users = num_users;
mdev              836 drivers/mailbox/omap-mailbox.c 	mdev->num_fifos = num_fifos;
mdev              837 drivers/mailbox/omap-mailbox.c 	mdev->intr_type = intr_type;
mdev              838 drivers/mailbox/omap-mailbox.c 	mdev->mboxes = list;
mdev              844 drivers/mailbox/omap-mailbox.c 	mdev->controller.txdone_irq = true;
mdev              845 drivers/mailbox/omap-mailbox.c 	mdev->controller.dev = mdev->dev;
mdev              846 drivers/mailbox/omap-mailbox.c 	mdev->controller.ops = &omap_mbox_chan_ops;
mdev              847 drivers/mailbox/omap-mailbox.c 	mdev->controller.chans = chnls;
mdev              848 drivers/mailbox/omap-mailbox.c 	mdev->controller.num_chans = info_count;
mdev              849 drivers/mailbox/omap-mailbox.c 	mdev->controller.of_xlate = omap_mbox_of_xlate;
mdev              850 drivers/mailbox/omap-mailbox.c 	ret = omap_mbox_register(mdev);
mdev              854 drivers/mailbox/omap-mailbox.c 	platform_set_drvdata(pdev, mdev);
mdev              855 drivers/mailbox/omap-mailbox.c 	pm_runtime_enable(mdev->dev);
mdev              857 drivers/mailbox/omap-mailbox.c 	ret = pm_runtime_get_sync(mdev->dev);
mdev              859 drivers/mailbox/omap-mailbox.c 		pm_runtime_put_noidle(mdev->dev);
mdev              867 drivers/mailbox/omap-mailbox.c 	l = mbox_read_reg(mdev, MAILBOX_REVISION);
mdev              868 drivers/mailbox/omap-mailbox.c 	dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
mdev              870 drivers/mailbox/omap-mailbox.c 	ret = pm_runtime_put_sync(mdev->dev);
mdev              878 drivers/mailbox/omap-mailbox.c 	pm_runtime_disable(mdev->dev);
mdev              879 drivers/mailbox/omap-mailbox.c 	omap_mbox_unregister(mdev);
mdev              885 drivers/mailbox/omap-mailbox.c 	struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
mdev              887 drivers/mailbox/omap-mailbox.c 	pm_runtime_disable(mdev->dev);
mdev              888 drivers/mailbox/omap-mailbox.c 	omap_mbox_unregister(mdev);
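Note: the omap-mailbox.c entries above funnel every register access through two thin helpers that add an offset to the ioremapped base held in the omap_mbox_device; the suspend/resume paths then use them to save and restore the per-user IRQENABLE registers. A sketch of just the accessors, with the structure trimmed to the one field they touch:

#include <linux/io.h>
#include <linux/types.h>

struct demo_omap_mbox_device {
        void __iomem *mbox_base;
};

static inline unsigned int demo_mbox_read_reg(struct demo_omap_mbox_device *mdev,
                                              size_t ofs)
{
        return __raw_readl(mdev->mbox_base + ofs);
}

static inline void demo_mbox_write_reg(struct demo_omap_mbox_device *mdev,
                                       u32 val, size_t ofs)
{
        __raw_writel(val, mdev->mbox_base + ofs);
}
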
mdev              480 drivers/mailbox/zynqmp-ipi-mailbox.c 	struct device *dev, *mdev;
mdev              498 drivers/mailbox/zynqmp-ipi-mailbox.c 	mdev = &ipi_mbox->dev;
mdev              505 drivers/mailbox/zynqmp-ipi-mailbox.c 		mchan->req_buf = devm_ioremap(mdev, res.start,
mdev              508 drivers/mailbox/zynqmp-ipi-mailbox.c 			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
mdev              513 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
mdev              521 drivers/mailbox/zynqmp-ipi-mailbox.c 		mchan->resp_buf = devm_ioremap(mdev, res.start,
mdev              524 drivers/mailbox/zynqmp-ipi-mailbox.c 			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
mdev              529 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_err(mdev, "Unmatched resource %s.\n", name);
mdev              532 drivers/mailbox/zynqmp-ipi-mailbox.c 	mchan->rx_buf = devm_kzalloc(mdev,
mdev              544 drivers/mailbox/zynqmp-ipi-mailbox.c 		mchan->req_buf = devm_ioremap(mdev, res.start,
mdev              547 drivers/mailbox/zynqmp-ipi-mailbox.c 			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
mdev              552 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_err(mdev, "Unmatched resource %s.\n", name);
mdev              560 drivers/mailbox/zynqmp-ipi-mailbox.c 		mchan->resp_buf = devm_ioremap(mdev, res.start,
mdev              563 drivers/mailbox/zynqmp-ipi-mailbox.c 			dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
mdev              568 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_err(mdev, "Unmatched resource %s.\n", name);
mdev              571 drivers/mailbox/zynqmp-ipi-mailbox.c 	mchan->rx_buf = devm_kzalloc(mdev,
mdev              586 drivers/mailbox/zynqmp-ipi-mailbox.c 	mbox->dev = mdev;
mdev              593 drivers/mailbox/zynqmp-ipi-mailbox.c 	chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL);
mdev              601 drivers/mailbox/zynqmp-ipi-mailbox.c 	ret = devm_mbox_controller_register(mdev, mbox);
mdev              603 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_err(mdev,
mdev              606 drivers/mailbox/zynqmp-ipi-mailbox.c 		dev_info(mdev,
mdev               34 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev               37 drivers/mcb/mcb-core.c 	found_id = mcb_match_id(mdrv->id_table, mdev);
mdev               46 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev               49 drivers/mcb/mcb-core.c 	ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
mdev               59 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev               64 drivers/mcb/mcb-core.c 	found_id = mcb_match_id(mdrv->id_table, mdev);
mdev               68 drivers/mcb/mcb-core.c 	carrier_mod = mdev->dev.parent->driver->owner;
mdev               73 drivers/mcb/mcb-core.c 	ret = mdrv->probe(mdev, found_id);
mdev               83 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev               86 drivers/mcb/mcb-core.c 	mdrv->remove(mdev);
mdev               88 drivers/mcb/mcb-core.c 	carrier_mod = mdev->dev.parent->driver->owner;
mdev               91 drivers/mcb/mcb-core.c 	put_device(&mdev->dev);
mdev               99 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev              102 drivers/mcb/mcb-core.c 		mdrv->shutdown(mdev);
mdev              210 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev              212 drivers/mcb/mcb-core.c 	mcb_bus_put(mdev->bus);
mdev              213 drivers/mcb/mcb-core.c 	kfree(mdev);
mdev              390 drivers/mcb/mcb-core.c 	struct mcb_device *mdev = to_mcb_device(dev);
mdev              393 drivers/mcb/mcb-core.c 	if (mdev->is_added)
mdev              400 drivers/mcb/mcb-core.c 	mdev->is_added = true;
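Note: the mcb-core.c entries above show the bus plumbing around an mcb_device: matching against a driver's id_table, pinning the carrier module around probe/remove, and exporting a MODALIAS of the form "mcb:16zNNN" so udev can load the right driver. A sketch of the uevent hook alone (standalone here; in the real core it is wired into the bus_type):

#include <linux/device.h>
#include <linux/mcb.h>

static int demo_mcb_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct mcb_device *mdev = to_mcb_device(dev);

        if (add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id))
                return -ENOMEM;
        return 0;
}
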
mdev               42 drivers/mcb/mcb-parse.c 	struct mcb_device *mdev;
mdev               50 drivers/mcb/mcb-parse.c 	mdev = mcb_alloc_dev(bus);
mdev               51 drivers/mcb/mcb-parse.c 	if (!mdev)
mdev               59 drivers/mcb/mcb-parse.c 	mdev->id = GDD_DEV(reg1);
mdev               60 drivers/mcb/mcb-parse.c 	mdev->rev = GDD_REV(reg1);
mdev               61 drivers/mcb/mcb-parse.c 	mdev->var = GDD_VAR(reg1);
mdev               62 drivers/mcb/mcb-parse.c 	mdev->bar = GDD_BAR(reg2);
mdev               63 drivers/mcb/mcb-parse.c 	mdev->group = GDD_GRP(reg2);
mdev               64 drivers/mcb/mcb-parse.c 	mdev->inst = GDD_INS(reg2);
mdev               71 drivers/mcb/mcb-parse.c 	if (mdev->bar > bar_count - 1) {
mdev               72 drivers/mcb/mcb-parse.c 		pr_info("No BAR for 16z%03d\n", mdev->id);
mdev               77 drivers/mcb/mcb-parse.c 	dev_mapbase = cb[mdev->bar].addr;
mdev               79 drivers/mcb/mcb-parse.c 		pr_info("BAR not assigned for 16z%03d\n", mdev->id);
mdev               86 drivers/mcb/mcb-parse.c 			mdev->id);
mdev               91 drivers/mcb/mcb-parse.c 	pr_debug("Found a 16z%03d\n", mdev->id);
mdev               93 drivers/mcb/mcb-parse.c 	mdev->irq.start = GDD_IRQ(reg1);
mdev               94 drivers/mcb/mcb-parse.c 	mdev->irq.end = GDD_IRQ(reg1);
mdev               95 drivers/mcb/mcb-parse.c 	mdev->irq.flags = IORESOURCE_IRQ;
mdev               97 drivers/mcb/mcb-parse.c 	mdev->mem.start = dev_mapbase + offset;
mdev               99 drivers/mcb/mcb-parse.c 	mdev->mem.end = mdev->mem.start + size - 1;
mdev              100 drivers/mcb/mcb-parse.c 	mdev->mem.flags = IORESOURCE_MEM;
mdev              102 drivers/mcb/mcb-parse.c 	mdev->is_added = false;
mdev              104 drivers/mcb/mcb-parse.c 	ret = mcb_device_register(bus, mdev);
mdev              111 drivers/mcb/mcb-parse.c 	mcb_free_dev(mdev);
mdev               21 drivers/mcb/mcb-pci.c static int mcb_pci_get_irq(struct mcb_device *mdev)
mdev               23 drivers/mcb/mcb-pci.c 	struct mcb_bus *mbus = mdev->bus;
mdev              645 drivers/media/common/siano/smscoreapi.c 			    void *mdev)
mdev              655 drivers/media/common/siano/smscoreapi.c 	dev->media_dev = mdev;
mdev             1120 drivers/media/common/siano/smscoreapi.h 				   void *mdev);
mdev               76 drivers/media/common/videobuf2/videobuf2-dvb.c 			  struct media_device *mdev,
mdev               95 drivers/media/common/videobuf2/videobuf2-dvb.c 	if (mdev)
mdev               96 drivers/media/common/videobuf2/videobuf2-dvb.c 		fe->adapter.mdev = mdev;
mdev              196 drivers/media/common/videobuf2/videobuf2-dvb.c 			 struct media_device *mdev,
mdev              211 drivers/media/common/videobuf2/videobuf2-dvb.c 	res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev,
mdev              334 drivers/media/common/videobuf2/videobuf2-v4l2.c static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
mdev              437 drivers/media/common/videobuf2/videobuf2-v4l2.c 	req = media_request_get_by_fd(mdev, b->request_fd);
mdev              672 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
mdev              685 drivers/media/common/videobuf2/videobuf2-v4l2.c 	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
mdev              747 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
mdev              758 drivers/media/common/videobuf2/videobuf2-v4l2.c 	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
mdev              961 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
mdev              980 drivers/media/common/videobuf2/videobuf2-v4l2.c 	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
mdev             2799 drivers/media/dvb-core/dvb_frontend.c 		if (fe->dvb->mdev) {
mdev             2800 drivers/media/dvb-core/dvb_frontend.c 			mutex_lock(&fe->dvb->mdev->graph_mutex);
mdev             2801 drivers/media/dvb-core/dvb_frontend.c 			if (fe->dvb->mdev->enable_source)
mdev             2802 drivers/media/dvb-core/dvb_frontend.c 				ret = fe->dvb->mdev->enable_source(
mdev             2805 drivers/media/dvb-core/dvb_frontend.c 			mutex_unlock(&fe->dvb->mdev->graph_mutex);
mdev             2832 drivers/media/dvb-core/dvb_frontend.c 	if (fe->dvb->mdev) {
mdev             2833 drivers/media/dvb-core/dvb_frontend.c 		mutex_lock(&fe->dvb->mdev->graph_mutex);
mdev             2834 drivers/media/dvb-core/dvb_frontend.c 		if (fe->dvb->mdev->disable_source)
mdev             2835 drivers/media/dvb-core/dvb_frontend.c 			fe->dvb->mdev->disable_source(dvbdev->entity);
mdev             2836 drivers/media/dvb-core/dvb_frontend.c 		mutex_unlock(&fe->dvb->mdev->graph_mutex);
mdev             2871 drivers/media/dvb-core/dvb_frontend.c 		if (fe->dvb->mdev) {
mdev             2872 drivers/media/dvb-core/dvb_frontend.c 			mutex_lock(&fe->dvb->mdev->graph_mutex);
mdev             2873 drivers/media/dvb-core/dvb_frontend.c 			if (fe->dvb->mdev->disable_source)
mdev             2874 drivers/media/dvb-core/dvb_frontend.c 				fe->dvb->mdev->disable_source(dvbdev->entity);
mdev             2875 drivers/media/dvb-core/dvb_frontend.c 			mutex_unlock(&fe->dvb->mdev->graph_mutex);
mdev              284 drivers/media/dvb-core/dvbdev.c 		ret = media_device_register_entity(dvbdev->adapter->mdev,
mdev              379 drivers/media/dvb-core/dvbdev.c 	ret = media_device_register_entity(dvbdev->adapter->mdev,
mdev              400 drivers/media/dvb-core/dvbdev.c 	if (!dvbdev->adapter->mdev)
mdev              427 drivers/media/dvb-core/dvbdev.c 	dvbdev->intf_devnode = media_devnode_create(dvbdev->adapter->mdev,
mdev              595 drivers/media/dvb-core/dvbdev.c 	struct media_device *mdev = adap->mdev;
mdev              599 drivers/media/dvb-core/dvbdev.c 	media_device_for_each_entity(entity, mdev) {
mdev              616 drivers/media/dvb-core/dvbdev.c 	struct media_device *mdev = adap->mdev;
mdev              627 drivers/media/dvb-core/dvbdev.c 	if (!mdev)
mdev              630 drivers/media/dvb-core/dvbdev.c 	media_device_for_each_entity(entity, mdev) {
mdev              680 drivers/media/dvb-core/dvbdev.c 		ret = media_device_register_entity(mdev, conn);
mdev              685 drivers/media/dvb-core/dvbdev.c 			ret = media_create_pad_links(mdev,
mdev              697 drivers/media/dvb-core/dvbdev.c 			ret = media_create_pad_links(mdev,
mdev              714 drivers/media/dvb-core/dvbdev.c 		ret = media_create_pad_links(mdev,
mdev              725 drivers/media/dvb-core/dvbdev.c 		ret = media_create_pad_links(mdev,
mdev              743 drivers/media/dvb-core/dvbdev.c 		media_device_for_each_entity(entity, mdev) {
mdev              766 drivers/media/dvb-core/dvbdev.c 	media_device_for_each_intf(intf, mdev) {
mdev             1373 drivers/media/i2c/tvp5150.c 		ret = media_device_register_entity(sd->v4l2_dev->mdev, input);
mdev               30 drivers/media/mc/mc-dev-allocator.c 	struct media_device mdev;
mdev               37 drivers/media/mc/mc-dev-allocator.c to_media_device_instance(struct media_device *mdev)
mdev               39 drivers/media/mc/mc-dev-allocator.c 	return container_of(mdev, struct media_device_instance, mdev);
mdev               47 drivers/media/mc/mc-dev-allocator.c 	dev_dbg(mdi->mdev.dev, "%s: releasing Media Device\n", __func__);
mdev               51 drivers/media/mc/mc-dev-allocator.c 	media_device_unregister(&mdi->mdev);
mdev               52 drivers/media/mc/mc-dev-allocator.c 	media_device_cleanup(&mdi->mdev);
mdev               68 drivers/media/mc/mc-dev-allocator.c 		if (mdi->mdev.dev != dev)
mdev               81 drivers/media/mc/mc-dev-allocator.c 		return &mdi->mdev;
mdev               94 drivers/media/mc/mc-dev-allocator.c 	return &mdi->mdev;
mdev              101 drivers/media/mc/mc-dev-allocator.c 	struct media_device *mdev;
mdev              104 drivers/media/mc/mc-dev-allocator.c 	mdev = __media_device_get(&udev->dev, module_name, owner);
mdev              105 drivers/media/mc/mc-dev-allocator.c 	if (!mdev) {
mdev              111 drivers/media/mc/mc-dev-allocator.c 	if (!mdev->dev)
mdev              112 drivers/media/mc/mc-dev-allocator.c 		__media_device_usb_init(mdev, udev, udev->product,
mdev              115 drivers/media/mc/mc-dev-allocator.c 	return mdev;
mdev              119 drivers/media/mc/mc-dev-allocator.c void media_device_delete(struct media_device *mdev, const char *module_name,
mdev              122 drivers/media/mc/mc-dev-allocator.c 	struct media_device_instance *mdi = to_media_device_instance(mdev);
mdev              128 drivers/media/mc/mc-dev-allocator.c 		dev_dbg(mdi->mdev.dev,
mdev               81 drivers/media/mc/mc-device.c static struct media_entity *find_entity(struct media_device *mdev, u32 id)
mdev               88 drivers/media/mc/mc-device.c 	media_device_for_each_entity(entity, mdev) {
mdev               98 drivers/media/mc/mc-device.c static long media_device_enum_entities(struct media_device *mdev, void *arg)
mdev              103 drivers/media/mc/mc-device.c 	ent = find_entity(mdev, entd->id);
mdev              151 drivers/media/mc/mc-device.c static long media_device_enum_links(struct media_device *mdev, void *arg)
mdev              156 drivers/media/mc/mc-device.c 	entity = find_entity(mdev, links->entity);
mdev              200 drivers/media/mc/mc-device.c static long media_device_setup_link(struct media_device *mdev, void *arg)
mdev              209 drivers/media/mc/mc-device.c 	source = find_entity(mdev, linkd->source.entity);
mdev              210 drivers/media/mc/mc-device.c 	sink = find_entity(mdev, linkd->sink.entity);
mdev              230 drivers/media/mc/mc-device.c static long media_device_get_topology(struct media_device *mdev, void *arg)
mdev              244 drivers/media/mc/mc-device.c 	topo->topology_version = mdev->topology_version;
mdev              249 drivers/media/mc/mc-device.c 	media_device_for_each_entity(entity, mdev) {
mdev              277 drivers/media/mc/mc-device.c 	media_device_for_each_intf(intf, mdev) {
mdev              313 drivers/media/mc/mc-device.c 	media_device_for_each_pad(pad, mdev) {
mdev              341 drivers/media/mc/mc-device.c 	media_device_for_each_link(link, mdev) {
mdev              373 drivers/media/mc/mc-device.c static long media_device_request_alloc(struct media_device *mdev,
mdev              377 drivers/media/mc/mc-device.c 	if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
mdev              380 drivers/media/mc/mc-device.c 	return media_request_alloc(mdev, alloc_fd);
mdev              492 drivers/media/mc/mc-device.c static long media_device_enum_links32(struct media_device *mdev,
mdev              509 drivers/media/mc/mc-device.c 	ret = media_device_enum_links(mdev, &links);
mdev              562 drivers/media/mc/mc-device.c 	struct media_device *mdev = devnode->media_dev;
mdev              564 drivers/media/mc/mc-device.c 	return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
mdev              583 drivers/media/mc/mc-device.c int __must_check media_device_register_entity(struct media_device *mdev,
mdev              592 drivers/media/mc/mc-device.c 		dev_warn(mdev->dev,
mdev              597 drivers/media/mc/mc-device.c 	WARN_ON(entity->graph_obj.mdev != NULL);
mdev              598 drivers/media/mc/mc-device.c 	entity->graph_obj.mdev = mdev;
mdev              603 drivers/media/mc/mc-device.c 	ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL);
mdev              608 drivers/media/mc/mc-device.c 	mutex_lock(&mdev->graph_mutex);
mdev              609 drivers/media/mc/mc-device.c 	mdev->entity_internal_idx_max =
mdev              610 drivers/media/mc/mc-device.c 		max(mdev->entity_internal_idx_max, entity->internal_idx);
mdev              613 drivers/media/mc/mc-device.c 	media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj);
mdev              617 drivers/media/mc/mc-device.c 		media_gobj_create(mdev, MEDIA_GRAPH_PAD,
mdev              621 drivers/media/mc/mc-device.c 	list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
mdev              624 drivers/media/mc/mc-device.c 	if (mdev->entity_internal_idx_max
mdev              625 drivers/media/mc/mc-device.c 	    >= mdev->pm_count_walk.ent_enum.idx_max) {
mdev              633 drivers/media/mc/mc-device.c 		ret = media_graph_walk_init(&new, mdev);
mdev              635 drivers/media/mc/mc-device.c 			mutex_unlock(&mdev->graph_mutex);
mdev              638 drivers/media/mc/mc-device.c 		media_graph_walk_cleanup(&mdev->pm_count_walk);
mdev              639 drivers/media/mc/mc-device.c 		mdev->pm_count_walk = new;
mdev              641 drivers/media/mc/mc-device.c 	mutex_unlock(&mdev->graph_mutex);
mdev              649 drivers/media/mc/mc-device.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              654 drivers/media/mc/mc-device.c 	ida_free(&mdev->entity_internal_idx, entity->internal_idx);
mdev              657 drivers/media/mc/mc-device.c 	list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
mdev              676 drivers/media/mc/mc-device.c 	entity->graph_obj.mdev = NULL;
mdev              681 drivers/media/mc/mc-device.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              683 drivers/media/mc/mc-device.c 	if (mdev == NULL)
mdev              686 drivers/media/mc/mc-device.c 	mutex_lock(&mdev->graph_mutex);
mdev              688 drivers/media/mc/mc-device.c 	mutex_unlock(&mdev->graph_mutex);
mdev              702 drivers/media/mc/mc-device.c void media_device_init(struct media_device *mdev)
mdev              704 drivers/media/mc/mc-device.c 	INIT_LIST_HEAD(&mdev->entities);
mdev              705 drivers/media/mc/mc-device.c 	INIT_LIST_HEAD(&mdev->interfaces);
mdev              706 drivers/media/mc/mc-device.c 	INIT_LIST_HEAD(&mdev->pads);
mdev              707 drivers/media/mc/mc-device.c 	INIT_LIST_HEAD(&mdev->links);
mdev              708 drivers/media/mc/mc-device.c 	INIT_LIST_HEAD(&mdev->entity_notify);
mdev              710 drivers/media/mc/mc-device.c 	mutex_init(&mdev->req_queue_mutex);
mdev              711 drivers/media/mc/mc-device.c 	mutex_init(&mdev->graph_mutex);
mdev              712 drivers/media/mc/mc-device.c 	ida_init(&mdev->entity_internal_idx);
mdev              714 drivers/media/mc/mc-device.c 	atomic_set(&mdev->request_id, 0);
mdev              716 drivers/media/mc/mc-device.c 	dev_dbg(mdev->dev, "Media device initialized\n");
mdev              720 drivers/media/mc/mc-device.c void media_device_cleanup(struct media_device *mdev)
mdev              722 drivers/media/mc/mc-device.c 	ida_destroy(&mdev->entity_internal_idx);
mdev              723 drivers/media/mc/mc-device.c 	mdev->entity_internal_idx_max = 0;
mdev              724 drivers/media/mc/mc-device.c 	media_graph_walk_cleanup(&mdev->pm_count_walk);
mdev              725 drivers/media/mc/mc-device.c 	mutex_destroy(&mdev->graph_mutex);
mdev              726 drivers/media/mc/mc-device.c 	mutex_destroy(&mdev->req_queue_mutex);
mdev              730 drivers/media/mc/mc-device.c int __must_check __media_device_register(struct media_device *mdev,
mdev              741 drivers/media/mc/mc-device.c 	mdev->devnode = devnode;
mdev              743 drivers/media/mc/mc-device.c 	devnode->parent = mdev->dev;
mdev              747 drivers/media/mc/mc-device.c 	mdev->topology_version = 0;
mdev              749 drivers/media/mc/mc-device.c 	ret = media_devnode_register(mdev, devnode, owner);
mdev              752 drivers/media/mc/mc-device.c 		mdev->devnode = NULL;
mdev              759 drivers/media/mc/mc-device.c 		mdev->devnode = NULL;
mdev              765 drivers/media/mc/mc-device.c 	dev_dbg(mdev->dev, "Media device registered\n");
mdev              771 drivers/media/mc/mc-device.c int __must_check media_device_register_entity_notify(struct media_device *mdev,
mdev              774 drivers/media/mc/mc-device.c 	mutex_lock(&mdev->graph_mutex);
mdev              775 drivers/media/mc/mc-device.c 	list_add_tail(&nptr->list, &mdev->entity_notify);
mdev              776 drivers/media/mc/mc-device.c 	mutex_unlock(&mdev->graph_mutex);
mdev              784 drivers/media/mc/mc-device.c static void __media_device_unregister_entity_notify(struct media_device *mdev,
mdev              790 drivers/media/mc/mc-device.c void media_device_unregister_entity_notify(struct media_device *mdev,
mdev              793 drivers/media/mc/mc-device.c 	mutex_lock(&mdev->graph_mutex);
mdev              794 drivers/media/mc/mc-device.c 	__media_device_unregister_entity_notify(mdev, nptr);
mdev              795 drivers/media/mc/mc-device.c 	mutex_unlock(&mdev->graph_mutex);
mdev              799 drivers/media/mc/mc-device.c void media_device_unregister(struct media_device *mdev)
mdev              806 drivers/media/mc/mc-device.c 	if (mdev == NULL)
mdev              809 drivers/media/mc/mc-device.c 	mutex_lock(&mdev->graph_mutex);
mdev              812 drivers/media/mc/mc-device.c 	if (!media_devnode_is_registered(mdev->devnode)) {
mdev              813 drivers/media/mc/mc-device.c 		mutex_unlock(&mdev->graph_mutex);
mdev              818 drivers/media/mc/mc-device.c 	media_devnode_unregister_prepare(mdev->devnode);
mdev              821 drivers/media/mc/mc-device.c 	list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
mdev              825 drivers/media/mc/mc-device.c 	list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list)
mdev              826 drivers/media/mc/mc-device.c 		__media_device_unregister_entity_notify(mdev, notify);
mdev              829 drivers/media/mc/mc-device.c 	list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
mdev              840 drivers/media/mc/mc-device.c 	mutex_unlock(&mdev->graph_mutex);
mdev              842 drivers/media/mc/mc-device.c 	dev_dbg(mdev->dev, "Media device unregistered\n");
mdev              844 drivers/media/mc/mc-device.c 	device_remove_file(&mdev->devnode->dev, &dev_attr_model);
mdev              845 drivers/media/mc/mc-device.c 	media_devnode_unregister(mdev->devnode);
mdev              847 drivers/media/mc/mc-device.c 	mdev->devnode = NULL;
mdev              852 drivers/media/mc/mc-device.c void media_device_pci_init(struct media_device *mdev,
mdev              856 drivers/media/mc/mc-device.c 	mdev->dev = &pci_dev->dev;
mdev              859 drivers/media/mc/mc-device.c 		strscpy(mdev->model, name, sizeof(mdev->model));
mdev              861 drivers/media/mc/mc-device.c 		strscpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
mdev              863 drivers/media/mc/mc-device.c 	sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
mdev              865 drivers/media/mc/mc-device.c 	mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
mdev              868 drivers/media/mc/mc-device.c 	media_device_init(mdev);
mdev              874 drivers/media/mc/mc-device.c void __media_device_usb_init(struct media_device *mdev,
mdev              879 drivers/media/mc/mc-device.c 	mdev->dev = &udev->dev;
mdev              882 drivers/media/mc/mc-device.c 		strscpy(mdev->driver_name, driver_name,
mdev              883 drivers/media/mc/mc-device.c 			sizeof(mdev->driver_name));
mdev              886 drivers/media/mc/mc-device.c 		strscpy(mdev->model, board_name, sizeof(mdev->model));
mdev              888 drivers/media/mc/mc-device.c 		strscpy(mdev->model, udev->product, sizeof(mdev->model));
mdev              890 drivers/media/mc/mc-device.c 		strscpy(mdev->model, "unknown model", sizeof(mdev->model));
mdev              892 drivers/media/mc/mc-device.c 		strscpy(mdev->serial, udev->serial, sizeof(mdev->serial));
mdev              893 drivers/media/mc/mc-device.c 	usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
mdev              894 drivers/media/mc/mc-device.c 	mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
mdev              896 drivers/media/mc/mc-device.c 	media_device_init(mdev);
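Note: from a driver's point of view, the mc-device.c entries above add up to one life cycle: media_device_init(), entity registration against the media_device, media_device_register() to expose the devnode, and teardown in the opposite order (media_device_unregister() also drops any still-registered entities). A sketch of that ordering with illustrative names and trimmed error handling:

#include <linux/module.h>
#include <linux/string.h>
#include <media/media-device.h>
#include <media/media-entity.h>

static struct media_device demo_mdev;
static struct media_entity demo_entity = {
        .name = "demo-entity",
};

static int demo_media_register(struct device *dev)
{
        int ret;

        demo_mdev.dev = dev;
        strscpy(demo_mdev.model, "demo model", sizeof(demo_mdev.model));
        media_device_init(&demo_mdev);

        ret = media_device_register_entity(&demo_mdev, &demo_entity);
        if (ret)
                goto err_cleanup;

        ret = media_device_register(&demo_mdev);
        if (ret)
                goto err_unregister_entity;

        return 0;

err_unregister_entity:
        media_device_unregister_entity(&demo_entity);
err_cleanup:
        media_device_cleanup(&demo_mdev);
        return ret;
}

static void demo_media_unregister(void)
{
        media_device_unregister(&demo_mdev);    /* also unregisters entities */
        media_device_cleanup(&demo_mdev);
}
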
mdev              211 drivers/media/mc/mc-devnode.c int __must_check media_devnode_register(struct media_device *mdev,
mdev              232 drivers/media/mc/mc-devnode.c 	devnode->media_dev = mdev;
mdev               99 drivers/media/mc/mc-entity.c 		dev_dbg(gobj->mdev->dev,
mdev              108 drivers/media/mc/mc-entity.c 		dev_dbg(gobj->mdev->dev,
mdev              121 drivers/media/mc/mc-entity.c 		dev_dbg(gobj->mdev->dev,
mdev              134 drivers/media/mc/mc-entity.c 		dev_dbg(gobj->mdev->dev,
mdev              145 drivers/media/mc/mc-entity.c void media_gobj_create(struct media_device *mdev,
mdev              149 drivers/media/mc/mc-entity.c 	BUG_ON(!mdev);
mdev              151 drivers/media/mc/mc-entity.c 	gobj->mdev = mdev;
mdev              154 drivers/media/mc/mc-entity.c 	gobj->id = media_gobj_gen_id(type, ++mdev->id);
mdev              158 drivers/media/mc/mc-entity.c 		list_add_tail(&gobj->list, &mdev->entities);
mdev              161 drivers/media/mc/mc-entity.c 		list_add_tail(&gobj->list, &mdev->pads);
mdev              164 drivers/media/mc/mc-entity.c 		list_add_tail(&gobj->list, &mdev->links);
mdev              167 drivers/media/mc/mc-entity.c 		list_add_tail(&gobj->list, &mdev->interfaces);
mdev              171 drivers/media/mc/mc-entity.c 	mdev->topology_version++;
mdev              179 drivers/media/mc/mc-entity.c 	if (gobj->mdev == NULL)
mdev              184 drivers/media/mc/mc-entity.c 	gobj->mdev->topology_version++;
mdev              189 drivers/media/mc/mc-entity.c 	gobj->mdev = NULL;
mdev              200 drivers/media/mc/mc-entity.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              209 drivers/media/mc/mc-entity.c 	if (mdev)
mdev              210 drivers/media/mc/mc-entity.c 		mutex_lock(&mdev->graph_mutex);
mdev              215 drivers/media/mc/mc-entity.c 		if (mdev)
mdev              216 drivers/media/mc/mc-entity.c 			media_gobj_create(mdev, MEDIA_GRAPH_PAD,
mdev              220 drivers/media/mc/mc-entity.c 	if (mdev)
mdev              221 drivers/media/mc/mc-entity.c 		mutex_unlock(&mdev->graph_mutex);
mdev              278 drivers/media/mc/mc-entity.c 	struct media_graph *graph, struct media_device *mdev)
mdev              280 drivers/media/mc/mc-entity.c 	return media_entity_enum_init(&graph->ent_enum, mdev);
mdev              303 drivers/media/mc/mc-entity.c 	dev_dbg(entity->graph_obj.mdev->dev,
mdev              319 drivers/media/mc/mc-entity.c 		dev_dbg(entity->graph_obj.mdev->dev,
mdev              332 drivers/media/mc/mc-entity.c 		dev_dbg(entity->graph_obj.mdev->dev,
mdev              341 drivers/media/mc/mc-entity.c 	dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n",
mdev              361 drivers/media/mc/mc-entity.c 	dev_dbg(entity->graph_obj.mdev->dev,
mdev              410 drivers/media/mc/mc-entity.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              417 drivers/media/mc/mc-entity.c 		ret = media_graph_walk_init(&pipe->graph, mdev);
mdev              476 drivers/media/mc/mc-entity.c 				dev_dbg(entity->graph_obj.mdev->dev,
mdev              490 drivers/media/mc/mc-entity.c 			dev_dbg(entity->graph_obj.mdev->dev,
mdev              535 drivers/media/mc/mc-entity.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              538 drivers/media/mc/mc-entity.c 	mutex_lock(&mdev->graph_mutex);
mdev              540 drivers/media/mc/mc-entity.c 	mutex_unlock(&mdev->graph_mutex);
mdev              576 drivers/media/mc/mc-entity.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              578 drivers/media/mc/mc-entity.c 	mutex_lock(&mdev->graph_mutex);
mdev              580 drivers/media/mc/mc-entity.c 	mutex_unlock(&mdev->graph_mutex);
mdev              678 drivers/media/mc/mc-entity.c 	media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
mdev              696 drivers/media/mc/mc-entity.c 	media_gobj_create(sink->graph_obj.mdev, MEDIA_GRAPH_LINK,
mdev              710 drivers/media/mc/mc-entity.c int media_create_pad_links(const struct media_device *mdev,
mdev              733 drivers/media/mc/mc-entity.c 		media_device_for_each_entity(source, mdev) {
mdev              736 drivers/media/mc/mc-entity.c 			media_device_for_each_entity(sink, mdev) {
mdev              757 drivers/media/mc/mc-entity.c 	media_device_for_each_entity(entity, mdev) {
mdev              789 drivers/media/mc/mc-entity.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              792 drivers/media/mc/mc-entity.c 	if (mdev == NULL)
mdev              795 drivers/media/mc/mc-entity.c 	mutex_lock(&mdev->graph_mutex);
mdev              797 drivers/media/mc/mc-entity.c 	mutex_unlock(&mdev->graph_mutex);
mdev              828 drivers/media/mc/mc-entity.c 	struct media_device *mdev;
mdev              852 drivers/media/mc/mc-entity.c 	mdev = source->graph_obj.mdev;
mdev              854 drivers/media/mc/mc-entity.c 	if (mdev->ops && mdev->ops->link_notify) {
mdev              855 drivers/media/mc/mc-entity.c 		ret = mdev->ops->link_notify(link, flags,
mdev              863 drivers/media/mc/mc-entity.c 	if (mdev->ops && mdev->ops->link_notify)
mdev              864 drivers/media/mc/mc-entity.c 		mdev->ops->link_notify(link, flags,
mdev              875 drivers/media/mc/mc-entity.c 	mutex_lock(&link->graph_obj.mdev->graph_mutex);
mdev              877 drivers/media/mc/mc-entity.c 	mutex_unlock(&link->graph_obj.mdev->graph_mutex);
mdev              920 drivers/media/mc/mc-entity.c static void media_interface_init(struct media_device *mdev,
mdev              929 drivers/media/mc/mc-entity.c 	media_gobj_create(mdev, gobj_type, &intf->graph_obj);
mdev              934 drivers/media/mc/mc-entity.c struct media_intf_devnode *media_devnode_create(struct media_device *mdev,
mdev              947 drivers/media/mc/mc-entity.c 	media_interface_init(mdev, &devnode->intf, MEDIA_GRAPH_INTF_DEVNODE,
mdev              977 drivers/media/mc/mc-entity.c 	media_gobj_create(intf->graph_obj.mdev, MEDIA_GRAPH_LINK,
mdev              994 drivers/media/mc/mc-entity.c 	struct media_device *mdev = link->graph_obj.mdev;
mdev              997 drivers/media/mc/mc-entity.c 	if (mdev == NULL)
mdev             1000 drivers/media/mc/mc-entity.c 	mutex_lock(&mdev->graph_mutex);
mdev             1002 drivers/media/mc/mc-entity.c 	mutex_unlock(&mdev->graph_mutex);
mdev             1018 drivers/media/mc/mc-entity.c 	struct media_device *mdev = intf->graph_obj.mdev;
mdev             1021 drivers/media/mc/mc-entity.c 	if (mdev == NULL)
mdev             1024 drivers/media/mc/mc-entity.c 	mutex_lock(&mdev->graph_mutex);
mdev             1026 drivers/media/mc/mc-entity.c 	mutex_unlock(&mdev->graph_mutex);
mdev               64 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
mdev               66 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
mdev               73 drivers/media/mc/mc-request.c 	if (mdev->ops->req_free)
mdev               74 drivers/media/mc/mc-request.c 		mdev->ops->req_free(req);
mdev              121 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
mdev              126 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
mdev              134 drivers/media/mc/mc-request.c 	mutex_lock(&mdev->req_queue_mutex);
mdev              144 drivers/media/mc/mc-request.c 		dev_dbg(mdev->dev,
mdev              148 drivers/media/mc/mc-request.c 		mutex_unlock(&mdev->req_queue_mutex);
mdev              152 drivers/media/mc/mc-request.c 	ret = mdev->ops->req_validate(req);
mdev              175 drivers/media/mc/mc-request.c 		mdev->ops->req_queue(req);
mdev              177 drivers/media/mc/mc-request.c 	mutex_unlock(&mdev->req_queue_mutex);
mdev              180 drivers/media/mc/mc-request.c 		dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
mdev              190 drivers/media/mc/mc-request.c 	struct media_device *mdev = req->mdev;
mdev              196 drivers/media/mc/mc-request.c 		dev_dbg(mdev->dev,
mdev              203 drivers/media/mc/mc-request.c 		dev_dbg(mdev->dev,
mdev              247 drivers/media/mc/mc-request.c media_request_get_by_fd(struct media_device *mdev, int request_fd)
mdev              252 drivers/media/mc/mc-request.c 	if (!mdev || !mdev->ops ||
mdev              253 drivers/media/mc/mc-request.c 	    !mdev->ops->req_validate || !mdev->ops->req_queue)
mdev              263 drivers/media/mc/mc-request.c 	if (req->mdev != mdev)
mdev              283 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
mdev              288 drivers/media/mc/mc-request.c int media_request_alloc(struct media_device *mdev, int *alloc_fd)
mdev              296 drivers/media/mc/mc-request.c 	if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
mdev              309 drivers/media/mc/mc-request.c 	if (mdev->ops->req_alloc)
mdev              310 drivers/media/mc/mc-request.c 		req = mdev->ops->req_alloc(mdev);
mdev              319 drivers/media/mc/mc-request.c 	req->mdev = mdev;
mdev              332 drivers/media/mc/mc-request.c 		 atomic_inc_return(&mdev->request_id), fd);
mdev              333 drivers/media/mc/mc-request.c 	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
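Note: the mc-request.c entries above implement the request lookup and reference counting that drivers consume: userspace passes a request file descriptor, the core resolves it against the owning media_device, and the caller must drop the reference when done. A minimal consumer-side sketch (the fd value and helper name are stand-ins):

#include <linux/err.h>
#include <media/media-device.h>
#include <media/media-request.h>

static int demo_use_request(struct media_device *mdev, int request_fd)
{
        struct media_request *req;

        req = media_request_get_by_fd(mdev, request_fd);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* ... associate buffers/controls with the request here ... */

        media_request_put(req);
        return 0;
}
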
mdev             1809 drivers/media/pci/intel/ipu3/ipu3-cio2.c 	cio2->v4l2_dev.mdev = &cio2->media_dev;
mdev             1039 drivers/media/pci/saa7134/saa7134-core.c 	dev->v4l2_dev.mdev = dev->media_dev;
mdev              768 drivers/media/pci/saa7134/saa7134-video.c 	struct media_device *mdev = dev->media_dev;
mdev              773 drivers/media/pci/saa7134/saa7134-video.c 	if (!mdev || !dev->decoder)
mdev              283 drivers/media/platform/exynos4-is/fimc-isp-video.c 		mutex_lock(&me->graph_obj.mdev->graph_mutex);
mdev              291 drivers/media/platform/exynos4-is/fimc-isp-video.c 		mutex_unlock(&me->graph_obj.mdev->graph_mutex);
mdev              307 drivers/media/platform/exynos4-is/fimc-isp-video.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              321 drivers/media/platform/exynos4-is/fimc-isp-video.c 		mutex_lock(&mdev->graph_mutex);
mdev              323 drivers/media/platform/exynos4-is/fimc-isp-video.c 		mutex_unlock(&mdev->graph_mutex);
mdev              483 drivers/media/platform/exynos4-is/fimc-lite.c 	mutex_lock(&me->graph_obj.mdev->graph_mutex);
mdev              491 drivers/media/platform/exynos4-is/fimc-lite.c 	mutex_unlock(&me->graph_obj.mdev->graph_mutex);
mdev              524 drivers/media/platform/exynos4-is/fimc-lite.c 		mutex_lock(&entity->graph_obj.mdev->graph_mutex);
mdev              526 drivers/media/platform/exynos4-is/fimc-lite.c 		mutex_unlock(&entity->graph_obj.mdev->graph_mutex);
mdev             1183 drivers/media/platform/exynos4-is/media-dev.c 		&container_of(link->graph_obj.mdev, struct fimc_md,
mdev             1191 drivers/media/platform/exynos4-is/media-dev.c 						   link->graph_obj.mdev);
mdev             1448 drivers/media/platform/exynos4-is/media-dev.c 	v4l2_dev->mdev = &fmd->media_dev;
mdev              164 drivers/media/platform/exynos4-is/media-dev.h 	return me->graph_obj.mdev == NULL ? NULL :
mdev              165 drivers/media/platform/exynos4-is/media-dev.h 		container_of(me->graph_obj.mdev, struct fimc_md, media_dev);
mdev              175 drivers/media/platform/exynos4-is/media-dev.h 	mutex_lock(&ve->vdev.entity.graph_obj.mdev->graph_mutex);
mdev              180 drivers/media/platform/exynos4-is/media-dev.h 	mutex_unlock(&ve->vdev.entity.graph_obj.mdev->graph_mutex);
mdev             1692 drivers/media/platform/omap3isp/isp.c 	isp->v4l2_dev.mdev = &isp->media_dev;
mdev             2605 drivers/media/platform/omap3isp/ispccdc.c 	ccdc->subdev.dev = vdev->mdev->dev;
mdev             1034 drivers/media/platform/omap3isp/ispccp2.c 	ccp2->subdev.dev = vdev->mdev->dev;
mdev             1201 drivers/media/platform/omap3isp/ispcsi2.c 	csi2->subdev.dev = vdev->mdev->dev;
mdev             2228 drivers/media/platform/omap3isp/isppreview.c 	prev->subdev.dev = vdev->mdev->dev;
mdev             1684 drivers/media/platform/omap3isp/ispresizer.c 	res->subdev.dev = vdev->mdev->dev;
mdev             1029 drivers/media/platform/omap3isp/ispstat.c 	stat->subdev.dev = vdev->mdev->dev;
mdev              227 drivers/media/platform/omap3isp/ispvideo.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              231 drivers/media/platform/omap3isp/ispvideo.c 	mutex_lock(&mdev->graph_mutex);
mdev              232 drivers/media/platform/omap3isp/ispvideo.c 	ret = media_graph_walk_init(&graph, mdev);
mdev              234 drivers/media/platform/omap3isp/ispvideo.c 		mutex_unlock(&mdev->graph_mutex);
mdev              259 drivers/media/platform/omap3isp/ispvideo.c 	mutex_unlock(&mdev->graph_mutex);
mdev              936 drivers/media/platform/omap3isp/ispvideo.c 	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
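The ispvideo.c lines above show the usual pipeline-walk pattern: take the owning device's graph_mutex, initialize a media_graph against the media_device, then iterate entities. A hedged sketch of that pattern, under the graph-walk API used in these files; the function and variable names are illustrative.

#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/media-entity.h>

/* Illustrative only: count the entities reachable from @entity. */
static int example_count_pipeline_entities(struct media_entity *entity)
{
	struct media_device *mdev = entity->graph_obj.mdev;
	struct media_graph graph;
	int count = 0;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);
	while (media_graph_walk_next(&graph))
		count++;

	media_graph_walk_cleanup(&graph);
	mutex_unlock(&mdev->graph_mutex);

	return count;
}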
mdev              876 drivers/media/platform/qcom/camss/camss.c 	camss->v4l2_dev.mdev = &camss->media_dev;
mdev              115 drivers/media/platform/rcar-vin/rcar-core.c 	struct rvin_group *group = container_of(link->graph_obj.mdev,
mdev              116 drivers/media/platform/rcar-vin/rcar-core.c 						struct rvin_group, mdev);
mdev              139 drivers/media/platform/rcar-vin/rcar-core.c 	media_device_for_each_entity(entity, &group->mdev)
mdev              246 drivers/media/platform/rcar-vin/rcar-core.c 	media_device_unregister(&group->mdev);
mdev              247 drivers/media/platform/rcar-vin/rcar-core.c 	media_device_cleanup(&group->mdev);
mdev              253 drivers/media/platform/rcar-vin/rcar-core.c 	struct media_device *mdev = &group->mdev;
mdev              268 drivers/media/platform/rcar-vin/rcar-core.c 	mdev->dev = vin->dev;
mdev              269 drivers/media/platform/rcar-vin/rcar-core.c 	mdev->ops = &rvin_media_ops;
mdev              274 drivers/media/platform/rcar-vin/rcar-core.c 	strscpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name));
mdev              275 drivers/media/platform/rcar-vin/rcar-core.c 	strscpy(mdev->model, match->compatible, sizeof(mdev->model));
mdev              276 drivers/media/platform/rcar-vin/rcar-core.c 	snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
mdev              277 drivers/media/platform/rcar-vin/rcar-core.c 		 dev_name(mdev->dev));
mdev              279 drivers/media/platform/rcar-vin/rcar-core.c 	media_device_init(mdev);
mdev              281 drivers/media/platform/rcar-vin/rcar-core.c 	ret = media_device_register(&group->mdev);
mdev              363 drivers/media/platform/rcar-vin/rcar-core.c 	vin->v4l2_dev.mdev = &group->mdev;
mdev              380 drivers/media/platform/rcar-vin/rcar-core.c 	vin->v4l2_dev.mdev = NULL;
mdev             1106 drivers/media/platform/rcar-vin/rcar-dma.c 	struct media_device *mdev;
mdev             1140 drivers/media/platform/rcar-vin/rcar-dma.c 	mdev = vin->vdev.entity.graph_obj.mdev;
mdev             1141 drivers/media/platform/rcar-vin/rcar-dma.c 	mutex_lock(&mdev->graph_mutex);
mdev             1144 drivers/media/platform/rcar-vin/rcar-dma.c 	mutex_unlock(&mdev->graph_mutex);
mdev              248 drivers/media/platform/rcar-vin/rcar-vin.h 	struct media_device mdev;
mdev              936 drivers/media/platform/s3c-camif/camif-capture.c 	return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
mdev              974 drivers/media/platform/s3c-camif/camif-capture.c 	return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b);
mdev              315 drivers/media/platform/s3c-camif/camif-core.c 	v4l2_dev->mdev = md;
mdev              173 drivers/media/platform/stm32/stm32-dcmi.c 	struct media_device		mdev;
mdev             1932 drivers/media/platform/stm32/stm32-dcmi.c 	dcmi->v4l2_dev.mdev = &dcmi->mdev;
mdev             1935 drivers/media/platform/stm32/stm32-dcmi.c 	strscpy(dcmi->mdev.model, DRV_NAME, sizeof(dcmi->mdev.model));
mdev             1936 drivers/media/platform/stm32/stm32-dcmi.c 	snprintf(dcmi->mdev.bus_info, sizeof(dcmi->mdev.bus_info),
mdev             1938 drivers/media/platform/stm32/stm32-dcmi.c 	dcmi->mdev.dev = &pdev->dev;
mdev             1939 drivers/media/platform/stm32/stm32-dcmi.c 	media_device_init(&dcmi->mdev);
mdev             2037 drivers/media/platform/stm32/stm32-dcmi.c 	media_device_cleanup(&dcmi->mdev);
mdev             2053 drivers/media/platform/stm32/stm32-dcmi.c 	media_device_cleanup(&dcmi->mdev);
mdev               73 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	ret = media_device_register(&csi->mdev);
mdev               99 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_unregister(&csi->mdev);
mdev              180 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	csi->mdev.dev = csi->dev;
mdev              181 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	strscpy(csi->mdev.model, "Allwinner Video Capture Device",
mdev              182 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 		sizeof(csi->mdev.model));
mdev              183 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	csi->mdev.hw_revision = 0;
mdev              184 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_init(&csi->mdev);
mdev              185 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	csi->v4l.mdev = &csi->mdev;
mdev              261 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_unregister(&csi->mdev);
mdev              265 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_cleanup(&csi->mdev);
mdev              276 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_unregister(&csi->mdev);
mdev              278 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c 	media_device_cleanup(&csi->mdev);
mdev              129 drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.h 	struct media_device		mdev;
mdev              747 drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c 	csi->v4l2_dev.mdev = &csi->media_dev;
mdev              110 drivers/media/platform/vicodec/vicodec-core.c 	struct media_device	mdev;
mdev             2143 drivers/media/platform/vicodec/vicodec-core.c 	media_device_cleanup(&dev->mdev);
mdev             2164 drivers/media/platform/vicodec/vicodec-core.c 	dev->mdev.dev = &pdev->dev;
mdev             2165 drivers/media/platform/vicodec/vicodec-core.c 	strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model));
mdev             2166 drivers/media/platform/vicodec/vicodec-core.c 	strscpy(dev->mdev.bus_info, "platform:vicodec",
mdev             2167 drivers/media/platform/vicodec/vicodec-core.c 		sizeof(dev->mdev.bus_info));
mdev             2168 drivers/media/platform/vicodec/vicodec-core.c 	media_device_init(&dev->mdev);
mdev             2169 drivers/media/platform/vicodec/vicodec-core.c 	dev->mdev.ops = &vicodec_m2m_media_ops;
mdev             2170 drivers/media/platform/vicodec/vicodec-core.c 	dev->v4l2_dev.mdev = &dev->mdev;
mdev             2212 drivers/media/platform/vicodec/vicodec-core.c 	ret = media_device_register(&dev->mdev);
mdev             2252 drivers/media/platform/vicodec/vicodec-core.c 	media_device_unregister(&dev->mdev);
mdev              194 drivers/media/platform/vim2m.c 	struct media_device	mdev;
mdev             1279 drivers/media/platform/vim2m.c 	media_device_cleanup(&dev->mdev);
mdev             1357 drivers/media/platform/vim2m.c 	dev->mdev.dev = &pdev->dev;
mdev             1358 drivers/media/platform/vim2m.c 	strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
mdev             1359 drivers/media/platform/vim2m.c 	strscpy(dev->mdev.bus_info, "platform:vim2m",
mdev             1360 drivers/media/platform/vim2m.c 		sizeof(dev->mdev.bus_info));
mdev             1361 drivers/media/platform/vim2m.c 	media_device_init(&dev->mdev);
mdev             1362 drivers/media/platform/vim2m.c 	dev->mdev.ops = &m2m_media_ops;
mdev             1363 drivers/media/platform/vim2m.c 	dev->v4l2_dev.mdev = &dev->mdev;
mdev             1372 drivers/media/platform/vim2m.c 	ret = media_device_register(&dev->mdev);
mdev             1403 drivers/media/platform/vim2m.c 	media_device_unregister(&dev->mdev);
mdev               35 drivers/media/platform/vimc/vimc-core.c 	struct media_device mdev;
mdev              175 drivers/media/platform/vimc/vimc-core.c 	ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
mdev              177 drivers/media/platform/vimc/vimc-core.c 		dev_err(vimc->mdev.dev,
mdev              193 drivers/media/platform/vimc/vimc-core.c 	ret = media_device_register(&vimc->mdev);
mdev              195 drivers/media/platform/vimc/vimc-core.c 		dev_err(vimc->mdev.dev,
mdev              203 drivers/media/platform/vimc/vimc-core.c 		dev_err(vimc->mdev.dev,
mdev              212 drivers/media/platform/vimc/vimc-core.c 	media_device_unregister(&vimc->mdev);
mdev              213 drivers/media/platform/vimc/vimc-core.c 	media_device_cleanup(&vimc->mdev);
mdev              229 drivers/media/platform/vimc/vimc-core.c 	media_device_unregister(&vimc->mdev);
mdev              230 drivers/media/platform/vimc/vimc-core.c 	media_device_cleanup(&vimc->mdev);
mdev              294 drivers/media/platform/vimc/vimc-core.c 	memset(&vimc->mdev, 0, sizeof(vimc->mdev));
mdev              307 drivers/media/platform/vimc/vimc-core.c 	vimc->v4l2_dev.mdev = &vimc->mdev;
mdev              310 drivers/media/platform/vimc/vimc-core.c 	strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
mdev              311 drivers/media/platform/vimc/vimc-core.c 		sizeof(vimc->mdev.model));
mdev              312 drivers/media/platform/vimc/vimc-core.c 	snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info),
mdev              314 drivers/media/platform/vimc/vimc-core.c 	vimc->mdev.dev = &pdev->dev;
mdev              315 drivers/media/platform/vimc/vimc-core.c 	media_device_init(&vimc->mdev);
mdev              321 drivers/media/platform/vimc/vimc-core.c 		media_device_cleanup(&vimc->mdev);
mdev              620 drivers/media/platform/vivid/vivid-core.c 	media_device_cleanup(&dev->mdev);
mdev              636 drivers/media/platform/vivid/vivid-core.c 	struct vivid_dev *dev = container_of(req->mdev, struct vivid_dev, mdev);
mdev              685 drivers/media/platform/vivid/vivid-core.c 	dev->v4l2_dev.mdev = &dev->mdev;
mdev              688 drivers/media/platform/vivid/vivid-core.c 	strscpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
mdev              689 drivers/media/platform/vivid/vivid-core.c 	snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
mdev              691 drivers/media/platform/vivid/vivid-core.c 	dev->mdev.dev = &pdev->dev;
mdev              692 drivers/media/platform/vivid/vivid-core.c 	media_device_init(&dev->mdev);
mdev              693 drivers/media/platform/vivid/vivid-core.c 	dev->mdev.ops = &vivid_media_ops;
mdev             1500 drivers/media/platform/vivid/vivid-core.c 	ret = media_device_register(&dev->mdev);
mdev             1502 drivers/media/platform/vivid/vivid-core.c 		dev_err(dev->mdev.dev,
mdev             1585 drivers/media/platform/vivid/vivid-core.c 		media_device_unregister(&dev->mdev);
mdev              128 drivers/media/platform/vivid/vivid-core.h 	struct media_device		mdev;
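The vicodec, vim2m, vimc and vivid entries all repeat the same media controller lifecycle: embed a struct media_device in the driver state, fill in dev/model/bus_info, call media_device_init(), point the v4l2_device's mdev at it so entities and interfaces attach to it, register it once the entities exist, and tear down with media_device_unregister() followed by media_device_cleanup(). A condensed sketch of that lifecycle; struct and function names prefixed example_ are illustrative, the media_device_* calls are the ones quoted above.

#include <linux/platform_device.h>
#include <linux/string.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>

/* Illustrative driver state; only mdev and v4l2_dev matter here. */
struct example_dev {
	struct v4l2_device v4l2_dev;
	struct media_device mdev;
};

static int example_mc_setup(struct example_dev *dev,
			    struct platform_device *pdev)
{
	dev->mdev.dev = &pdev->dev;
	strscpy(dev->mdev.model, "example", sizeof(dev->mdev.model));
	strscpy(dev->mdev.bus_info, "platform:example",
		sizeof(dev->mdev.bus_info));
	media_device_init(&dev->mdev);

	/* Let the V4L2 core register its entities against this mdev. */
	dev->v4l2_dev.mdev = &dev->mdev;

	/* Register only after all entities and links have been created. */
	return media_device_register(&dev->mdev);
}

static void example_mc_teardown(struct example_dev *dev)
{
	media_device_unregister(&dev->mdev);
	media_device_cleanup(&dev->mdev);
}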
mdev              237 drivers/media/platform/vsp1/vsp1_drv.c 	struct media_device *mdev = &vsp1->media_dev;
mdev              243 drivers/media/platform/vsp1/vsp1_drv.c 	mdev->dev = vsp1->dev;
mdev              244 drivers/media/platform/vsp1/vsp1_drv.c 	mdev->hw_revision = vsp1->version;
mdev              245 drivers/media/platform/vsp1/vsp1_drv.c 	strscpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
mdev              246 drivers/media/platform/vsp1/vsp1_drv.c 	snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
mdev              247 drivers/media/platform/vsp1/vsp1_drv.c 		 dev_name(mdev->dev));
mdev              248 drivers/media/platform/vsp1/vsp1_drv.c 	media_device_init(mdev);
mdev              259 drivers/media/platform/vsp1/vsp1_drv.c 	vdev->mdev = mdev;
mdev              471 drivers/media/platform/vsp1/vsp1_drv.c 		ret = media_device_register(mdev);
mdev              563 drivers/media/platform/vsp1/vsp1_video.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              568 drivers/media/platform/vsp1/vsp1_video.c 	ret = media_graph_walk_init(&graph, mdev);
mdev              695 drivers/media/platform/vsp1/vsp1_video.c 	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
mdev              697 drivers/media/platform/vsp1/vsp1_video.c 	mutex_lock(&mdev->graph_mutex);
mdev              699 drivers/media/platform/vsp1/vsp1_video.c 	mutex_unlock(&mdev->graph_mutex);
mdev             1031 drivers/media/platform/vsp1/vsp1_video.c 	struct media_device *mdev = &video->vsp1->media_dev;
mdev             1043 drivers/media/platform/vsp1/vsp1_video.c 	mutex_lock(&mdev->graph_mutex);
mdev             1047 drivers/media/platform/vsp1/vsp1_video.c 		mutex_unlock(&mdev->graph_mutex);
mdev             1053 drivers/media/platform/vsp1/vsp1_video.c 		mutex_unlock(&mdev->graph_mutex);
mdev             1057 drivers/media/platform/vsp1/vsp1_video.c 	mutex_unlock(&mdev->graph_mutex);
mdev              179 drivers/media/platform/xilinx/xilinx-dma.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              184 drivers/media/platform/xilinx/xilinx-dma.c 	mutex_lock(&mdev->graph_mutex);
mdev              187 drivers/media/platform/xilinx/xilinx-dma.c 	ret = media_graph_walk_init(&graph, mdev);
mdev              189 drivers/media/platform/xilinx/xilinx-dma.c 		mutex_unlock(&mdev->graph_mutex);
mdev              211 drivers/media/platform/xilinx/xilinx-dma.c 	mutex_unlock(&mdev->graph_mutex);
mdev              571 drivers/media/platform/xilinx/xilinx-vipp.c 	xdev->v4l2_dev.mdev = &xdev->media_dev;
mdev              460 drivers/media/tuners/si2157.c 	if (cfg->mdev) {
mdev              461 drivers/media/tuners/si2157.c 		dev->mdev = cfg->mdev;
mdev              479 drivers/media/tuners/si2157.c 		ret = media_device_register_entity(cfg->mdev, &dev->ent);
mdev              512 drivers/media/tuners/si2157.c 	if (dev->mdev)
mdev               25 drivers/media/tuners/si2157.h 	struct media_device *mdev;
mdev               34 drivers/media/tuners/si2157_priv.h 	struct media_device	*mdev;
mdev               38 drivers/media/tuners/tda18250.h 	struct media_device *mdev;
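The si2157 and tda18250 entries show the tuner-side half of the pattern: the bridge driver hands the tuner a media_device pointer through its config struct, and the tuner registers its own entity against it only when that pointer is set. A hedged sketch of that registration step; the entity/pad layout and the example_ names are illustrative, not the drivers' actual topology.

#include <media/media-device.h>
#include <media/media-entity.h>

/*
 * Illustrative only: register a tuner entity against a media_device passed
 * in by the bridge (as cfg->mdev above). @pads must live as long as the
 * entity, typically inside the tuner's state struct.
 */
static int example_register_tuner_entity(struct media_device *mdev,
					 struct media_entity *ent,
					 struct media_pad *pads)
{
	int ret;

	if (!mdev)		/* media controller support is optional */
		return 0;

	ent->name = "example tuner";
	ent->function = MEDIA_ENT_F_TUNER;
	pads[0].flags = MEDIA_PAD_FL_SINK;	/* RF in */
	pads[1].flags = MEDIA_PAD_FL_SOURCE;	/* IF/baseband out */

	ret = media_entity_pads_init(ent, 2, pads);
	if (ret)
		return ret;

	return media_device_register_entity(mdev, ent);
}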
mdev              128 drivers/media/usb/au0828/au0828-core.c 	struct media_device *mdev = dev->media_dev;
mdev              131 drivers/media/usb/au0828/au0828-core.c 	if (!mdev || !media_devnode_is_registered(mdev->devnode))
mdev              135 drivers/media/usb/au0828/au0828-core.c 	list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list) {
mdev              138 drivers/media/usb/au0828/au0828-core.c 		media_device_unregister_entity_notify(mdev, notify);
mdev              142 drivers/media/usb/au0828/au0828-core.c 	mutex_lock(&mdev->graph_mutex);
mdev              146 drivers/media/usb/au0828/au0828-core.c 	mutex_unlock(&mdev->graph_mutex);
mdev              199 drivers/media/usb/au0828/au0828-core.c 	struct media_device *mdev;
mdev              201 drivers/media/usb/au0828/au0828-core.c 	mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
mdev              202 drivers/media/usb/au0828/au0828-core.c 	if (!mdev)
mdev              205 drivers/media/usb/au0828/au0828-core.c 	dev->media_dev = mdev;
mdev              295 drivers/media/usb/au0828/au0828-core.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              298 drivers/media/usb/au0828/au0828-core.c 	if (!mdev)
mdev              301 drivers/media/usb/au0828/au0828-core.c 	dev = mdev->source_priv;
mdev              445 drivers/media/usb/au0828/au0828-core.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              448 drivers/media/usb/au0828/au0828-core.c 	if (!mdev)
mdev              451 drivers/media/usb/au0828/au0828-core.c 	dev = mdev->source_priv;
mdev              430 drivers/media/usb/au0828/au0828-dvb.c 	dvb->adapter.mdev = dev->media_dev;
mdev              660 drivers/media/usb/au0828/au0828-video.c 	dev->v4l2_dev.mdev = dev->media_dev;
mdev             1382 drivers/media/usb/cx231xx/cx231xx-cards.c 	struct media_device *mdev;
mdev             1384 drivers/media/usb/cx231xx/cx231xx-cards.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev             1385 drivers/media/usb/cx231xx/cx231xx-cards.c 	if (!mdev)
mdev             1388 drivers/media/usb/cx231xx/cx231xx-cards.c 	media_device_usb_init(mdev, udev, dev->board.name);
mdev             1390 drivers/media/usb/cx231xx/cx231xx-cards.c 	dev->media_dev = mdev;
mdev             1814 drivers/media/usb/cx231xx/cx231xx-cards.c 	dev->v4l2_dev.mdev = dev->media_dev;
mdev              828 drivers/media/usb/cx231xx/cx231xx-dvb.c 		si2157_config.mdev = dev->media_dev;
mdev              873 drivers/media/usb/cx231xx/cx231xx-dvb.c 		si2157_config.mdev = dev->media_dev;
mdev              941 drivers/media/usb/cx231xx/cx231xx-dvb.c 		si2157_config.mdev = dev->media_dev;
mdev             1014 drivers/media/usb/cx231xx/cx231xx-dvb.c 		si2157_config.mdev = dev->media_dev;
mdev             1077 drivers/media/usb/cx231xx/cx231xx-dvb.c 		si2157_config.mdev = dev->media_dev;
mdev               93 drivers/media/usb/cx231xx/cx231xx-video.c 	struct media_device *mdev = dev->media_dev;
mdev               98 drivers/media/usb/cx231xx/cx231xx-video.c 	if (!mdev)
mdev              108 drivers/media/usb/cx231xx/cx231xx-video.c 	media_device_for_each_entity(entity, mdev) {
mdev              390 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	struct media_device *mdev;
mdev              394 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              395 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	if (!mdev)
mdev              398 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	media_device_usb_init(mdev, udev, d->name);
mdev              400 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	dvb_register_media_controller(&adap->dvb_adap, mdev);
mdev              410 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	return media_device_register(adap->dvb_adap.mdev);
mdev              420 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	if (!adap->dvb_adap.mdev)
mdev              423 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	media_device_unregister(adap->dvb_adap.mdev);
mdev              424 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	media_device_cleanup(adap->dvb_adap.mdev);
mdev              425 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	kfree(adap->dvb_adap.mdev);
mdev              426 drivers/media/usb/dvb-usb-v2/dvb_usb_core.c 	adap->dvb_adap.mdev = NULL;
mdev              875 drivers/media/usb/dvb-usb-v2/mxl111sf.c 	struct media_device *mdev = dvb_get_media_controller(&adap->dvb_adap);
mdev              902 drivers/media/usb/dvb-usb-v2/mxl111sf.c 	ret = media_device_register_entity(mdev, &state->tuner);
mdev              102 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	struct media_device *mdev;
mdev              106 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              107 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	if (!mdev)
mdev              110 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	media_device_usb_init(mdev, udev, d->desc->name);
mdev              112 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	dvb_register_media_controller(&adap->dvb_adap, mdev);
mdev              122 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	return media_device_register(adap->dvb_adap.mdev);
mdev              131 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	if (!adap->dvb_adap.mdev)
mdev              136 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	media_device_unregister(adap->dvb_adap.mdev);
mdev              137 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	media_device_cleanup(adap->dvb_adap.mdev);
mdev              138 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	kfree(adap->dvb_adap.mdev);
mdev              139 drivers/media/usb/dvb-usb/dvb-usb-dvb.c 	adap->dvb_adap.mdev = NULL;
mdev             3332 drivers/media/usb/em28xx/em28xx-cards.c 	struct media_device *mdev;
mdev             3334 drivers/media/usb/em28xx/em28xx-cards.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev             3335 drivers/media/usb/em28xx/em28xx-cards.c 	if (!mdev)
mdev             3339 drivers/media/usb/em28xx/em28xx-cards.c 		media_device_usb_init(mdev, udev, udev->product);
mdev             3341 drivers/media/usb/em28xx/em28xx-cards.c 		media_device_usb_init(mdev, udev, udev->manufacturer);
mdev             3343 drivers/media/usb/em28xx/em28xx-cards.c 		media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev));
mdev             3345 drivers/media/usb/em28xx/em28xx-cards.c 	dev->media_dev = mdev;
mdev              995 drivers/media/usb/em28xx/em28xx-dvb.c 	dvb->adapter.mdev = dev->media_dev;
mdev             1245 drivers/media/usb/em28xx/em28xx-dvb.c 	si2157_config.mdev = dev->media_dev;
mdev             1282 drivers/media/usb/em28xx/em28xx-dvb.c 	si2157_config.mdev = dev->media_dev;
mdev             1352 drivers/media/usb/em28xx/em28xx-dvb.c 	si2157_config.mdev = dev->media_dev;
mdev             1392 drivers/media/usb/em28xx/em28xx-dvb.c 	si2157_config.mdev = dev->media_dev;
mdev              908 drivers/media/usb/em28xx/em28xx-video.c 	struct media_device *mdev = dev->media_dev;
mdev              914 drivers/media/usb/em28xx/em28xx-video.c 	if (!mdev || !v4l2->decoder)
mdev             2552 drivers/media/usb/em28xx/em28xx-video.c 	v4l2->v4l2_dev.mdev = dev->media_dev;
mdev              360 drivers/media/usb/siano/smsusb.c 	struct media_device *mdev;
mdev              365 drivers/media/usb/siano/smsusb.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              366 drivers/media/usb/siano/smsusb.c 	if (!mdev)
mdev              369 drivers/media/usb/siano/smsusb.c 	media_device_usb_init(mdev, udev, board->name);
mdev              371 drivers/media/usb/siano/smsusb.c 	ret = media_device_register(mdev);
mdev              373 drivers/media/usb/siano/smsusb.c 		media_device_cleanup(mdev);
mdev              374 drivers/media/usb/siano/smsusb.c 		kfree(mdev);
mdev              380 drivers/media/usb/siano/smsusb.c 	return mdev;
mdev              390 drivers/media/usb/siano/smsusb.c 	void *mdev;
mdev              450 drivers/media/usb/siano/smsusb.c 	mdev = siano_media_device_register(dev, board_id);
mdev              453 drivers/media/usb/siano/smsusb.c 	rc = smscore_register_device(&params, &dev->coredev, 0, mdev);
mdev              458 drivers/media/usb/siano/smsusb.c 		media_device_unregister(mdev);
mdev              460 drivers/media/usb/siano/smsusb.c 		kfree(mdev);
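The USB bridge entries above follow two closely related idioms: au0828 gets a refcounted device from media_device_usb_allocate(), while cx231xx, em28xx, dvb-usb and siano kzalloc a media_device and fill it from the USB descriptors with media_device_usb_init(), then unregister, clean up and kfree it on disconnect. A hedged sketch of the kzalloc variant; the example_ helpers are illustrative, error handling is trimmed.

#include <linux/slab.h>
#include <linux/usb.h>
#include <media/media-device.h>

static struct media_device *example_usb_mc_alloc(struct usb_device *udev,
						 const char *name)
{
	struct media_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	/* Fills dev/model/serial/bus_info from @udev and inits @mdev. */
	media_device_usb_init(mdev, udev, name);
	return mdev;
}

static void example_usb_mc_free(struct media_device *mdev)
{
	if (!mdev)
		return;
	media_device_unregister(mdev);
	media_device_cleanup(mdev);
	kfree(mdev);
}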
mdev             1900 drivers/media/usb/uvc/uvc_driver.c 	media_device_cleanup(&dev->mdev);
mdev             1959 drivers/media/usb/uvc/uvc_driver.c 	if (media_devnode_is_registered(dev->mdev.devnode))
mdev             1960 drivers/media/usb/uvc/uvc_driver.c 		media_device_unregister(&dev->mdev);
mdev             2184 drivers/media/usb/uvc/uvc_driver.c 	dev->mdev.dev = &intf->dev;
mdev             2185 drivers/media/usb/uvc/uvc_driver.c 	strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
mdev             2187 drivers/media/usb/uvc/uvc_driver.c 		strscpy(dev->mdev.serial, udev->serial,
mdev             2188 drivers/media/usb/uvc/uvc_driver.c 			sizeof(dev->mdev.serial));
mdev             2189 drivers/media/usb/uvc/uvc_driver.c 	usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
mdev             2190 drivers/media/usb/uvc/uvc_driver.c 	dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
mdev             2191 drivers/media/usb/uvc/uvc_driver.c 	media_device_init(&dev->mdev);
mdev             2193 drivers/media/usb/uvc/uvc_driver.c 	dev->vdev.mdev = &dev->mdev;
mdev             2234 drivers/media/usb/uvc/uvc_driver.c 	if (media_device_register(&dev->mdev) < 0)
mdev              302 drivers/media/usb/uvc/uvc_queue.c 		     struct media_device *mdev, struct v4l2_buffer *buf)
mdev              307 drivers/media/usb/uvc/uvc_queue.c 	ret = vb2_qbuf(&queue->queue, mdev, buf);
mdev              748 drivers/media/usb/uvc/uvc_v4l2.c 				stream->vdev.v4l2_dev->mdev, buf);
mdev              650 drivers/media/usb/uvc/uvcvideo.h 	struct media_device mdev;
mdev              757 drivers/media/usb/uvc/uvcvideo.h 		     struct media_device *mdev,
mdev             3542 drivers/media/v4l2-core/v4l2-ctrls.c 		     struct media_device *mdev, struct v4l2_ext_controls *cs)
mdev             3549 drivers/media/v4l2-core/v4l2-ctrls.c 		if (!mdev || cs->request_fd < 0)
mdev             3552 drivers/media/v4l2-core/v4l2-ctrls.c 		req = media_request_get_by_fd(mdev, cs->request_fd);
mdev             3910 drivers/media/v4l2-core/v4l2-ctrls.c 			     struct media_device *mdev,
mdev             3918 drivers/media/v4l2-core/v4l2-ctrls.c 		if (!mdev) {
mdev             3930 drivers/media/v4l2-core/v4l2-ctrls.c 		req = media_request_get_by_fd(mdev, cs->request_fd);
mdev             3976 drivers/media/v4l2-core/v4l2-ctrls.c 		       struct media_device *mdev,
mdev             3979 drivers/media/v4l2-core/v4l2-ctrls.c 	return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
mdev             3986 drivers/media/v4l2-core/v4l2-ctrls.c 		     struct media_device *mdev,
mdev             3989 drivers/media/v4l2-core/v4l2-ctrls.c 	return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
mdev              201 drivers/media/v4l2-core/v4l2-dev.c 	if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
mdev              451 drivers/media/v4l2-core/v4l2-dev.c 			mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
mdev              453 drivers/media/v4l2-core/v4l2-dev.c 			mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
mdev              757 drivers/media/v4l2-core/v4l2-dev.c 	if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
mdev              803 drivers/media/v4l2-core/v4l2-dev.c 		ret = media_device_register_entity(vdev->v4l2_dev->mdev,
mdev              812 drivers/media/v4l2-core/v4l2-dev.c 	vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev,
mdev              145 drivers/media/v4l2-core/v4l2-device.c 	if (v4l2_dev->mdev) {
mdev              146 drivers/media/v4l2-core/v4l2-device.c 		err = media_device_register_entity(v4l2_dev->mdev, entity);
mdev              233 drivers/media/v4l2-core/v4l2-device.c 		if (vdev->v4l2_dev->mdev) {
mdev              279 drivers/media/v4l2-core/v4l2-device.c 	if (v4l2_dev->mdev) {
mdev             2198 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
mdev             2201 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
mdev             2219 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
mdev             2222 drivers/media/v4l2-core/v4l2-ioctl.c 					vfd, vfd->v4l2_dev->mdev, p);
mdev             2240 drivers/media/v4l2-core/v4l2-ioctl.c 					  vfd, vfd->v4l2_dev->mdev, p);
mdev             2243 drivers/media/v4l2-core/v4l2-ioctl.c 					  vfd, vfd->v4l2_dev->mdev, p);
mdev             2873 drivers/media/v4l2-core/v4l2-ioctl.c 		req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
mdev               22 drivers/media/v4l2-core/v4l2-mc.c int v4l2_mc_create_media_graph(struct media_device *mdev)
mdev               33 drivers/media/v4l2-core/v4l2-mc.c 	if (!mdev)
mdev               36 drivers/media/v4l2-core/v4l2-mc.c 	media_device_for_each_entity(entity, mdev) {
mdev               67 drivers/media/v4l2-core/v4l2-mc.c 		dev_warn(mdev->dev, "Didn't find any I/O entity\n");
mdev               80 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n");
mdev               84 drivers/media/v4l2-core/v4l2-mc.c 		media_device_for_each_entity(entity, mdev) {
mdev               91 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "Failed to create a sensor link\n");
mdev              101 drivers/media/v4l2-core/v4l2-mc.c 		dev_warn(mdev->dev, "Decoder not found\n");
mdev              113 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "Couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
mdev              121 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "Couldn't create tuner->PLL link)\n");
mdev              130 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "get decoder and/or PLL pad(s): (%d, %d)\n",
mdev              138 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't link PLL to decoder\n");
mdev              147 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
mdev              164 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
mdev              172 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n");
mdev              185 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
mdev              192 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n");
mdev              200 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
mdev              207 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't link decoder output to SDR\n");
mdev              215 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
mdev              222 drivers/media/v4l2-core/v4l2-mc.c 			dev_warn(mdev->dev, "couldn't link decoder output to VBI\n");
mdev              229 drivers/media/v4l2-core/v4l2-mc.c 	media_device_for_each_entity(entity, mdev) {
mdev              237 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
mdev              249 drivers/media/v4l2-core/v4l2-mc.c 				dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
mdev              271 drivers/media/v4l2-core/v4l2-mc.c 	struct media_device *mdev = vdev->entity.graph_obj.mdev;
mdev              274 drivers/media/v4l2-core/v4l2-mc.c 	if (!mdev)
mdev              277 drivers/media/v4l2-core/v4l2-mc.c 	mutex_lock(&mdev->graph_mutex);
mdev              278 drivers/media/v4l2-core/v4l2-mc.c 	if (!mdev->enable_source)
mdev              280 drivers/media/v4l2-core/v4l2-mc.c 	err = mdev->enable_source(&vdev->entity, &vdev->pipe);
mdev              284 drivers/media/v4l2-core/v4l2-mc.c 	mutex_unlock(&mdev->graph_mutex);
mdev              291 drivers/media/v4l2-core/v4l2-mc.c 	struct media_device *mdev = vdev->entity.graph_obj.mdev;
mdev              293 drivers/media/v4l2-core/v4l2-mc.c 	if (mdev) {
mdev              294 drivers/media/v4l2-core/v4l2-mc.c 		mutex_lock(&mdev->graph_mutex);
mdev              295 drivers/media/v4l2-core/v4l2-mc.c 		if (mdev->disable_source)
mdev              296 drivers/media/v4l2-core/v4l2-mc.c 			mdev->disable_source(&vdev->entity);
mdev              297 drivers/media/v4l2-core/v4l2-mc.c 		mutex_unlock(&mdev->graph_mutex);
mdev              428 drivers/media/v4l2-core/v4l2-mc.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              432 drivers/media/v4l2-core/v4l2-mc.c 	mutex_lock(&mdev->graph_mutex);
mdev              439 drivers/media/v4l2-core/v4l2-mc.c 	ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
mdev              443 drivers/media/v4l2-core/v4l2-mc.c 	mutex_unlock(&mdev->graph_mutex);
mdev              452 drivers/media/v4l2-core/v4l2-mc.c 	struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
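The v4l2-mc.c entries above include the enable_source/disable_source hooks: a video device asks its owning media_device, if there is one, to route a source to it before streaming and to release that routing afterwards, always under graph_mutex. A condensed sketch mirroring those lines; the example_ function names are illustrative.

#include <linux/mutex.h>
#include <media/media-device.h>
#include <media/v4l2-dev.h>

/* Illustrative only: ask the media_device to route a source to @vdev. */
static int example_pipeline_enable(struct video_device *vdev)
{
	struct media_device *mdev = vdev->entity.graph_obj.mdev;
	int ret = 0;

	if (!mdev)		/* no media controller: nothing to do */
		return 0;

	mutex_lock(&mdev->graph_mutex);
	if (mdev->enable_source)
		ret = mdev->enable_source(&vdev->entity, &vdev->pipe);
	mutex_unlock(&mdev->graph_mutex);

	return ret;
}

static void example_pipeline_disable(struct video_device *vdev)
{
	struct media_device *mdev = vdev->entity.graph_obj.mdev;

	if (!mdev)
		return;

	mutex_lock(&mdev->graph_mutex);
	if (mdev->disable_source)
		mdev->disable_source(&vdev->entity);
	mutex_unlock(&mdev->graph_mutex);
}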
mdev              502 drivers/media/v4l2-core/v4l2-mem2mem.c 	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
mdev              527 drivers/media/v4l2-core/v4l2-mem2mem.c 	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
mdev              723 drivers/media/v4l2-core/v4l2-mem2mem.c static int v4l2_m2m_register_entity(struct media_device *mdev,
mdev              774 drivers/media/v4l2-core/v4l2-mem2mem.c 	ret = media_device_register_entity(mdev, entity);
mdev              784 drivers/media/v4l2-core/v4l2-mem2mem.c 	struct media_device *mdev = vdev->v4l2_dev->mdev;
mdev              788 drivers/media/v4l2-core/v4l2-mem2mem.c 	if (!mdev)
mdev              798 drivers/media/v4l2-core/v4l2-mem2mem.c 	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
mdev              802 drivers/media/v4l2-core/v4l2-mem2mem.c 	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
mdev              806 drivers/media/v4l2-core/v4l2-mem2mem.c 	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
mdev              823 drivers/media/v4l2-core/v4l2-mem2mem.c 	m2m_dev->intf_devnode = media_devnode_create(mdev,
mdev               66 drivers/media/v4l2-core/v4l2-subdev.c 	if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
mdev               69 drivers/media/v4l2-core/v4l2-subdev.c 		owner = sd->entity.graph_obj.mdev->dev->driver->owner;
mdev              375 drivers/media/v4l2-core/v4l2-subdev.c 					vdev, sd->v4l2_dev->mdev, arg);
mdev              381 drivers/media/v4l2-core/v4l2-subdev.c 					vdev, sd->v4l2_dev->mdev, arg);
mdev              387 drivers/media/v4l2-core/v4l2-subdev.c 					  vdev, sd->v4l2_dev->mdev, arg);
mdev               33 drivers/misc/mic/card/mic_debugfs.c 	struct mic_device *mdev = &mdrv->mdev;
mdev               35 drivers/misc/mic/card/mic_debugfs.c 	mic_send_intr(mdev, 0);
mdev               37 drivers/misc/mic/card/mic_debugfs.c 	mic_send_intr(mdev, 1);
mdev               39 drivers/misc/mic/card/mic_debugfs.c 	mic_send_intr(mdev, 2);
mdev               41 drivers/misc/mic/card/mic_debugfs.c 	mic_send_intr(mdev, 3);
mdev               31 drivers/misc/mic/card/mic_device.c 	struct mic_device *mdev = &mdrv->mdev;
mdev               36 drivers/misc/mic/card/mic_device.c 	lo = mic_read_spad(&mdrv->mdev, MIC_DPLO_SPAD);
mdev               37 drivers/misc/mic/card/mic_device.c 	hi = mic_read_spad(&mdrv->mdev, MIC_DPHI_SPAD);
mdev               40 drivers/misc/mic/card/mic_device.c 	mdrv->dp = mic_card_map(mdev, dp_dma_addr, MIC_DP_SIZE);
mdev               57 drivers/misc/mic/card/mic_device.c 	mic_card_unmap(&g_drv->mdev, g_drv->dp);
mdev              192 drivers/misc/mic/card/mic_device.c 	mic_ack_interrupt(&mdrv->mdev);
mdev              204 drivers/misc/mic/card/mic_device.c 	mic_send_intr(&mdrv->mdev, db);
mdev              219 drivers/misc/mic/card/mic_device.c 	return mic_card_map(&mdrv->mdev, pa, len);
mdev              226 drivers/misc/mic/card/mic_device.c 	mic_card_unmap(&mdrv->mdev, va);
mdev              263 drivers/misc/mic/card/mic_device.c 	mic_ack_interrupt(&mdrv->mdev);
mdev              282 drivers/misc/mic/card/mic_device.c 	mic_send_intr(&mdrv->mdev, db);
mdev              290 drivers/misc/mic/card/mic_device.c 	return mic_card_map(&mdrv->mdev, pa, len);
mdev              297 drivers/misc/mic/card/mic_device.c 	mic_card_unmap(&mdrv->mdev, va);
mdev              380 drivers/misc/mic/card/mic_device.c 					   0, node_id, &mdrv->mdev.mmio, NULL,
mdev               75 drivers/misc/mic/card/mic_device.h 	struct mic_device mdev;
mdev              124 drivers/misc/mic/card/mic_device.h u32 mic_read_spad(struct mic_device *mdev, unsigned int idx);
mdev              125 drivers/misc/mic/card/mic_device.h void mic_send_intr(struct mic_device *mdev, int doorbell);
mdev              128 drivers/misc/mic/card/mic_device.h u32 mic_ack_interrupt(struct mic_device *mdev);
mdev              131 drivers/misc/mic/card/mic_device.h mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size);
mdev              132 drivers/misc/mic/card/mic_device.h void mic_card_unmap(struct mic_device *mdev, void __iomem *addr);
mdev               36 drivers/misc/mic/card/mic_x100.c u32 mic_read_spad(struct mic_device *mdev, unsigned int idx)
mdev               38 drivers/misc/mic/card/mic_x100.c 	return mic_mmio_read(&mdev->mmio,
mdev               48 drivers/misc/mic/card/mic_x100.c void mic_send_intr(struct mic_device *mdev, int doorbell)
mdev               50 drivers/misc/mic/card/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              102 drivers/misc/mic/card/mic_x100.c u32 mic_ack_interrupt(struct mic_device *mdev)
mdev              173 drivers/misc/mic/card/mic_x100.c mic_card_map(struct mic_device *mdev, dma_addr_t addr, size_t size)
mdev              185 drivers/misc/mic/card/mic_x100.c void mic_card_unmap(struct mic_device *mdev, void __iomem *addr)
mdev              222 drivers/misc/mic/card/mic_x100.c 	mic_ack_interrupt(&mbdev_to_mdrv(mbdev)->mdev);
mdev              234 drivers/misc/mic/card/mic_x100.c 	struct mic_device *mdev = &mdrv->mdev;
mdev              243 drivers/misc/mic/card/mic_x100.c 	mdev->mmio.pa = MIC_X100_MMIO_BASE;
mdev              244 drivers/misc/mic/card/mic_x100.c 	mdev->mmio.len = MIC_X100_MMIO_LEN;
mdev              245 drivers/misc/mic/card/mic_x100.c 	mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
mdev              247 drivers/misc/mic/card/mic_x100.c 	if (!mdev->mmio.va) {
mdev              256 drivers/misc/mic/card/mic_x100.c 					       mdrv->mdev.mmio.va);
mdev               32 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(dev);
mdev               34 drivers/misc/mic/host/mic_boot.c 	return mic_map_single(mdev, va, size);
mdev               41 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(dev);
mdev               43 drivers/misc/mic/host/mic_boot.c 	mic_unmap_single(mdev, dma_addr, size);
mdev               56 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev               58 drivers/misc/mic/host/mic_boot.c 	return mic_request_threaded_irq(mdev, func, NULL, name, data,
mdev               65 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev               67 drivers/misc/mic/host/mic_boot.c 	mic_free_irq(mdev, cookie, data);
mdev               72 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev               74 drivers/misc/mic/host/mic_boot.c 	mdev->ops->intr_workarounds(mdev);
mdev               79 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev               81 drivers/misc/mic/host/mic_boot.c 	return mic_next_db(mdev);
mdev               86 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev               88 drivers/misc/mic/host/mic_boot.c 	return mdev->dp;
mdev               98 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev              100 drivers/misc/mic/host/mic_boot.c 	mdev->ops->send_intr(mdev, db);
mdev              106 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
mdev              108 drivers/misc/mic/host/mic_boot.c 	return mdev->aper.va + pa;
mdev              138 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              143 drivers/misc/mic/host/mic_boot.c 		tmp = mic_map_single(mdev, va, size);
mdev              158 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              160 drivers/misc/mic/host/mic_boot.c 	mic_unmap_single(mdev, dma_handle, size);
mdev              171 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              173 drivers/misc/mic/host/mic_boot.c 	return mic_map_single(mdev, va, size);
mdev              182 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              184 drivers/misc/mic/host/mic_boot.c 	mic_unmap_single(mdev, dma_addr, size);
mdev              192 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              197 drivers/misc/mic/host/mic_boot.c 	ret = dma_map_sg(&mdev->pdev->dev, sg, nents, dir);
mdev              202 drivers/misc/mic/host/mic_boot.c 		da = mic_map(mdev, sg_dma_address(s) + s->offset, s->length);
mdev              210 drivers/misc/mic/host/mic_boot.c 		mic_unmap(mdev, sg_dma_address(s), s->length);
mdev              211 drivers/misc/mic/host/mic_boot.c 		sg_dma_address(s) = mic_to_dma_addr(mdev, sg_dma_address(s));
mdev              213 drivers/misc/mic/host/mic_boot.c 	dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
mdev              223 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              229 drivers/misc/mic/host/mic_boot.c 		da = mic_to_dma_addr(mdev, sg_dma_address(s));
mdev              230 drivers/misc/mic/host/mic_boot.c 		mic_unmap(mdev, sg_dma_address(s), s->length);
mdev              233 drivers/misc/mic/host/mic_boot.c 	dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir);
mdev              251 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              253 drivers/misc/mic/host/mic_boot.c 	return mic_request_threaded_irq(mdev, func, NULL, name, data,
mdev              261 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              263 drivers/misc/mic/host/mic_boot.c 	mic_free_irq(mdev, cookie, data);
mdev              268 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              270 drivers/misc/mic/host/mic_boot.c 	mdev->ops->intr_workarounds(mdev);
mdev              275 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              277 drivers/misc/mic/host/mic_boot.c 	return mic_next_db(mdev);
mdev              282 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              284 drivers/misc/mic/host/mic_boot.c 	mdev->ops->send_intr(mdev, db);
mdev              290 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = scdev_to_mdev(scdev);
mdev              292 drivers/misc/mic/host/mic_boot.c 	return mdev->aper.va + pa;
mdev              321 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = dev_get_drvdata(dev->parent);
mdev              323 drivers/misc/mic/host/mic_boot.c 	return mic_map_single(mdev, va, size);
mdev              331 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = dev_get_drvdata(dev->parent);
mdev              332 drivers/misc/mic/host/mic_boot.c 	mic_unmap_single(mdev, dma_addr, size);
mdev              358 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = mbdev_to_mdev(mbdev);
mdev              359 drivers/misc/mic/host/mic_boot.c 	mdev->ops->intr_workarounds(mdev);
mdev              369 drivers/misc/mic/host/mic_boot.c void mic_bootparam_init(struct mic_device *mdev)
mdev              371 drivers/misc/mic/host/mic_boot.c 	struct mic_bootparam *bootparam = mdev->dp;
mdev              375 drivers/misc/mic/host/mic_boot.c 	bootparam->node_id = mdev->id + 1;
mdev              389 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              391 drivers/misc/mic/host/mic_boot.c 	mdev->ops->reset_fw_ready(mdev);
mdev              392 drivers/misc/mic/host/mic_boot.c 	mdev->ops->reset(mdev);
mdev              397 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              399 drivers/misc/mic/host/mic_boot.c 	return mdev->ops->is_fw_ready(mdev);
mdev              408 drivers/misc/mic/host/mic_boot.c static int mic_request_dma_chans(struct mic_device *mdev)
mdev              417 drivers/misc/mic/host/mic_boot.c 		chan = dma_request_channel(mask, mdev->ops->dma_filter,
mdev              418 drivers/misc/mic/host/mic_boot.c 					   &mdev->pdev->dev);
mdev              420 drivers/misc/mic/host/mic_boot.c 			mdev->dma_ch[mdev->num_dma_ch++] = chan;
mdev              421 drivers/misc/mic/host/mic_boot.c 			if (mdev->num_dma_ch >= MIC_MAX_DMA_CHAN)
mdev              425 drivers/misc/mic/host/mic_boot.c 	dev_info(&mdev->pdev->dev, "DMA channels # %d\n", mdev->num_dma_ch);
mdev              426 drivers/misc/mic/host/mic_boot.c 	return mdev->num_dma_ch;
mdev              435 drivers/misc/mic/host/mic_boot.c static void mic_free_dma_chans(struct mic_device *mdev)
mdev              439 drivers/misc/mic/host/mic_boot.c 	for (i = 0; i < mdev->num_dma_ch; i++) {
mdev              440 drivers/misc/mic/host/mic_boot.c 		dma_release_channel(mdev->dma_ch[i]);
mdev              441 drivers/misc/mic/host/mic_boot.c 		mdev->dma_ch[i] = NULL;
mdev              443 drivers/misc/mic/host/mic_boot.c 	mdev->num_dma_ch = 0;
mdev              458 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              461 drivers/misc/mic/host/mic_boot.c 	mic_bootparam_init(mdev);
mdev              462 drivers/misc/mic/host/mic_boot.c 	mdev->dma_mbdev = mbus_register_device(&mdev->pdev->dev,
mdev              464 drivers/misc/mic/host/mic_boot.c 					       &mbus_hw_ops, id, mdev->mmio.va);
mdev              465 drivers/misc/mic/host/mic_boot.c 	if (IS_ERR(mdev->dma_mbdev)) {
mdev              466 drivers/misc/mic/host/mic_boot.c 		rc = PTR_ERR(mdev->dma_mbdev);
mdev              469 drivers/misc/mic/host/mic_boot.c 	if (!mic_request_dma_chans(mdev)) {
mdev              473 drivers/misc/mic/host/mic_boot.c 	mdev->scdev = scif_register_device(&mdev->pdev->dev, MIC_SCIF_DEV,
mdev              475 drivers/misc/mic/host/mic_boot.c 					   id + 1, 0, &mdev->mmio,
mdev              476 drivers/misc/mic/host/mic_boot.c 					   &mdev->aper, mdev->dp, NULL,
mdev              477 drivers/misc/mic/host/mic_boot.c 					   mdev->dma_ch, mdev->num_dma_ch,
mdev              479 drivers/misc/mic/host/mic_boot.c 	if (IS_ERR(mdev->scdev)) {
mdev              480 drivers/misc/mic/host/mic_boot.c 		rc = PTR_ERR(mdev->scdev);
mdev              484 drivers/misc/mic/host/mic_boot.c 	mdev->vpdev = vop_register_device(&mdev->pdev->dev,
mdev              486 drivers/misc/mic/host/mic_boot.c 					  &vop_hw_ops, id + 1, &mdev->aper,
mdev              487 drivers/misc/mic/host/mic_boot.c 					  mdev->dma_ch[0]);
mdev              488 drivers/misc/mic/host/mic_boot.c 	if (IS_ERR(mdev->vpdev)) {
mdev              489 drivers/misc/mic/host/mic_boot.c 		rc = PTR_ERR(mdev->vpdev);
mdev              493 drivers/misc/mic/host/mic_boot.c 	rc = mdev->ops->load_mic_fw(mdev, NULL);
mdev              496 drivers/misc/mic/host/mic_boot.c 	mic_smpt_restore(mdev);
mdev              497 drivers/misc/mic/host/mic_boot.c 	mic_intr_restore(mdev);
mdev              498 drivers/misc/mic/host/mic_boot.c 	mdev->intr_ops->enable_interrupts(mdev);
mdev              499 drivers/misc/mic/host/mic_boot.c 	mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
mdev              500 drivers/misc/mic/host/mic_boot.c 	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
mdev              501 drivers/misc/mic/host/mic_boot.c 	mdev->ops->send_firmware_intr(mdev);
mdev              504 drivers/misc/mic/host/mic_boot.c 	vop_unregister_device(mdev->vpdev);
mdev              506 drivers/misc/mic/host/mic_boot.c 	scif_unregister_device(mdev->scdev);
mdev              508 drivers/misc/mic/host/mic_boot.c 	mic_free_dma_chans(mdev);
mdev              510 drivers/misc/mic/host/mic_boot.c 	mbus_unregister_device(mdev->dma_mbdev);
mdev              524 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              531 drivers/misc/mic/host/mic_boot.c 	vop_unregister_device(mdev->vpdev);
mdev              532 drivers/misc/mic/host/mic_boot.c 	scif_unregister_device(mdev->scdev);
mdev              533 drivers/misc/mic/host/mic_boot.c 	mic_free_dma_chans(mdev);
mdev              534 drivers/misc/mic/host/mic_boot.c 	mbus_unregister_device(mdev->dma_mbdev);
mdev              535 drivers/misc/mic/host/mic_boot.c 	mic_bootparam_init(mdev);
mdev              540 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              543 drivers/misc/mic/host/mic_boot.c 	return scnprintf(buf, PAGE_SIZE, "%s\n", family[mdev->family]);
mdev              548 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              551 drivers/misc/mic/host/mic_boot.c 	switch (mdev->stepping) {
mdev              572 drivers/misc/mic/host/mic_boot.c 	struct mic_device *mdev = cosmdev_to_mdev(cdev);
mdev              574 drivers/misc/mic/host/mic_boot.c 	return &mdev->aper;
mdev               24 drivers/misc/mic/host/mic_debugfs.c 	struct mic_device *mdev = s->private;
mdev               28 drivers/misc/mic/host/mic_debugfs.c 		   mdev->id, "SMPT entry", "SW DMA addr", "RefCount");
mdev               31 drivers/misc/mic/host/mic_debugfs.c 	if (mdev->smpt) {
mdev               32 drivers/misc/mic/host/mic_debugfs.c 		struct mic_smpt_info *smpt_info = mdev->smpt;
mdev               49 drivers/misc/mic/host/mic_debugfs.c 	struct mic_device *mdev = s->private;
mdev               50 drivers/misc/mic/host/mic_debugfs.c 	u32 reg = mdev->ops->get_postcode(mdev);
mdev               60 drivers/misc/mic/host/mic_debugfs.c 	struct mic_device *mdev  = s->private;
mdev               65 drivers/misc/mic/host/mic_debugfs.c 	struct pci_dev *pdev = mdev->pdev;
mdev               68 drivers/misc/mic/host/mic_debugfs.c 		for (i = 0; i < mdev->irq_info.num_vectors; i++) {
mdev               70 drivers/misc/mic/host/mic_debugfs.c 				entry = mdev->irq_info.msix_entries[i].entry;
mdev               71 drivers/misc/mic/host/mic_debugfs.c 				vector = mdev->irq_info.msix_entries[i].vector;
mdev               77 drivers/misc/mic/host/mic_debugfs.c 			reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry);
mdev               91 drivers/misc/mic/host/mic_debugfs.c 					   (mdev->irq_info.mic_msi_map[i] &
mdev              107 drivers/misc/mic/host/mic_debugfs.c void mic_create_debug_dir(struct mic_device *mdev)
mdev              114 drivers/misc/mic/host/mic_debugfs.c 	scnprintf(name, sizeof(name), "mic%d", mdev->id);
mdev              115 drivers/misc/mic/host/mic_debugfs.c 	mdev->dbg_dir = debugfs_create_dir(name, mic_dbg);
mdev              117 drivers/misc/mic/host/mic_debugfs.c 	debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev,
mdev              120 drivers/misc/mic/host/mic_debugfs.c 	debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
mdev              123 drivers/misc/mic/host/mic_debugfs.c 	debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
mdev              130 drivers/misc/mic/host/mic_debugfs.c void mic_delete_debug_dir(struct mic_device *mdev)
mdev              132 drivers/misc/mic/host/mic_debugfs.c 	if (!mdev->dbg_dir)
mdev              135 drivers/misc/mic/host/mic_debugfs.c 	debugfs_remove_recursive(mdev->dbg_dir);
mdev              112 drivers/misc/mic/host/mic_device.h 	u32 (*read_spad)(struct mic_device *mdev, unsigned int idx);
mdev              113 drivers/misc/mic/host/mic_device.h 	void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val);
mdev              114 drivers/misc/mic/host/mic_device.h 	void (*send_intr)(struct mic_device *mdev, int doorbell);
mdev              115 drivers/misc/mic/host/mic_device.h 	u32 (*ack_interrupt)(struct mic_device *mdev);
mdev              116 drivers/misc/mic/host/mic_device.h 	void (*intr_workarounds)(struct mic_device *mdev);
mdev              117 drivers/misc/mic/host/mic_device.h 	void (*reset)(struct mic_device *mdev);
mdev              118 drivers/misc/mic/host/mic_device.h 	void (*reset_fw_ready)(struct mic_device *mdev);
mdev              119 drivers/misc/mic/host/mic_device.h 	bool (*is_fw_ready)(struct mic_device *mdev);
mdev              120 drivers/misc/mic/host/mic_device.h 	void (*send_firmware_intr)(struct mic_device *mdev);
mdev              121 drivers/misc/mic/host/mic_device.h 	int (*load_mic_fw)(struct mic_device *mdev, const char *buf);
mdev              122 drivers/misc/mic/host/mic_device.h 	u32 (*get_postcode)(struct mic_device *mdev);
mdev              152 drivers/misc/mic/host/mic_device.h void mic_bootparam_init(struct mic_device *mdev);
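The host-side mic_device.h entries above define a per-family ops table; the rest of the host driver never touches hardware directly but dispatches through mdev->ops, as in the send_intr/write_spad calls quoted from mic_boot.c. A minimal sketch of that dispatch, using only the callbacks listed above; the example_ function and its parameters are illustrative.

#include <linux/types.h>
#include "mic_device.h"		/* driver-local header quoted above */

/* Illustrative only: publish a value in a scratchpad, then ring a doorbell. */
static void example_publish_and_kick(struct mic_device *mdev,
				     unsigned int spad_idx, u32 val,
				     int doorbell)
{
	mdev->ops->write_spad(mdev, spad_idx, val);	/* stash the value */
	mdev->ops->send_intr(mdev, doorbell);		/* notify the card */
}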
mdev               17 drivers/misc/mic/host/mic_intr.c 	struct mic_device *mdev = dev;
mdev               18 drivers/misc/mic/host/mic_intr.c 	struct mic_intr_info *intr_info = mdev->intr_info;
mdev               19 drivers/misc/mic/host/mic_intr.c 	struct mic_irq_info *irq_info = &mdev->irq_info;
mdev               21 drivers/misc/mic/host/mic_intr.c 	struct pci_dev *pdev = mdev->pdev;
mdev               43 drivers/misc/mic/host/mic_intr.c 	struct mic_device *mdev = dev;
mdev               44 drivers/misc/mic/host/mic_intr.c 	struct mic_intr_info *intr_info = mdev->intr_info;
mdev               45 drivers/misc/mic/host/mic_intr.c 	struct mic_irq_info *irq_info = &mdev->irq_info;
mdev               47 drivers/misc/mic/host/mic_intr.c 	struct pci_dev *pdev = mdev->pdev;
mdev               51 drivers/misc/mic/host/mic_intr.c 	mask = mdev->ops->ack_interrupt(mdev);
mdev               71 drivers/misc/mic/host/mic_intr.c static u16 mic_map_src_to_offset(struct mic_device *mdev,
mdev               76 drivers/misc/mic/host/mic_intr.c 	if (intr_src >= mdev->intr_info->intr_len[type])
mdev               79 drivers/misc/mic/host/mic_intr.c 	return mdev->intr_info->intr_start_idx[type] + intr_src;
mdev               83 drivers/misc/mic/host/mic_intr.c static struct msix_entry *mic_get_available_vector(struct mic_device *mdev)
mdev               86 drivers/misc/mic/host/mic_intr.c 	struct mic_irq_info *info = &mdev->irq_info;
mdev              107 drivers/misc/mic/host/mic_intr.c static struct mic_intr_cb *mic_register_intr_callback(struct mic_device *mdev,
mdev              122 drivers/misc/mic/host/mic_intr.c 	intr_cb->cb_id = ida_simple_get(&mdev->irq_info.cb_ida,
mdev              129 drivers/misc/mic/host/mic_intr.c 	spin_lock(&mdev->irq_info.mic_thread_lock);
mdev              130 drivers/misc/mic/host/mic_intr.c 	spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
mdev              131 drivers/misc/mic/host/mic_intr.c 	list_add_tail(&intr_cb->list, &mdev->irq_info.cb_list[idx]);
mdev              132 drivers/misc/mic/host/mic_intr.c 	spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
mdev              133 drivers/misc/mic/host/mic_intr.c 	spin_unlock(&mdev->irq_info.mic_thread_lock);
mdev              150 drivers/misc/mic/host/mic_intr.c static u8 mic_unregister_intr_callback(struct mic_device *mdev, u32 idx)
mdev              157 drivers/misc/mic/host/mic_intr.c 	spin_lock(&mdev->irq_info.mic_thread_lock);
mdev              158 drivers/misc/mic/host/mic_intr.c 	spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
mdev              160 drivers/misc/mic/host/mic_intr.c 		list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
mdev              164 drivers/misc/mic/host/mic_intr.c 				ida_simple_remove(&mdev->irq_info.cb_ida,
mdev              168 drivers/misc/mic/host/mic_intr.c 					&mdev->irq_info.mic_intr_lock, flags);
mdev              169 drivers/misc/mic/host/mic_intr.c 				spin_unlock(&mdev->irq_info.mic_thread_lock);
mdev              174 drivers/misc/mic/host/mic_intr.c 	spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
mdev              175 drivers/misc/mic/host/mic_intr.c 	spin_unlock(&mdev->irq_info.mic_thread_lock);
mdev              187 drivers/misc/mic/host/mic_intr.c static int mic_setup_msix(struct mic_device *mdev, struct pci_dev *pdev)
mdev              190 drivers/misc/mic/host/mic_intr.c 	int entry_size = sizeof(*mdev->irq_info.msix_entries);
mdev              192 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.msix_entries = kmalloc_array(MIC_MIN_MSIX,
mdev              194 drivers/misc/mic/host/mic_intr.c 	if (!mdev->irq_info.msix_entries) {
mdev              200 drivers/misc/mic/host/mic_intr.c 		mdev->irq_info.msix_entries[i].entry = i;
mdev              202 drivers/misc/mic/host/mic_intr.c 	rc = pci_enable_msix_exact(pdev, mdev->irq_info.msix_entries,
mdev              209 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.num_vectors = MIC_MIN_MSIX;
mdev              210 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
mdev              211 drivers/misc/mic/host/mic_intr.c 		mdev->irq_info.num_vectors), GFP_KERNEL);
mdev              213 drivers/misc/mic/host/mic_intr.c 	if (!mdev->irq_info.mic_msi_map) {
mdev              218 drivers/misc/mic/host/mic_intr.c 	dev_dbg(&mdev->pdev->dev,
mdev              219 drivers/misc/mic/host/mic_intr.c 		"%d MSIx irqs setup\n", mdev->irq_info.num_vectors);
mdev              224 drivers/misc/mic/host/mic_intr.c 	kfree(mdev->irq_info.msix_entries);
mdev              226 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.num_vectors = 0;
mdev              236 drivers/misc/mic/host/mic_intr.c static int mic_setup_callbacks(struct mic_device *mdev)
mdev              240 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.cb_list = kmalloc_array(MIC_NUM_OFFSETS,
mdev              241 drivers/misc/mic/host/mic_intr.c 					       sizeof(*mdev->irq_info.cb_list),
mdev              243 drivers/misc/mic/host/mic_intr.c 	if (!mdev->irq_info.cb_list)
mdev              247 drivers/misc/mic/host/mic_intr.c 		INIT_LIST_HEAD(&mdev->irq_info.cb_list[i]);
mdev              248 drivers/misc/mic/host/mic_intr.c 	ida_init(&mdev->irq_info.cb_ida);
mdev              249 drivers/misc/mic/host/mic_intr.c 	spin_lock_init(&mdev->irq_info.mic_intr_lock);
mdev              250 drivers/misc/mic/host/mic_intr.c 	spin_lock_init(&mdev->irq_info.mic_thread_lock);
mdev              260 drivers/misc/mic/host/mic_intr.c static void mic_release_callbacks(struct mic_device *mdev)
mdev              267 drivers/misc/mic/host/mic_intr.c 	spin_lock(&mdev->irq_info.mic_thread_lock);
mdev              268 drivers/misc/mic/host/mic_intr.c 	spin_lock_irqsave(&mdev->irq_info.mic_intr_lock, flags);
mdev              270 drivers/misc/mic/host/mic_intr.c 		if (list_empty(&mdev->irq_info.cb_list[i]))
mdev              273 drivers/misc/mic/host/mic_intr.c 		list_for_each_safe(pos, tmp, &mdev->irq_info.cb_list[i]) {
mdev              276 drivers/misc/mic/host/mic_intr.c 			ida_simple_remove(&mdev->irq_info.cb_ida,
mdev              281 drivers/misc/mic/host/mic_intr.c 	spin_unlock_irqrestore(&mdev->irq_info.mic_intr_lock, flags);
mdev              282 drivers/misc/mic/host/mic_intr.c 	spin_unlock(&mdev->irq_info.mic_thread_lock);
mdev              283 drivers/misc/mic/host/mic_intr.c 	ida_destroy(&mdev->irq_info.cb_ida);
mdev              284 drivers/misc/mic/host/mic_intr.c 	kfree(mdev->irq_info.cb_list);
mdev              295 drivers/misc/mic/host/mic_intr.c static int mic_setup_msi(struct mic_device *mdev, struct pci_dev *pdev)
mdev              305 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.num_vectors = 1;
mdev              306 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.mic_msi_map = kzalloc((sizeof(u32) *
mdev              307 drivers/misc/mic/host/mic_intr.c 		mdev->irq_info.num_vectors), GFP_KERNEL);
mdev              309 drivers/misc/mic/host/mic_intr.c 	if (!mdev->irq_info.mic_msi_map) {
mdev              314 drivers/misc/mic/host/mic_intr.c 	rc = mic_setup_callbacks(mdev);
mdev              321 drivers/misc/mic/host/mic_intr.c 				  0, "mic-msi", mdev);
mdev              327 drivers/misc/mic/host/mic_intr.c 	dev_dbg(&pdev->dev, "%d MSI irqs setup\n", mdev->irq_info.num_vectors);
mdev              330 drivers/misc/mic/host/mic_intr.c 	mic_release_callbacks(mdev);
mdev              332 drivers/misc/mic/host/mic_intr.c 	kfree(mdev->irq_info.mic_msi_map);
mdev              335 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.num_vectors = 0;
mdev              347 drivers/misc/mic/host/mic_intr.c static int mic_setup_intx(struct mic_device *mdev, struct pci_dev *pdev)
mdev              353 drivers/misc/mic/host/mic_intr.c 	rc = mic_setup_callbacks(mdev);
mdev              360 drivers/misc/mic/host/mic_intr.c 				  IRQF_SHARED, "mic-intx", mdev);
mdev              367 drivers/misc/mic/host/mic_intr.c 	mic_release_callbacks(mdev);
mdev              381 drivers/misc/mic/host/mic_intr.c int mic_next_db(struct mic_device *mdev)
mdev              385 drivers/misc/mic/host/mic_intr.c 	next_db = mdev->irq_info.next_avail_src %
mdev              386 drivers/misc/mic/host/mic_intr.c 		mdev->intr_info->intr_len[MIC_INTR_DB];
mdev              387 drivers/misc/mic/host/mic_intr.c 	mdev->irq_info.next_avail_src++;
mdev              420 drivers/misc/mic/host/mic_intr.c mic_request_threaded_irq(struct mic_device *mdev,
mdev              431 drivers/misc/mic/host/mic_intr.c 	struct pci_dev *pdev = mdev->pdev;
mdev              433 drivers/misc/mic/host/mic_intr.c 	offset = mic_map_src_to_offset(mdev, intr_src, type);
mdev              435 drivers/misc/mic/host/mic_intr.c 		dev_err(&mdev->pdev->dev,
mdev              442 drivers/misc/mic/host/mic_intr.c 	if (mdev->irq_info.num_vectors > 1) {
mdev              443 drivers/misc/mic/host/mic_intr.c 		msix = mic_get_available_vector(mdev);
mdev              445 drivers/misc/mic/host/mic_intr.c 			dev_err(&mdev->pdev->dev,
mdev              454 drivers/misc/mic/host/mic_intr.c 			dev_dbg(&mdev->pdev->dev,
mdev              459 drivers/misc/mic/host/mic_intr.c 		mdev->irq_info.mic_msi_map[entry] |= BIT(offset);
mdev              460 drivers/misc/mic/host/mic_intr.c 		mdev->intr_ops->program_msi_to_src_map(mdev,
mdev              463 drivers/misc/mic/host/mic_intr.c 		dev_dbg(&mdev->pdev->dev, "irq: %d assigned for src: %d\n",
mdev              466 drivers/misc/mic/host/mic_intr.c 		intr_cb = mic_register_intr_callback(mdev, offset, handler,
mdev              469 drivers/misc/mic/host/mic_intr.c 			dev_err(&mdev->pdev->dev,
mdev              477 drivers/misc/mic/host/mic_intr.c 			mdev->irq_info.mic_msi_map[entry] |= (1 << offset);
mdev              478 drivers/misc/mic/host/mic_intr.c 			mdev->intr_ops->program_msi_to_src_map(mdev,
mdev              482 drivers/misc/mic/host/mic_intr.c 		dev_dbg(&mdev->pdev->dev, "callback %d registered for src: %d\n",
mdev              501 drivers/misc/mic/host/mic_intr.c void mic_free_irq(struct mic_device *mdev,
mdev              508 drivers/misc/mic/host/mic_intr.c 	struct pci_dev *pdev = mdev->pdev;
mdev              512 drivers/misc/mic/host/mic_intr.c 	if (mdev->irq_info.num_vectors > 1) {
mdev              513 drivers/misc/mic/host/mic_intr.c 		if (entry >= mdev->irq_info.num_vectors) {
mdev              514 drivers/misc/mic/host/mic_intr.c 			dev_warn(&mdev->pdev->dev,
mdev              516 drivers/misc/mic/host/mic_intr.c 				entry, mdev->irq_info.num_vectors);
mdev              519 drivers/misc/mic/host/mic_intr.c 		irq = mdev->irq_info.msix_entries[entry].vector;
mdev              521 drivers/misc/mic/host/mic_intr.c 		mdev->irq_info.mic_msi_map[entry] &= ~(BIT(offset));
mdev              522 drivers/misc/mic/host/mic_intr.c 		mdev->intr_ops->program_msi_to_src_map(mdev,
mdev              525 drivers/misc/mic/host/mic_intr.c 		dev_dbg(&mdev->pdev->dev, "irq: %d freed\n", irq);
mdev              528 drivers/misc/mic/host/mic_intr.c 		src_id = mic_unregister_intr_callback(mdev, offset);
mdev              530 drivers/misc/mic/host/mic_intr.c 			dev_warn(&mdev->pdev->dev, "Error unregistering callback\n");
mdev              534 drivers/misc/mic/host/mic_intr.c 			mdev->irq_info.mic_msi_map[entry] &= ~(BIT(src_id));
mdev              535 drivers/misc/mic/host/mic_intr.c 			mdev->intr_ops->program_msi_to_src_map(mdev,
mdev              538 drivers/misc/mic/host/mic_intr.c 		dev_dbg(&mdev->pdev->dev, "callback %d unregistered for src: %d\n",
mdev              551 drivers/misc/mic/host/mic_intr.c int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
mdev              555 drivers/misc/mic/host/mic_intr.c 	rc = mic_setup_msix(mdev, pdev);
mdev              559 drivers/misc/mic/host/mic_intr.c 	rc = mic_setup_msi(mdev, pdev);
mdev              563 drivers/misc/mic/host/mic_intr.c 	rc = mic_setup_intx(mdev, pdev);
mdev              565 drivers/misc/mic/host/mic_intr.c 		dev_err(&mdev->pdev->dev, "no usable interrupts\n");
mdev              569 drivers/misc/mic/host/mic_intr.c 	mdev->intr_ops->enable_interrupts(mdev);
mdev              581 drivers/misc/mic/host/mic_intr.c void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev)
mdev              585 drivers/misc/mic/host/mic_intr.c 	mdev->intr_ops->disable_interrupts(mdev);
mdev              586 drivers/misc/mic/host/mic_intr.c 	if (mdev->irq_info.num_vectors > 1) {
mdev              587 drivers/misc/mic/host/mic_intr.c 		for (i = 0; i < mdev->irq_info.num_vectors; i++) {
mdev              588 drivers/misc/mic/host/mic_intr.c 			if (mdev->irq_info.mic_msi_map[i])
mdev              590 drivers/misc/mic/host/mic_intr.c 					 mdev->irq_info.msix_entries[i].vector);
mdev              592 drivers/misc/mic/host/mic_intr.c 		kfree(mdev->irq_info.mic_msi_map);
mdev              593 drivers/misc/mic/host/mic_intr.c 		kfree(mdev->irq_info.msix_entries);
mdev              597 drivers/misc/mic/host/mic_intr.c 			free_irq(pdev->irq, mdev);
mdev              598 drivers/misc/mic/host/mic_intr.c 			kfree(mdev->irq_info.mic_msi_map);
mdev              601 drivers/misc/mic/host/mic_intr.c 			free_irq(pdev->irq, mdev);
mdev              603 drivers/misc/mic/host/mic_intr.c 		mic_release_callbacks(mdev);
mdev              618 drivers/misc/mic/host/mic_intr.c void mic_intr_restore(struct mic_device *mdev)
mdev              621 drivers/misc/mic/host/mic_intr.c 	struct pci_dev *pdev = mdev->pdev;
mdev              626 drivers/misc/mic/host/mic_intr.c 	for (entry = 0; entry < mdev->irq_info.num_vectors; entry++) {
mdev              628 drivers/misc/mic/host/mic_intr.c 			if (mdev->irq_info.mic_msi_map[entry] & BIT(offset))
mdev              629 drivers/misc/mic/host/mic_intr.c 				mdev->intr_ops->program_msi_to_src_map(mdev,
mdev              117 drivers/misc/mic/host/mic_intr.h 	void (*intr_init)(struct mic_device *mdev);
mdev              118 drivers/misc/mic/host/mic_intr.h 	void (*enable_interrupts)(struct mic_device *mdev);
mdev              119 drivers/misc/mic/host/mic_intr.h 	void (*disable_interrupts)(struct mic_device *mdev);
mdev              120 drivers/misc/mic/host/mic_intr.h 	void (*program_msi_to_src_map) (struct mic_device *mdev,
mdev              122 drivers/misc/mic/host/mic_intr.h 	u32 (*read_msi_to_src_map) (struct mic_device *mdev,
mdev              126 drivers/misc/mic/host/mic_intr.h int mic_next_db(struct mic_device *mdev);
mdev              128 drivers/misc/mic/host/mic_intr.h mic_request_threaded_irq(struct mic_device *mdev,
mdev              132 drivers/misc/mic/host/mic_intr.h void mic_free_irq(struct mic_device *mdev,
mdev              134 drivers/misc/mic/host/mic_intr.h int mic_setup_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
mdev              135 drivers/misc/mic/host/mic_intr.h void mic_free_interrupts(struct mic_device *mdev, struct pci_dev *pdev);
mdev              136 drivers/misc/mic/host/mic_intr.h void mic_intr_restore(struct mic_device *mdev);
mdev               49 drivers/misc/mic/host/mic_main.c static int mic_dp_init(struct mic_device *mdev)
mdev               51 drivers/misc/mic/host/mic_main.c 	mdev->dp = kzalloc(MIC_DP_SIZE, GFP_KERNEL);
mdev               52 drivers/misc/mic/host/mic_main.c 	if (!mdev->dp)
mdev               55 drivers/misc/mic/host/mic_main.c 	mdev->dp_dma_addr = mic_map_single(mdev,
mdev               56 drivers/misc/mic/host/mic_main.c 		mdev->dp, MIC_DP_SIZE);
mdev               57 drivers/misc/mic/host/mic_main.c 	if (mic_map_error(mdev->dp_dma_addr)) {
mdev               58 drivers/misc/mic/host/mic_main.c 		kfree(mdev->dp);
mdev               59 drivers/misc/mic/host/mic_main.c 		dev_err(&mdev->pdev->dev, "%s %d err %d\n",
mdev               63 drivers/misc/mic/host/mic_main.c 	mdev->ops->write_spad(mdev, MIC_DPLO_SPAD, mdev->dp_dma_addr);
mdev               64 drivers/misc/mic/host/mic_main.c 	mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
mdev               69 drivers/misc/mic/host/mic_main.c static void mic_dp_uninit(struct mic_device *mdev)
mdev               71 drivers/misc/mic/host/mic_main.c 	mic_unmap_single(mdev, mdev->dp_dma_addr, MIC_DP_SIZE);
mdev               72 drivers/misc/mic/host/mic_main.c 	kfree(mdev->dp);
mdev               82 drivers/misc/mic/host/mic_main.c static void mic_ops_init(struct mic_device *mdev)
mdev               84 drivers/misc/mic/host/mic_main.c 	switch (mdev->family) {
mdev               86 drivers/misc/mic/host/mic_main.c 		mdev->ops = &mic_x100_ops;
mdev               87 drivers/misc/mic/host/mic_main.c 		mdev->intr_ops = &mic_x100_intr_ops;
mdev               88 drivers/misc/mic/host/mic_main.c 		mdev->smpt_ops = &mic_x100_smpt_ops;
mdev              140 drivers/misc/mic/host/mic_main.c mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
mdev              142 drivers/misc/mic/host/mic_main.c 	mdev->pdev = pdev;
mdev              143 drivers/misc/mic/host/mic_main.c 	mdev->family = mic_get_family(pdev);
mdev              144 drivers/misc/mic/host/mic_main.c 	mdev->stepping = pdev->revision;
mdev              145 drivers/misc/mic/host/mic_main.c 	mic_ops_init(mdev);
mdev              146 drivers/misc/mic/host/mic_main.c 	mutex_init(&mdev->mic_mutex);
mdev              147 drivers/misc/mic/host/mic_main.c 	mdev->irq_info.next_avail_src = 0;
mdev              162 drivers/misc/mic/host/mic_main.c 	struct mic_device *mdev;
mdev              164 drivers/misc/mic/host/mic_main.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              165 drivers/misc/mic/host/mic_main.c 	if (!mdev) {
mdev              170 drivers/misc/mic/host/mic_main.c 	mdev->id = ida_simple_get(&g_mic_ida, 0, MIC_MAX_NUM_DEVS, GFP_KERNEL);
mdev              171 drivers/misc/mic/host/mic_main.c 	if (mdev->id < 0) {
mdev              172 drivers/misc/mic/host/mic_main.c 		rc = mdev->id;
mdev              177 drivers/misc/mic/host/mic_main.c 	mic_device_init(mdev, pdev);
mdev              199 drivers/misc/mic/host/mic_main.c 	mdev->mmio.pa = pci_resource_start(pdev, mdev->ops->mmio_bar);
mdev              200 drivers/misc/mic/host/mic_main.c 	mdev->mmio.len = pci_resource_len(pdev, mdev->ops->mmio_bar);
mdev              201 drivers/misc/mic/host/mic_main.c 	mdev->mmio.va = pci_ioremap_bar(pdev, mdev->ops->mmio_bar);
mdev              202 drivers/misc/mic/host/mic_main.c 	if (!mdev->mmio.va) {
mdev              208 drivers/misc/mic/host/mic_main.c 	mdev->aper.pa = pci_resource_start(pdev, mdev->ops->aper_bar);
mdev              209 drivers/misc/mic/host/mic_main.c 	mdev->aper.len = pci_resource_len(pdev, mdev->ops->aper_bar);
mdev              210 drivers/misc/mic/host/mic_main.c 	mdev->aper.va = ioremap_wc(mdev->aper.pa, mdev->aper.len);
mdev              211 drivers/misc/mic/host/mic_main.c 	if (!mdev->aper.va) {
mdev              217 drivers/misc/mic/host/mic_main.c 	mdev->intr_ops->intr_init(mdev);
mdev              218 drivers/misc/mic/host/mic_main.c 	rc = mic_setup_interrupts(mdev, pdev);
mdev              223 drivers/misc/mic/host/mic_main.c 	rc = mic_smpt_init(mdev);
mdev              229 drivers/misc/mic/host/mic_main.c 	pci_set_drvdata(pdev, mdev);
mdev              231 drivers/misc/mic/host/mic_main.c 	rc = mic_dp_init(mdev);
mdev              236 drivers/misc/mic/host/mic_main.c 	mic_bootparam_init(mdev);
mdev              237 drivers/misc/mic/host/mic_main.c 	mic_create_debug_dir(mdev);
mdev              239 drivers/misc/mic/host/mic_main.c 	mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
mdev              240 drivers/misc/mic/host/mic_main.c 	if (IS_ERR(mdev->cosm_dev)) {
mdev              241 drivers/misc/mic/host/mic_main.c 		rc = PTR_ERR(mdev->cosm_dev);
mdev              247 drivers/misc/mic/host/mic_main.c 	mic_delete_debug_dir(mdev);
mdev              248 drivers/misc/mic/host/mic_main.c 	mic_dp_uninit(mdev);
mdev              250 drivers/misc/mic/host/mic_main.c 	mic_smpt_uninit(mdev);
mdev              252 drivers/misc/mic/host/mic_main.c 	mic_free_interrupts(mdev, pdev);
mdev              254 drivers/misc/mic/host/mic_main.c 	iounmap(mdev->aper.va);
mdev              256 drivers/misc/mic/host/mic_main.c 	iounmap(mdev->mmio.va);
mdev              262 drivers/misc/mic/host/mic_main.c 	ida_simple_remove(&g_mic_ida, mdev->id);
mdev              264 drivers/misc/mic/host/mic_main.c 	kfree(mdev);
mdev              279 drivers/misc/mic/host/mic_main.c 	struct mic_device *mdev;
mdev              281 drivers/misc/mic/host/mic_main.c 	mdev = pci_get_drvdata(pdev);
mdev              282 drivers/misc/mic/host/mic_main.c 	if (!mdev)
mdev              285 drivers/misc/mic/host/mic_main.c 	cosm_unregister_device(mdev->cosm_dev);
mdev              286 drivers/misc/mic/host/mic_main.c 	mic_delete_debug_dir(mdev);
mdev              287 drivers/misc/mic/host/mic_main.c 	mic_dp_uninit(mdev);
mdev              288 drivers/misc/mic/host/mic_main.c 	mic_smpt_uninit(mdev);
mdev              289 drivers/misc/mic/host/mic_main.c 	mic_free_interrupts(mdev, pdev);
mdev              290 drivers/misc/mic/host/mic_main.c 	iounmap(mdev->aper.va);
mdev              291 drivers/misc/mic/host/mic_main.c 	iounmap(mdev->mmio.va);
mdev              294 drivers/misc/mic/host/mic_main.c 	ida_simple_remove(&g_mic_ida, mdev->id);
mdev              295 drivers/misc/mic/host/mic_main.c 	kfree(mdev);
mdev               15 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_system_page_mask(struct mic_device *mdev)
mdev               17 drivers/misc/mic/host/mic_smpt.c 	return (1ULL << mdev->smpt->info.page_shift) - 1ULL;
mdev               20 drivers/misc/mic/host/mic_smpt.c static inline u8 mic_sys_addr_to_smpt(struct mic_device *mdev, dma_addr_t pa)
mdev               22 drivers/misc/mic/host/mic_smpt.c 	return (pa - mdev->smpt->info.base) >> mdev->smpt->info.page_shift;
mdev               25 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_smpt_to_pa(struct mic_device *mdev, u8 index)
mdev               27 drivers/misc/mic/host/mic_smpt.c 	return mdev->smpt->info.base + (index * mdev->smpt->info.page_size);
mdev               30 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_smpt_offset(struct mic_device *mdev, dma_addr_t pa)
mdev               32 drivers/misc/mic/host/mic_smpt.c 	return pa & mic_system_page_mask(mdev);
mdev               35 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_smpt_align_low(struct mic_device *mdev, dma_addr_t pa)
mdev               37 drivers/misc/mic/host/mic_smpt.c 	return ALIGN(pa - mic_system_page_mask(mdev),
mdev               38 drivers/misc/mic/host/mic_smpt.c 		mdev->smpt->info.page_size);
mdev               41 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_smpt_align_high(struct mic_device *mdev, dma_addr_t pa)
mdev               43 drivers/misc/mic/host/mic_smpt.c 	return ALIGN(pa, mdev->smpt->info.page_size);
mdev               47 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_max_system_memory(struct mic_device *mdev)
mdev               49 drivers/misc/mic/host/mic_smpt.c 	return mdev->smpt->info.num_reg * mdev->smpt->info.page_size;
mdev               53 drivers/misc/mic/host/mic_smpt.c static inline u64 mic_max_system_addr(struct mic_device *mdev)
mdev               55 drivers/misc/mic/host/mic_smpt.c 	return mdev->smpt->info.base + mic_max_system_memory(mdev) - 1ULL;
mdev               60 drivers/misc/mic/host/mic_smpt.c mic_is_system_addr(struct mic_device *mdev, dma_addr_t pa)
mdev               62 drivers/misc/mic/host/mic_smpt.c 	return pa >= mdev->smpt->info.base && pa <= mic_max_system_addr(mdev);
mdev               67 drivers/misc/mic/host/mic_smpt.c 			       int entries, struct mic_device *mdev)
mdev               69 drivers/misc/mic/host/mic_smpt.c 	struct mic_smpt_info *smpt_info = mdev->smpt;
mdev               76 drivers/misc/mic/host/mic_smpt.c 			mdev->smpt_ops->set(mdev, addr, i);
mdev               87 drivers/misc/mic/host/mic_smpt.c static dma_addr_t mic_smpt_op(struct mic_device *mdev, u64 dma_addr,
mdev               96 drivers/misc/mic/host/mic_smpt.c 	struct mic_smpt_info *smpt_info = mdev->smpt;
mdev              125 drivers/misc/mic/host/mic_smpt.c 	mic_addr = mic_smpt_to_pa(mdev, spt);
mdev              126 drivers/misc/mic/host/mic_smpt.c 	mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
mdev              138 drivers/misc/mic/host/mic_smpt.c static int mic_get_smpt_ref_count(struct mic_device *mdev, dma_addr_t dma_addr,
mdev              146 drivers/misc/mic/host/mic_smpt.c 		ref[i++] = min(mic_smpt_align_high(mdev, start + 1),
mdev              148 drivers/misc/mic/host/mic_smpt.c 		start = mic_smpt_align_high(mdev, start + 1);
mdev              152 drivers/misc/mic/host/mic_smpt.c 		*smpt_start = mic_smpt_align_low(mdev, dma_addr);
mdev              165 drivers/misc/mic/host/mic_smpt.c dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr)
mdev              167 drivers/misc/mic/host/mic_smpt.c 	struct mic_smpt_info *smpt_info = mdev->smpt;
mdev              171 drivers/misc/mic/host/mic_smpt.c 	if (!mic_is_system_addr(mdev, mic_addr)) {
mdev              172 drivers/misc/mic/host/mic_smpt.c 		dev_err(&mdev->pdev->dev,
mdev              176 drivers/misc/mic/host/mic_smpt.c 	spt = mic_sys_addr_to_smpt(mdev, mic_addr);
mdev              178 drivers/misc/mic/host/mic_smpt.c 		mic_smpt_offset(mdev, mic_addr);
mdev              194 drivers/misc/mic/host/mic_smpt.c dma_addr_t mic_map(struct mic_device *mdev, dma_addr_t dma_addr, size_t size)
mdev              201 drivers/misc/mic/host/mic_smpt.c 	if (!size || size > mic_max_system_memory(mdev))
mdev              204 drivers/misc/mic/host/mic_smpt.c 	ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
mdev              208 drivers/misc/mic/host/mic_smpt.c 	num_entries = mic_get_smpt_ref_count(mdev, dma_addr, size,
mdev              212 drivers/misc/mic/host/mic_smpt.c 	mic_addr = mic_smpt_op(mdev, smpt_start, num_entries, ref, size);
mdev              221 drivers/misc/mic/host/mic_smpt.c 	if (!mic_addr && MIC_FAMILY_X100 == mdev->family) {
mdev              222 drivers/misc/mic/host/mic_smpt.c 		dev_err(&mdev->pdev->dev,
mdev              227 drivers/misc/mic/host/mic_smpt.c 		return mic_addr + mic_smpt_offset(mdev, dma_addr);
mdev              242 drivers/misc/mic/host/mic_smpt.c void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
mdev              244 drivers/misc/mic/host/mic_smpt.c 	struct mic_smpt_info *smpt_info = mdev->smpt;
mdev              254 drivers/misc/mic/host/mic_smpt.c 	if (!mic_is_system_addr(mdev, mic_addr)) {
mdev              255 drivers/misc/mic/host/mic_smpt.c 		dev_err(&mdev->pdev->dev,
mdev              260 drivers/misc/mic/host/mic_smpt.c 	spt = mic_sys_addr_to_smpt(mdev, mic_addr);
mdev              261 drivers/misc/mic/host/mic_smpt.c 	ref = kmalloc_array(mdev->smpt->info.num_reg, sizeof(s64), GFP_ATOMIC);
mdev              266 drivers/misc/mic/host/mic_smpt.c 	num_smpt = mic_get_smpt_ref_count(mdev, mic_addr, size, ref, NULL);
mdev              275 drivers/misc/mic/host/mic_smpt.c 			dev_warn(&mdev->pdev->dev,
mdev              295 drivers/misc/mic/host/mic_smpt.c dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size)
mdev              298 drivers/misc/mic/host/mic_smpt.c 	struct pci_dev *pdev = mdev->pdev;
mdev              303 drivers/misc/mic/host/mic_smpt.c 		mic_addr = mic_map(mdev, dma_addr, size);
mdev              305 drivers/misc/mic/host/mic_smpt.c 			dev_err(&mdev->pdev->dev,
mdev              327 drivers/misc/mic/host/mic_smpt.c mic_unmap_single(struct mic_device *mdev, dma_addr_t mic_addr, size_t size)
mdev              329 drivers/misc/mic/host/mic_smpt.c 	struct pci_dev *pdev = mdev->pdev;
mdev              330 drivers/misc/mic/host/mic_smpt.c 	dma_addr_t dma_addr = mic_to_dma_addr(mdev, mic_addr);
mdev              331 drivers/misc/mic/host/mic_smpt.c 	mic_unmap(mdev, mic_addr, size);
mdev              342 drivers/misc/mic/host/mic_smpt.c int mic_smpt_init(struct mic_device *mdev)
mdev              348 drivers/misc/mic/host/mic_smpt.c 	mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL);
mdev              349 drivers/misc/mic/host/mic_smpt.c 	if (!mdev->smpt)
mdev              352 drivers/misc/mic/host/mic_smpt.c 	smpt_info = mdev->smpt;
mdev              353 drivers/misc/mic/host/mic_smpt.c 	mdev->smpt_ops->init(mdev);
mdev              365 drivers/misc/mic/host/mic_smpt.c 		mdev->smpt_ops->set(mdev, dma_addr, i);
mdev              383 drivers/misc/mic/host/mic_smpt.c void mic_smpt_uninit(struct mic_device *mdev)
mdev              385 drivers/misc/mic/host/mic_smpt.c 	struct mic_smpt_info *smpt_info = mdev->smpt;
mdev              388 drivers/misc/mic/host/mic_smpt.c 	dev_dbg(&mdev->pdev->dev,
mdev              390 drivers/misc/mic/host/mic_smpt.c 		mdev->id, smpt_info->ref_count,
mdev              394 drivers/misc/mic/host/mic_smpt.c 		dev_dbg(&mdev->pdev->dev,
mdev              399 drivers/misc/mic/host/mic_smpt.c 			dev_warn(&mdev->pdev->dev,
mdev              418 drivers/misc/mic/host/mic_smpt.c void mic_smpt_restore(struct mic_device *mdev)
mdev              423 drivers/misc/mic/host/mic_smpt.c 	for (i = 0; i < mdev->smpt->info.num_reg; i++) {
mdev              424 drivers/misc/mic/host/mic_smpt.c 		dma_addr = mdev->smpt->entry[i].dma_addr;
mdev              425 drivers/misc/mic/host/mic_smpt.c 		mdev->smpt_ops->set(mdev, dma_addr, i);
mdev               17 drivers/misc/mic/host/mic_smpt.h 	void (*init)(struct mic_device *mdev);
mdev               18 drivers/misc/mic/host/mic_smpt.h 	void (*set)(struct mic_device *mdev, dma_addr_t dma_addr, u8 index);
mdev               63 drivers/misc/mic/host/mic_smpt.h dma_addr_t mic_map_single(struct mic_device *mdev, void *va, size_t size);
mdev               64 drivers/misc/mic/host/mic_smpt.h void mic_unmap_single(struct mic_device *mdev,
mdev               66 drivers/misc/mic/host/mic_smpt.h dma_addr_t mic_map(struct mic_device *mdev,
mdev               68 drivers/misc/mic/host/mic_smpt.h void mic_unmap(struct mic_device *mdev, dma_addr_t mic_addr, size_t size);
mdev               69 drivers/misc/mic/host/mic_smpt.h dma_addr_t mic_to_dma_addr(struct mic_device *mdev, dma_addr_t mic_addr);
mdev               83 drivers/misc/mic/host/mic_smpt.h int mic_smpt_init(struct mic_device *mdev);
mdev               84 drivers/misc/mic/host/mic_smpt.h void mic_smpt_uninit(struct mic_device *mdev);
mdev               85 drivers/misc/mic/host/mic_smpt.h void mic_smpt_restore(struct mic_device *mdev);
mdev               32 drivers/misc/mic/host/mic_x100.c mic_x100_write_spad(struct mic_device *mdev, unsigned int idx, u32 val)
mdev               34 drivers/misc/mic/host/mic_x100.c 	dev_dbg(&mdev->pdev->dev, "Writing 0x%x to scratch pad index %d\n",
mdev               36 drivers/misc/mic/host/mic_x100.c 	mic_mmio_write(&mdev->mmio, val,
mdev               51 drivers/misc/mic/host/mic_x100.c mic_x100_read_spad(struct mic_device *mdev, unsigned int idx)
mdev               53 drivers/misc/mic/host/mic_x100.c 	u32 val = mic_mmio_read(&mdev->mmio,
mdev               57 drivers/misc/mic/host/mic_x100.c 	dev_dbg(&mdev->pdev->dev,
mdev               66 drivers/misc/mic/host/mic_x100.c static void mic_x100_enable_interrupts(struct mic_device *mdev)
mdev               69 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev               81 drivers/misc/mic/host/mic_x100.c 	if (mdev->irq_info.num_vectors > 1) {
mdev               93 drivers/misc/mic/host/mic_x100.c static void mic_x100_disable_interrupts(struct mic_device *mdev)
mdev               96 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              104 drivers/misc/mic/host/mic_x100.c 	if (mdev->irq_info.num_vectors > 1) {
mdev              116 drivers/misc/mic/host/mic_x100.c static void mic_x100_send_sbox_intr(struct mic_device *mdev,
mdev              119 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              137 drivers/misc/mic/host/mic_x100.c static void mic_x100_send_rdmasr_intr(struct mic_device *mdev,
mdev              143 drivers/misc/mic/host/mic_x100.c 	mic_mmio_write(&mdev->mmio, 0,
mdev              152 drivers/misc/mic/host/mic_x100.c static void mic_x100_send_intr(struct mic_device *mdev, int doorbell)
mdev              156 drivers/misc/mic/host/mic_x100.c 		mic_x100_send_sbox_intr(mdev, doorbell);
mdev              159 drivers/misc/mic/host/mic_x100.c 		mic_x100_send_rdmasr_intr(mdev, rdmasr_db);
mdev              170 drivers/misc/mic/host/mic_x100.c static u32 mic_x100_ack_interrupt(struct mic_device *mdev)
mdev              173 drivers/misc/mic/host/mic_x100.c 	u32 reg = mic_mmio_read(&mdev->mmio, sicr0);
mdev              174 drivers/misc/mic/host/mic_x100.c 	mic_mmio_write(&mdev->mmio, reg, sicr0);
mdev              185 drivers/misc/mic/host/mic_x100.c static void mic_x100_intr_workarounds(struct mic_device *mdev)
mdev              187 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              190 drivers/misc/mic/host/mic_x100.c 	if (MIC_A0_STEP == mdev->stepping)
mdev              194 drivers/misc/mic/host/mic_x100.c 	if (mdev->stepping >= MIC_B0_STEP)
mdev              195 drivers/misc/mic/host/mic_x100.c 		mdev->intr_ops->enable_interrupts(mdev);
mdev              203 drivers/misc/mic/host/mic_x100.c static void mic_x100_hw_intr_init(struct mic_device *mdev)
mdev              205 drivers/misc/mic/host/mic_x100.c 	mdev->intr_info = (struct mic_intr_info *)mic_x100_intr_init;
mdev              218 drivers/misc/mic/host/mic_x100.c mic_x100_read_msi_to_src_map(struct mic_device *mdev, int idx)
mdev              220 drivers/misc/mic/host/mic_x100.c 	return mic_mmio_read(&mdev->mmio,
mdev              236 drivers/misc/mic/host/mic_x100.c mic_x100_program_msi_to_src_map(struct mic_device *mdev,
mdev              240 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              256 drivers/misc/mic/host/mic_x100.c static void mic_x100_reset_fw_ready(struct mic_device *mdev)
mdev              258 drivers/misc/mic/host/mic_x100.c 	mdev->ops->write_spad(mdev, MIC_X100_DOWNLOAD_INFO, 0);
mdev              265 drivers/misc/mic/host/mic_x100.c static bool mic_x100_is_fw_ready(struct mic_device *mdev)
mdev              267 drivers/misc/mic/host/mic_x100.c 	u32 scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
mdev              275 drivers/misc/mic/host/mic_x100.c static u32 mic_x100_get_apic_id(struct mic_device *mdev)
mdev              279 drivers/misc/mic/host/mic_x100.c 	scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
mdev              287 drivers/misc/mic/host/mic_x100.c static void mic_x100_send_firmware_intr(struct mic_device *mdev)
mdev              292 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              300 drivers/misc/mic/host/mic_x100.c 	mic_mmio_write(mw, mic_x100_get_apic_id(mdev),
mdev              313 drivers/misc/mic/host/mic_x100.c static void mic_x100_hw_reset(struct mic_device *mdev)
mdev              317 drivers/misc/mic/host/mic_x100.c 	struct mic_mw *mw = &mdev->mmio;
mdev              340 drivers/misc/mic/host/mic_x100.c mic_x100_load_command_line(struct mic_device *mdev, const struct firmware *fw)
mdev              345 drivers/misc/mic/host/mic_x100.c 	void __iomem *cmd_line_va = mdev->aper.va + mdev->bootaddr + fw->size;
mdev              348 drivers/misc/mic/host/mic_x100.c 	boot_mem = mdev->aper.len >> 20;
mdev              355 drivers/misc/mic/host/mic_x100.c 	if (mdev->cosm_dev->cmdline)
mdev              357 drivers/misc/mic/host/mic_x100.c 			 mdev->cosm_dev->cmdline);
mdev              370 drivers/misc/mic/host/mic_x100.c mic_x100_load_ramdisk(struct mic_device *mdev)
mdev              374 drivers/misc/mic/host/mic_x100.c 	struct boot_params __iomem *bp = mdev->aper.va + mdev->bootaddr;
mdev              376 drivers/misc/mic/host/mic_x100.c 	rc = request_firmware(&fw, mdev->cosm_dev->ramdisk, &mdev->pdev->dev);
mdev              378 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev,
mdev              380 drivers/misc/mic/host/mic_x100.c 			rc, mdev->cosm_dev->ramdisk);
mdev              387 drivers/misc/mic/host/mic_x100.c 	memcpy_toio(mdev->aper.va + (mdev->bootaddr << 1), fw->data, fw->size);
mdev              388 drivers/misc/mic/host/mic_x100.c 	iowrite32(mdev->bootaddr << 1, &bp->hdr.ramdisk_image);
mdev              405 drivers/misc/mic/host/mic_x100.c mic_x100_get_boot_addr(struct mic_device *mdev)
mdev              410 drivers/misc/mic/host/mic_x100.c 	scratch2 = mdev->ops->read_spad(mdev, MIC_X100_DOWNLOAD_INFO);
mdev              412 drivers/misc/mic/host/mic_x100.c 	dev_dbg(&mdev->pdev->dev, "%s %d boot_addr 0x%x\n",
mdev              415 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev,
mdev              421 drivers/misc/mic/host/mic_x100.c 	mdev->bootaddr = boot_addr;
mdev              434 drivers/misc/mic/host/mic_x100.c mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
mdev              439 drivers/misc/mic/host/mic_x100.c 	rc = mic_x100_get_boot_addr(mdev);
mdev              443 drivers/misc/mic/host/mic_x100.c 	rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
mdev              445 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev,
mdev              447 drivers/misc/mic/host/mic_x100.c 			rc, mdev->cosm_dev->firmware);
mdev              450 drivers/misc/mic/host/mic_x100.c 	if (mdev->bootaddr > mdev->aper.len - fw->size) {
mdev              452 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
mdev              453 drivers/misc/mic/host/mic_x100.c 			__func__, __LINE__, rc, mdev->bootaddr);
mdev              456 drivers/misc/mic/host/mic_x100.c 	memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
mdev              457 drivers/misc/mic/host/mic_x100.c 	mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
mdev              458 drivers/misc/mic/host/mic_x100.c 	if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
mdev              460 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
mdev              465 drivers/misc/mic/host/mic_x100.c 	rc = mic_x100_load_command_line(mdev, fw);
mdev              467 drivers/misc/mic/host/mic_x100.c 		dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
mdev              473 drivers/misc/mic/host/mic_x100.c 	if (mdev->cosm_dev->ramdisk)
mdev              474 drivers/misc/mic/host/mic_x100.c 		rc = mic_x100_load_ramdisk(mdev);
mdev              489 drivers/misc/mic/host/mic_x100.c static u32 mic_x100_get_postcode(struct mic_device *mdev)
mdev              491 drivers/misc/mic/host/mic_x100.c 	return mic_mmio_read(&mdev->mmio, MIC_X100_POSTCODE);
mdev              501 drivers/misc/mic/host/mic_x100.c mic_x100_smpt_set(struct mic_device *mdev, dma_addr_t dma_addr, u8 index)
mdev              515 drivers/misc/mic/host/mic_x100.c 			dma_addr >> mdev->smpt->info.page_shift);
mdev              516 drivers/misc/mic/host/mic_x100.c 	mic_mmio_write(&mdev->mmio, smpt_reg_val,
mdev              527 drivers/misc/mic/host/mic_x100.c static void mic_x100_smpt_hw_init(struct mic_device *mdev)
mdev              529 drivers/misc/mic/host/mic_x100.c 	struct mic_smpt_hw_info *info = &mdev->smpt->info;
mdev               66 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              162 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device, "SCIFAPI close: ep %p %s\n",
mdev              179 drivers/misc/mic/scif/scif_api.c 		dev_err(scif_info.mdev.this_device,
mdev              334 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              376 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              389 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              597 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device, "SCIFAPI connect: ep %p %s\n", ep,
mdev              747 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              813 drivers/misc/mic/scif/scif_api.c 		dev_err(scif_info.mdev.this_device,
mdev             1103 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1164 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1221 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1263 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1309 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1437 drivers/misc/mic/scif/scif_api.c 	dev_dbg(scif_info.mdev.this_device,
mdev              173 drivers/misc/mic/scif/scif_dma.c 		dev_info(scif_info.mdev.this_device,
mdev              782 drivers/misc/mic/scif/scif_dma.c 	dev_err(scif_info.mdev.this_device,
mdev             1077 drivers/misc/mic/scif/scif_dma.c 	dev_err(scif_info.mdev.this_device,
mdev             1195 drivers/misc/mic/scif/scif_dma.c 	dev_err(scif_info.mdev.this_device,
mdev             1429 drivers/misc/mic/scif/scif_dma.c 	dev_err(scif_info.mdev.this_device,
mdev             1815 drivers/misc/mic/scif/scif_dma.c 		dev_err(scif_info.mdev.this_device,
mdev             1828 drivers/misc/mic/scif/scif_dma.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1856 drivers/misc/mic/scif/scif_dma.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1884 drivers/misc/mic/scif/scif_dma.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1915 drivers/misc/mic/scif/scif_dma.c 	dev_dbg(scif_info.mdev.this_device,
mdev               69 drivers/misc/mic/scif/scif_fd.c 		dev_dbg(scif_info.mdev.this_device, "%s err %d\n", str, err);
mdev              315 drivers/misc/mic/scif/scif_fence.c 		dev_err(scif_info.mdev.this_device,
mdev              638 drivers/misc/mic/scif/scif_fence.c 	dev_dbg(scif_info.mdev.this_device,
mdev              672 drivers/misc/mic/scif/scif_fence.c 		dev_err(scif_info.mdev.this_device,
mdev              674 drivers/misc/mic/scif/scif_fence.c 	dev_dbg(scif_info.mdev.this_device,
mdev              686 drivers/misc/mic/scif/scif_fence.c 	dev_dbg(scif_info.mdev.this_device,
mdev              708 drivers/misc/mic/scif/scif_fence.c 		dev_err(scif_info.mdev.this_device,
mdev              720 drivers/misc/mic/scif/scif_fence.c 	dev_dbg(scif_info.mdev.this_device,
mdev              771 drivers/misc/mic/scif/scif_fence.c 		dev_err(scif_info.mdev.this_device,
mdev               20 drivers/misc/mic/scif/scif_main.c 	.mdev = {
mdev              310 drivers/misc/mic/scif/scif_main.c 	struct miscdevice *mdev = &scif_info.mdev;
mdev              321 drivers/misc/mic/scif/scif_main.c 	rc = misc_register(mdev);
mdev              338 drivers/misc/mic/scif/scif_main.c 	misc_deregister(&scif_info.mdev);
mdev               98 drivers/misc/mic/scif/scif_main.h 	struct miscdevice mdev;
mdev               79 drivers/misc/mic/scif/scif_mmap.c 		dev_dbg(scif_info.mdev.this_device,
mdev              132 drivers/misc/mic/scif/scif_mmap.c 			dev_err(scif_info.mdev.this_device,
mdev              220 drivers/misc/mic/scif/scif_mmap.c 	dev_dbg(scif_info.mdev.this_device,
mdev              436 drivers/misc/mic/scif/scif_mmap.c 		dev_err(scif_info.mdev.this_device,
mdev              528 drivers/misc/mic/scif/scif_mmap.c 	dev_dbg(scif_info.mdev.this_device,
mdev              553 drivers/misc/mic/scif/scif_mmap.c 	dev_dbg(scif_info.mdev.this_device,
mdev              559 drivers/misc/mic/scif/scif_mmap.c 	dev_dbg(scif_info.mdev.this_device,
mdev              573 drivers/misc/mic/scif/scif_mmap.c 		dev_err(scif_info.mdev.this_device,
mdev              612 drivers/misc/mic/scif/scif_mmap.c 	dev_dbg(scif_info.mdev.this_device,
mdev              288 drivers/misc/mic/scif/scif_rma.c 		dev_err(scif_info.mdev.this_device,
mdev              324 drivers/misc/mic/scif/scif_rma.c 		dev_err(scif_info.mdev.this_device,
mdev              684 drivers/misc/mic/scif/scif_rma.c 			dev_err(scif_info.mdev.this_device,
mdev             1419 drivers/misc/mic/scif/scif_rma.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1440 drivers/misc/mic/scif/scif_rma.c 		dev_err(scif_info.mdev.this_device,
mdev             1579 drivers/misc/mic/scif/scif_rma.c 	dev_dbg(scif_info.mdev.this_device,
mdev             1701 drivers/misc/mic/scif/scif_rma.c 	dev_dbg(scif_info.mdev.this_device,
mdev              182 drivers/misc/mic/scif/scif_rma_list.c 	dev_err(scif_info.mdev.this_device,
mdev              264 drivers/misc/mic/scif/scif_rma_list.c 			dev_err(scif_info.mdev.this_device,
mdev             1119 drivers/misc/mic/vop/vop_vringh.c 	struct miscdevice *mdev;
mdev             1124 drivers/misc/mic/vop/vop_vringh.c 	mdev = &vi->miscdev;
mdev             1125 drivers/misc/mic/vop/vop_vringh.c 	mdev->minor = MISC_DYNAMIC_MINOR;
mdev             1127 drivers/misc/mic/vop/vop_vringh.c 	mdev->name = vi->name;
mdev             1128 drivers/misc/mic/vop/vop_vringh.c 	mdev->fops = &vop_fops;
mdev             1129 drivers/misc/mic/vop/vop_vringh.c 	mdev->parent = &vpdev->dev;
mdev             1131 drivers/misc/mic/vop/vop_vringh.c 	rc = misc_register(mdev);
mdev               24 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev               60 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev               66 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev               67 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev               91 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev               94 drivers/mtd/maps/vmu-flash.c 	mdev = mq->dev;
mdev               95 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              114 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              121 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev              123 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              131 drivers/mtd/maps/vmu-flash.c 			dev_err(&mdev->dev, "VMU at (%d, %d) - read fails due"
mdev              132 drivers/mtd/maps/vmu-flash.c 				" to lack of memory\n", mdev->port,
mdev              133 drivers/mtd/maps/vmu-flash.c 				mdev->unit);
mdev              147 drivers/mtd/maps/vmu-flash.c 		if (atomic_read(&mdev->busy) == 1) {
mdev              148 drivers/mtd/maps/vmu-flash.c 			wait_event_interruptible_timeout(mdev->maple_wait,
mdev              149 drivers/mtd/maps/vmu-flash.c 				atomic_read(&mdev->busy) == 0, HZ);
mdev              150 drivers/mtd/maps/vmu-flash.c 			if (atomic_read(&mdev->busy) == 1) {
mdev              151 drivers/mtd/maps/vmu-flash.c 				dev_notice(&mdev->dev, "VMU at (%d, %d)"
mdev              152 drivers/mtd/maps/vmu-flash.c 					" is busy\n", mdev->port, mdev->unit);
mdev              158 drivers/mtd/maps/vmu-flash.c 		atomic_set(&mdev->busy, 1);
mdev              162 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              167 drivers/mtd/maps/vmu-flash.c 		maple_getcond_callback(mdev, vmu_blockread, 0,
mdev              169 drivers/mtd/maps/vmu-flash.c 		error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
mdev              172 drivers/mtd/maps/vmu-flash.c 		wait = wait_event_interruptible_timeout(mdev->maple_wait,
mdev              173 drivers/mtd/maps/vmu-flash.c 			(atomic_read(&mdev->busy) == 0 ||
mdev              174 drivers/mtd/maps/vmu-flash.c 			atomic_read(&mdev->busy) == 2), HZ * 3);
mdev              180 drivers/mtd/maps/vmu-flash.c 		if (error || atomic_read(&mdev->busy) == 2) {
mdev              181 drivers/mtd/maps/vmu-flash.c 			if (atomic_read(&mdev->busy) == 2)
mdev              183 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              189 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              191 drivers/mtd/maps/vmu-flash.c 			list_del_init(&(mdev->mq->list));
mdev              192 drivers/mtd/maps/vmu-flash.c 			kfree(mdev->mq->sendbuf);
mdev              193 drivers/mtd/maps/vmu-flash.c 			mdev->mq->sendbuf = NULL;
mdev              195 drivers/mtd/maps/vmu-flash.c 				dev_warn(&mdev->dev, "VMU read on (%d, %d)"
mdev              197 drivers/mtd/maps/vmu-flash.c 					mdev->port, mdev->unit, num);
mdev              199 drivers/mtd/maps/vmu-flash.c 				dev_notice(&mdev->dev, "VMU read on (%d, %d)"
mdev              201 drivers/mtd/maps/vmu-flash.c 					mdev->port, mdev->unit, num);
mdev              231 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              236 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev              238 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              252 drivers/mtd/maps/vmu-flash.c 		if (atomic_read(&mdev->busy) == 1) {
mdev              253 drivers/mtd/maps/vmu-flash.c 			wait_event_interruptible_timeout(mdev->maple_wait,
mdev              254 drivers/mtd/maps/vmu-flash.c 				atomic_read(&mdev->busy) == 0, HZ);
mdev              255 drivers/mtd/maps/vmu-flash.c 			if (atomic_read(&mdev->busy) == 1) {
mdev              257 drivers/mtd/maps/vmu-flash.c 				dev_notice(&mdev->dev, "VMU write at (%d, %d)"
mdev              259 drivers/mtd/maps/vmu-flash.c 					mdev->port, mdev->unit);
mdev              263 drivers/mtd/maps/vmu-flash.c 		atomic_set(&mdev->busy, 1);
mdev              265 drivers/mtd/maps/vmu-flash.c 		locking = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
mdev              267 drivers/mtd/maps/vmu-flash.c 		wait = wait_event_interruptible_timeout(mdev->maple_wait,
mdev              268 drivers/mtd/maps/vmu-flash.c 			atomic_read(&mdev->busy) == 0, HZ/10);
mdev              271 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              274 drivers/mtd/maps/vmu-flash.c 		if (atomic_read(&mdev->busy) == 2) {
mdev              275 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              278 drivers/mtd/maps/vmu-flash.c 			dev_warn(&mdev->dev, "Write at (%d, %d) of block"
mdev              280 drivers/mtd/maps/vmu-flash.c 				" communicate with VMU", mdev->port,
mdev              281 drivers/mtd/maps/vmu-flash.c 				mdev->unit, num, x);
mdev              282 drivers/mtd/maps/vmu-flash.c 			atomic_set(&mdev->busy, 0);
mdev              283 drivers/mtd/maps/vmu-flash.c 			kfree(mdev->mq->sendbuf);
mdev              284 drivers/mtd/maps/vmu-flash.c 			mdev->mq->sendbuf = NULL;
mdev              285 drivers/mtd/maps/vmu-flash.c 			list_del_init(&(mdev->mq->list));
mdev              296 drivers/mtd/maps/vmu-flash.c 	dev_err(&mdev->dev, "VMU (%d, %d): write failed\n", mdev->port,
mdev              297 drivers/mtd/maps/vmu-flash.c 		mdev->unit);
mdev              308 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              313 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev              315 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              353 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              362 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev              364 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              419 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              428 drivers/mtd/maps/vmu-flash.c 	mdev = mpart->mdev;
mdev              430 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              489 drivers/mtd/maps/vmu-flash.c 	dev_err(&mdev->dev, "VMU write failing with error %d\n", error);
mdev              501 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev;
mdev              511 drivers/mtd/maps/vmu-flash.c 	mdev = mq->dev;
mdev              512 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              517 drivers/mtd/maps/vmu-flash.c 	dev_info(&mdev->dev, "VMU device at partition %d has %d user "
mdev              530 drivers/mtd/maps/vmu-flash.c 		mdev->port, mdev->unit, card->partition);
mdev              546 drivers/mtd/maps/vmu-flash.c 	mpart->mdev = mdev;
mdev              560 drivers/mtd/maps/vmu-flash.c 	maple_getcond_callback(mdev, NULL, 0,
mdev              569 drivers/mtd/maps/vmu-flash.c 		maple_getcond_callback(mdev, vmu_queryblocks, 0,
mdev              571 drivers/mtd/maps/vmu-flash.c 		maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
mdev              577 drivers/mtd/maps/vmu-flash.c 	dev_err(&mdev->dev, "Could not register maple device at (%d, %d)"
mdev              578 drivers/mtd/maps/vmu-flash.c 		"error is 0x%X\n", mdev->port, mdev->unit, error);
mdev              589 drivers/mtd/maps/vmu-flash.c 	maple_getcond_callback(mdev, NULL, 0,
mdev              597 drivers/mtd/maps/vmu-flash.c static int vmu_connect(struct maple_device *mdev)
mdev              604 drivers/mtd/maps/vmu-flash.c 	test_flash_data = be32_to_cpu(mdev->devinfo.function);
mdev              610 drivers/mtd/maps/vmu-flash.c 	basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
mdev              644 drivers/mtd/maps/vmu-flash.c 	maple_set_drvdata(mdev, card);
mdev              651 drivers/mtd/maps/vmu-flash.c 	maple_getcond_callback(mdev, vmu_queryblocks, 0,
mdev              655 drivers/mtd/maps/vmu-flash.c 	if (atomic_read(&mdev->busy) == 1) {
mdev              656 drivers/mtd/maps/vmu-flash.c 		wait_event_interruptible_timeout(mdev->maple_wait,
mdev              657 drivers/mtd/maps/vmu-flash.c 			atomic_read(&mdev->busy) == 0, HZ);
mdev              658 drivers/mtd/maps/vmu-flash.c 		if (atomic_read(&mdev->busy) == 1) {
mdev              659 drivers/mtd/maps/vmu-flash.c 			dev_notice(&mdev->dev, "VMU at (%d, %d) is busy\n",
mdev              660 drivers/mtd/maps/vmu-flash.c 				mdev->port, mdev->unit);
mdev              666 drivers/mtd/maps/vmu-flash.c 	atomic_set(&mdev->busy, 1);
mdev              672 drivers/mtd/maps/vmu-flash.c 	error = maple_add_packet(mdev, MAPLE_FUNC_MEMCARD,
mdev              675 drivers/mtd/maps/vmu-flash.c 		dev_err(&mdev->dev, "Could not lock VMU at (%d, %d)"
mdev              676 drivers/mtd/maps/vmu-flash.c 			" error is 0x%X\n", mdev->port, mdev->unit, error);
mdev              691 drivers/mtd/maps/vmu-flash.c static void vmu_disconnect(struct maple_device *mdev)
mdev              697 drivers/mtd/maps/vmu-flash.c 	mdev->callback = NULL;
mdev              698 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              701 drivers/mtd/maps/vmu-flash.c 		mpart->mdev = NULL;
mdev              713 drivers/mtd/maps/vmu-flash.c static int vmu_can_unload(struct maple_device *mdev)
mdev              719 drivers/mtd/maps/vmu-flash.c 	card = maple_get_drvdata(mdev);
mdev              730 drivers/mtd/maps/vmu-flash.c static void vmu_file_error(struct maple_device *mdev, void *recvbuf)
mdev              737 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " invalid partition number\n",
mdev              738 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              742 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " phase error\n",
mdev              743 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              747 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " invalid block number\n",
mdev              748 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              752 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " write error\n",
mdev              753 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              757 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " invalid write length\n",
mdev              758 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              762 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " bad CRC\n",
mdev              763 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit);
mdev              767 drivers/mtd/maps/vmu-flash.c 		dev_notice(&mdev->dev, ERRSTR " 0x%X\n",
mdev              768 drivers/mtd/maps/vmu-flash.c 			mdev->port, mdev->unit, error);
mdev              776 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              779 drivers/mtd/maps/vmu-flash.c 	mdev->can_unload = vmu_can_unload;
mdev              780 drivers/mtd/maps/vmu-flash.c 	mdev->fileerr_handler = vmu_file_error;
mdev              781 drivers/mtd/maps/vmu-flash.c 	mdev->driver = mdrv;
mdev              783 drivers/mtd/maps/vmu-flash.c 	error = vmu_connect(mdev);
mdev              792 drivers/mtd/maps/vmu-flash.c 	struct maple_device *mdev = to_maple_dev(dev);
mdev              794 drivers/mtd/maps/vmu-flash.c 	vmu_disconnect(mdev);
mdev               68 drivers/net/ethernet/apple/bmac.c 	struct macio_dev *mdev;
mdev              234 drivers/net/ethernet/apple/bmac.c 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
mdev              453 drivers/net/ethernet/apple/bmac.c static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
mdev              455 drivers/net/ethernet/apple/bmac.c 	struct net_device* dev = macio_get_drvdata(mdev);
mdev              499 drivers/net/ethernet/apple/bmac.c        	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
mdev              503 drivers/net/ethernet/apple/bmac.c static int bmac_resume(struct macio_dev *mdev)
mdev              505 drivers/net/ethernet/apple/bmac.c 	struct net_device* dev = macio_get_drvdata(mdev);
mdev             1237 drivers/net/ethernet/apple/bmac.c static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
mdev             1246 drivers/net/ethernet/apple/bmac.c 	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
mdev             1250 drivers/net/ethernet/apple/bmac.c 	prop_addr = of_get_property(macio_get_of_node(mdev),
mdev             1253 drivers/net/ethernet/apple/bmac.c 		prop_addr = of_get_property(macio_get_of_node(mdev),
mdev             1267 drivers/net/ethernet/apple/bmac.c 	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
mdev             1268 drivers/net/ethernet/apple/bmac.c 	macio_set_drvdata(mdev, dev);
mdev             1270 drivers/net/ethernet/apple/bmac.c 	bp->mdev = mdev;
mdev             1273 drivers/net/ethernet/apple/bmac.c 	if (macio_request_resources(mdev, "bmac")) {
mdev             1279 drivers/net/ethernet/apple/bmac.c 		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
mdev             1283 drivers/net/ethernet/apple/bmac.c 	dev->irq = macio_irq(mdev, 0);
mdev             1304 drivers/net/ethernet/apple/bmac.c 	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
mdev             1307 drivers/net/ethernet/apple/bmac.c 	bp->tx_dma_intr = macio_irq(mdev, 1);
mdev             1308 drivers/net/ethernet/apple/bmac.c 	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
mdev             1311 drivers/net/ethernet/apple/bmac.c 	bp->rx_dma_intr = macio_irq(mdev, 2);
mdev             1341 drivers/net/ethernet/apple/bmac.c 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
mdev             1368 drivers/net/ethernet/apple/bmac.c 	macio_release_resources(mdev);
mdev             1370 drivers/net/ethernet/apple/bmac.c 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
mdev             1429 drivers/net/ethernet/apple/bmac.c 	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
mdev             1472 drivers/net/ethernet/apple/bmac.c 	struct net_device *dev = macio_get_drvdata(bp->mdev);
mdev             1595 drivers/net/ethernet/apple/bmac.c static int bmac_remove(struct macio_dev *mdev)
mdev             1597 drivers/net/ethernet/apple/bmac.c 	struct net_device *dev = macio_get_drvdata(mdev);
mdev             1610 drivers/net/ethernet/apple/bmac.c 	macio_release_resources(mdev);
mdev               67 drivers/net/ethernet/apple/mace.c     struct macio_dev *mdev;
mdev              109 drivers/net/ethernet/apple/mace.c static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
mdev              111 drivers/net/ethernet/apple/mace.c 	struct device_node *mace = macio_get_of_node(mdev);
mdev              117 drivers/net/ethernet/apple/mace.c 	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
mdev              143 drivers/net/ethernet/apple/mace.c 	if (macio_request_resources(mdev, "mace")) {
mdev              153 drivers/net/ethernet/apple/mace.c 	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
mdev              156 drivers/net/ethernet/apple/mace.c 	mp->mdev = mdev;
mdev              157 drivers/net/ethernet/apple/mace.c 	macio_set_drvdata(mdev, dev);
mdev              159 drivers/net/ethernet/apple/mace.c 	dev->base_addr = macio_resource_start(mdev, 0);
mdev              166 drivers/net/ethernet/apple/mace.c 	dev->irq = macio_irq(mdev, 0);
mdev              179 drivers/net/ethernet/apple/mace.c 	mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
mdev              185 drivers/net/ethernet/apple/mace.c 	mp->tx_dma_intr = macio_irq(mdev, 1);
mdev              187 drivers/net/ethernet/apple/mace.c 	mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
mdev              193 drivers/net/ethernet/apple/mace.c 	mp->rx_dma_intr = macio_irq(mdev, 2);
mdev              255 drivers/net/ethernet/apple/mace.c 	free_irq(macio_irq(mdev, 2), dev);
mdev              257 drivers/net/ethernet/apple/mace.c 	free_irq(macio_irq(mdev, 1), dev);
mdev              259 drivers/net/ethernet/apple/mace.c 	free_irq(macio_irq(mdev, 0), dev);
mdev              269 drivers/net/ethernet/apple/mace.c 	macio_release_resources(mdev);
mdev              274 drivers/net/ethernet/apple/mace.c static int mace_remove(struct macio_dev *mdev)
mdev              276 drivers/net/ethernet/apple/mace.c 	struct net_device *dev = macio_get_drvdata(mdev);
mdev              281 drivers/net/ethernet/apple/mace.c 	macio_set_drvdata(mdev, NULL);
mdev              297 drivers/net/ethernet/apple/mace.c 	macio_release_resources(mdev);
mdev              806 drivers/net/ethernet/apple/mace.c     struct net_device *dev = macio_get_drvdata(mp->mdev);
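
The mace.c block repeats the same probe shape and adds the teardown half: fetch the net_device back with macio_get_drvdata(), drop the mappings, then release the macio resources. A matching remove sketch for the hypothetical "foo" driver above (same caveats) might look like this; the real mace/bmac removes also free_irq() the DMA interrupts they requested at probe time.

static int foo_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct foo_priv *fp = netdev_priv(dev);

	unregister_netdev(dev);
	macio_set_drvdata(mdev, NULL);

	iounmap(fp->regs);
	macio_release_resources(mdev);

	free_netdev(dev);
	return 0;
}
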
mdev               22 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev               25 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	tx_hdr = mdev->mbase + mbox->tx_start;
mdev               26 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	rx_hdr = mdev->mbase + mbox->rx_start;
mdev               28 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_lock(&mdev->mbox_lock);
mdev               29 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->msg_size = 0;
mdev               30 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->rsp_size = 0;
mdev               33 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_unlock(&mdev->mbox_lock);
mdev               50 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev;
mdev              123 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		mdev = &mbox->dev[devid];
mdev              124 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
mdev              125 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		spin_lock_init(&mdev->mbox_lock);
mdev              136 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              139 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	while (mdev->num_msgs != mdev->msgs_acked) {
mdev              151 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              155 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		if (mdev->num_msgs == mdev->msgs_acked)
mdev              165 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              168 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	tx_hdr = mdev->mbase + mbox->tx_start;
mdev              169 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	rx_hdr = mdev->mbase + mbox->rx_start;
mdev              171 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_lock(&mdev->mbox_lock);
mdev              173 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->msg_size = 0;
mdev              174 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->rsp_size = 0;
mdev              175 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->msgs_acked = 0;
mdev              184 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	tx_hdr->num_msgs = mdev->num_msgs;
mdev              186 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_unlock(&mdev->mbox_lock);
mdev              199 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              202 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_lock(&mdev->mbox_lock);
mdev              206 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
mdev              208 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
mdev              211 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	if (mdev->msg_size == 0)
mdev              212 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		mdev->num_msgs = 0;
mdev              213 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->num_msgs++;
mdev              215 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
mdev              221 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->msg_size += size;
mdev              222 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	mdev->rsp_size += size_rsp;
mdev              223 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
mdev              225 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_unlock(&mdev->mbox_lock);
mdev              236 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              239 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	if (mdev->num_msgs != mdev->msgs_acked)
mdev              242 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
mdev              243 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
mdev              244 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 		struct mbox_msghdr *prsp = mdev->mbase + irsp;
mdev              279 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
mdev              282 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_lock(&mdev->mbox_lock);
mdev              283 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	ret = mdev->num_msgs != 0;
mdev              284 drivers/net/ethernet/marvell/octeontx2/af/mbox.c 	spin_unlock(&mdev->mbox_lock);
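
The octeontx2 mbox.c hits show how each otx2_mbox_dev packs request messages back to back in its share of the mailbox memory: under mbox_lock, the running msg_size/rsp_size totals are checked against the window, num_msgs is bumped, and the new header is placed right after the previous message. A condensed, hedged paraphrase is below; MSGS_OFFSET stands in for the driver's aligned batch-header size, and the zeroing and size alignment done by the real allocator are omitted.

static struct mbox_msghdr *mbox_alloc_msg_sketch(struct otx2_mbox *mbox,
						 int devid, int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);

	/* both the request and the expected response must fit in their windows */
	if (mdev->msg_size + size > mbox->tx_size - MSGS_OFFSET)
		goto exit;
	if (mdev->rsp_size + size_rsp > mbox->rx_size - MSGS_OFFSET)
		goto exit;

	if (mdev->msg_size == 0)	/* first message of a fresh batch */
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	/* messages are laid out back to back after the batch header */
	msghdr = mdev->mbase + mbox->tx_start + MSGS_OFFSET + mdev->msg_size;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + MSGS_OFFSET;
exit:
	spin_unlock(&mdev->mbox_lock);
	return msghdr;
}
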
mdev             1420 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct otx2_mbox_dev *mdev;
mdev             1439 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	mdev = &mbox->dev[devid];
mdev             1442 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	req_hdr = mdev->mbase + mbox->rx_start;
mdev             1449 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		msg = mdev->mbase + offset;
mdev             1503 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct otx2_mbox_dev *mdev;
mdev             1523 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	mdev = &mbox->dev[devid];
mdev             1525 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rsp_hdr = mdev->mbase + mbox->rx_start;
mdev             1534 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		msg = mdev->mbase + offset;
mdev             1561 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mdev->msgs_acked++;
mdev             1687 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	struct otx2_mbox_dev *mdev;
mdev             1698 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mdev = &mbox->dev[i];
mdev             1699 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		hdr = mdev->mbase + mbox->rx_start;
mdev             1704 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		mdev = &mbox->dev[i];
mdev             1705 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 		hdr = mdev->mbase + mbox->rx_start;
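
On the receive side (the rvu.c hits), the handler reads the batch header at rx_start, walks the chain via next_msgoff, and advances msgs_acked, which is exactly what the otx2_mbox waiters above poll against num_msgs. A rough walking loop, with the per-message dispatch and error handling left out, could read:

static void mbox_walk_batch_sketch(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *hdr = mdev->mbase + mbox->rx_start;
	int offset = mbox->rx_start + MSGS_OFFSET;
	int i;

	for (i = 0; i < hdr->num_msgs; i++) {
		struct mbox_msghdr *msg = mdev->mbase + offset;

		/* look up and run the handler for this message ID here */

		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;	/* lets the num_msgs == msgs_acked wait complete */
	}
}
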
mdev               43 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_en_dev *mdev =
mdev               45 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_dev *dev = mdev->dev;
mdev               61 drivers/net/ethernet/mellanox/mlx4/en_clock.c void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
mdev               69 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		seq = read_seqbegin(&mdev->clock_lock);
mdev               70 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		nsec = timecounter_cyc2time(&mdev->clock, timestamp);
mdev               71 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	} while (read_seqretry(&mdev->clock_lock, seq));
mdev               83 drivers/net/ethernet/mellanox/mlx4/en_clock.c void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
mdev               85 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	if (mdev->ptp_clock) {
mdev               86 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		ptp_clock_unregister(mdev->ptp_clock);
mdev               87 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mdev->ptp_clock = NULL;
mdev               88 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mlx4_info(mdev, "removed PHC\n");
mdev               99 drivers/net/ethernet/mellanox/mlx4/en_clock.c void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
mdev              101 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
mdev              106 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              107 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		timecounter_read(&mdev->clock);
mdev              108 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              109 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mdev->last_overflow_check = jiffies;
mdev              127 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
mdev              134 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mult = mdev->nominal_c_mult;
mdev              139 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              140 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	timecounter_read(&mdev->clock);
mdev              141 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
mdev              142 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              156 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
mdev              160 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              161 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	timecounter_adjtime(&mdev->clock, delta);
mdev              162 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              178 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
mdev              183 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              184 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	ns = timecounter_read(&mdev->clock);
mdev              185 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              203 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
mdev              209 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              210 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	timecounter_init(&mdev->clock, &mdev->cycles, ns);
mdev              211 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              263 drivers/net/ethernet/mellanox/mlx4/en_clock.c void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
mdev              265 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_dev *dev = mdev->dev;
mdev              272 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	if (mdev->ptp_clock)
mdev              275 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	seqlock_init(&mdev->clock_lock);
mdev              277 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev              278 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->cycles.read = mlx4_en_read_clock;
mdev              279 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
mdev              280 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
mdev              281 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->cycles.mult =
mdev              282 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev              283 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->nominal_c_mult = mdev->cycles.mult;
mdev              285 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_seqlock_irqsave(&mdev->clock_lock, flags);
mdev              286 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	timecounter_init(&mdev->clock, &mdev->cycles,
mdev              288 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev              291 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
mdev              292 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
mdev              294 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
mdev              295 drivers/net/ethernet/mellanox/mlx4/en_clock.c 					     &mdev->pdev->dev);
mdev              296 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	if (IS_ERR(mdev->ptp_clock)) {
mdev              297 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mdev->ptp_clock = NULL;
mdev              298 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mlx4_err(mdev, "ptp_clock_register failed\n");
mdev              299 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	} else if (mdev->ptp_clock) {
mdev              300 drivers/net/ethernet/mellanox/mlx4/en_clock.c 		mlx4_info(mdev, "registered PHC clock\n");
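
The en_clock.c hits cover both halves of the mlx4 PHC pattern: setup (fill the cyclecounter mult/shift, timecounter_init() under clock_lock, then ptp_clock_register()) and readout. The readout half is the part most worth copying; a short sketch of it, written against the same clock/clock_lock members of struct mlx4_en_dev and meant to sit next to the driver's own headers, is:

static void fill_hwtstamp_sketch(struct mlx4_en_dev *mdev,
				 struct skb_shared_hwtstamps *hwts,
				 u64 raw_cqe_timestamp)
{
	unsigned int seq;
	u64 nsec;

	/* retry if a PTP callback changed mult/shift while we converted */
	do {
		seq = read_seqbegin(&mdev->clock_lock);
		nsec = timecounter_cyc2time(&mdev->clock, raw_cqe_timestamp);
	} while (read_seqretry(&mdev->clock_lock, seq));

	memset(hwts, 0, sizeof(*hwts));
	hwts->hwtstamp = ns_to_ktime(nsec);
}
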
mdev               51 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev               62 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
mdev               66 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	cq->vector = mdev->dev->caps.num_comp_vectors;
mdev               71 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
mdev               72 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
mdev               74 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
mdev               92 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev               97 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	cq->dev = mdev->pndev[priv->port];
mdev              105 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
mdev              109 drivers/net/ethernet/mellanox/mlx4/en_cq.c 			err = mlx4_assign_eq(mdev->dev, priv->port,
mdev              112 drivers/net/ethernet/mellanox/mlx4/en_cq.c 				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
mdev              121 drivers/net/ethernet/mellanox/mlx4/en_cq.c 			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
mdev              141 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
mdev              142 drivers/net/ethernet/mellanox/mlx4/en_cq.c 			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
mdev              171 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		mlx4_release_eq(mdev->dev, cq->vector);
mdev              172 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	cq->vector = mdev->dev->caps.num_comp_vectors;
mdev              178 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              181 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
mdev              182 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
mdev              184 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		mlx4_release_eq(priv->mdev->dev, cq->vector);
mdev              199 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
mdev              205 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
mdev              211 drivers/net/ethernet/mellanox/mlx4/en_cq.c 	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
mdev              212 drivers/net/ethernet/mellanox/mlx4/en_cq.c 		    &priv->mdev->uar_lock);
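
The en_cq.c lines revolve around picking a completion vector for each CQ: keep an already-valid vector, otherwise ask the core for a dedicated EQ, and fall back to the shared default (caps.num_comp_vectors) if that fails. The sketch below only mirrors the call shapes visible in the listing; in particular the &cq->vector argument to mlx4_assign_eq() is an assumption, so treat it as illustrative rather than authoritative.

static void cq_pick_vector_sketch(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector))
		return;				/* keep the vector chosen earlier */

	if (!mlx4_assign_eq(mdev->dev, priv->port, &cq->vector))
		return;				/* got a dedicated EQ for this CQ */

	mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n", cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;	/* shared default EQ */
}
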
mdev              100 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 		*cap = 1 <<  mlx4_max_tc(priv->mdev->dev);
mdev              149 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 		*num = mlx4_max_tc(priv->mdev->dev);
mdev              160 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              204 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	if (mlx4_SET_PORT_general(mdev->dev, priv->port,
mdev              345 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              372 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
mdev              380 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              387 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
mdev              416 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              431 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
mdev              439 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev              551 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
mdev              554 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
mdev              564 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mdev              571 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
mdev              600 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
mdev              616 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
mdev              619 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
mdev              653 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 		err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
mdev              659 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
mdev              663 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
mdev              677 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
mdev              680 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
mdev              691 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 		err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mdev              698 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
mdev              706 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
mdev               89 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev               96 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		(u16) (mdev->dev->caps.fw_ver >> 32),
mdev               97 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
mdev               98 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		(u16) (mdev->dev->caps.fw_ver & 0xffff));
mdev               99 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
mdev              230 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_caps *caps = &priv->mdev->dev->caps;
mdev              254 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
mdev              280 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (!(priv->mdev->dev->caps.flags & mask))
mdev              286 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
mdev              300 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
mdev              354 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
mdev              458 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
mdev              540 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              543 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
mdev              754 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
mdev              872 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
mdev              879 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
mdev              934 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (!(priv->mdev->dev->caps.flags2 &
mdev              942 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
mdev              981 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
mdev              989 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_lock(&priv->mdev->state_lock);
mdev              996 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_unlock(&priv->mdev->state_lock);
mdev             1078 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1090 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
mdev             1098 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev             1122 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1157 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_lock(&mdev->state_lock);
mdev             1181 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_unlock(&mdev->state_lock);
mdev             1216 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
mdev             1222 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
mdev             1260 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1292 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_lock(&mdev->state_lock);
mdev             1311 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_unlock(&mdev->state_lock);
mdev             1630 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
mdev             1641 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
mdev             1676 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
mdev             1726 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1733 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	    (mdev->dev->caps.steering_mode !=
mdev             1769 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1771 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (mdev->dev->caps.steering_mode !=
mdev             1801 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
mdev             1812 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1828 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_lock(&mdev->state_lock);
mdev             1873 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	mutex_unlock(&mdev->state_lock);
mdev             1882 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1889 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
mdev             1903 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		if (mdev->ptp_clock)
mdev             1904 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 			info->phc_index = ptp_clock_index(mdev->ptp_clock);
mdev             1913 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1952 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
mdev             2020 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2025 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	ret = mlx4_get_module_info(mdev->dev, priv->port,
mdev             2064 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2078 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 		ret = mlx4_get_module_info(mdev->dev, priv->port,
mdev             2103 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2105 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
mdev             2119 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c 	err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
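
Among the en_ethtool.c hits, the get_ts_info path (around lines 1889-1904 of that file) ties the timestamping story together: hardware timestamping is only advertised when the TS capability bit is set, and the PHC index comes from the clock registered in en_clock.c. A hedged sketch of that callback, with the filter lists abbreviated, is:

static int get_ts_info_sketch(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;
		info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
		info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
				   BIT(HWTSTAMP_FILTER_ALL);
		if (mdev->ptp_clock)
			info->phc_index = ptp_clock_index(mdev->ptp_clock);
	}

	return 0;
}
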
mdev               96 drivers/net/ethernet/mellanox/mlx4/en_main.c 		       level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
mdev              117 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_is_mfunc(priv->mdev->dev) &&
mdev              124 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
mdev              127 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mutex_lock(&priv->mdev->state_lock);
mdev              128 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if ((priv->mdev->dev->caps.flags2 &
mdev              145 drivers/net/ethernet/mellanox/mlx4/en_main.c 			mlx4_warn(priv->mdev, "failed to change mcast loopback\n");
mdev              147 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mutex_unlock(&priv->mdev->state_lock);
mdev              150 drivers/net/ethernet/mellanox/mlx4/en_main.c static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
mdev              152 drivers/net/ethernet/mellanox/mlx4/en_main.c 	struct mlx4_en_profile *params = &mdev->profile;
mdev              160 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (params->udp_rss && !(mdev->dev->caps.flags
mdev              162 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
mdev              196 drivers/net/ethernet/mellanox/mlx4/en_main.c 	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
mdev              202 drivers/net/ethernet/mellanox/mlx4/en_main.c 		if (!mdev->pndev[port])
mdev              204 drivers/net/ethernet/mellanox/mlx4/en_main.c 		priv = netdev_priv(mdev->pndev[port]);
mdev              208 drivers/net/ethernet/mellanox/mlx4/en_main.c 		queue_work(mdev->workqueue, &priv->linkstate_task);
mdev              212 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_err(mdev, "Internal error detected, restarting device\n");
mdev              220 drivers/net/ethernet/mellanox/mlx4/en_main.c 		    !mdev->pndev[port])
mdev              222 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
mdev              229 drivers/net/ethernet/mellanox/mlx4/en_main.c 	struct mlx4_en_dev *mdev = endev_ptr;
mdev              232 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mutex_lock(&mdev->state_lock);
mdev              233 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->device_up = false;
mdev              234 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mutex_unlock(&mdev->state_lock);
mdev              237 drivers/net/ethernet/mellanox/mlx4/en_main.c 		if (mdev->pndev[i])
mdev              238 drivers/net/ethernet/mellanox/mlx4/en_main.c 			mlx4_en_destroy_netdev(mdev->pndev[i]);
mdev              240 drivers/net/ethernet/mellanox/mlx4/en_main.c 	flush_workqueue(mdev->workqueue);
mdev              241 drivers/net/ethernet/mellanox/mlx4/en_main.c 	destroy_workqueue(mdev->workqueue);
mdev              242 drivers/net/ethernet/mellanox/mlx4/en_main.c 	(void) mlx4_mr_free(dev, &mdev->mr);
mdev              243 drivers/net/ethernet/mellanox/mlx4/en_main.c 	iounmap(mdev->uar_map);
mdev              244 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_uar_free(dev, &mdev->priv_uar);
mdev              245 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_pd_free(dev, mdev->priv_pdn);
mdev              246 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mdev->nb.notifier_call)
mdev              247 drivers/net/ethernet/mellanox/mlx4/en_main.c 		unregister_netdevice_notifier(&mdev->nb);
mdev              248 drivers/net/ethernet/mellanox/mlx4/en_main.c 	kfree(mdev);
mdev              254 drivers/net/ethernet/mellanox/mlx4/en_main.c 	struct mlx4_en_dev *mdev = ctx;
mdev              258 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_info(mdev, "Activating port:%d\n", i);
mdev              259 drivers/net/ethernet/mellanox/mlx4/en_main.c 		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev              260 drivers/net/ethernet/mellanox/mlx4/en_main.c 			mdev->pndev[i] = NULL;
mdev              264 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->nb.notifier_call = mlx4_en_netdev_event;
mdev              265 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (register_netdevice_notifier(&mdev->nb)) {
mdev              266 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mdev->nb.notifier_call = NULL;
mdev              267 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_err(mdev, "Failed to create notifier\n");
mdev              273 drivers/net/ethernet/mellanox/mlx4/en_main.c 	struct mlx4_en_dev *mdev;
mdev              278 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              279 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (!mdev)
mdev              282 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
mdev              285 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
mdev              288 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
mdev              290 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (!mdev->uar_map)
mdev              292 drivers/net/ethernet/mellanox/mlx4/en_main.c 	spin_lock_init(&mdev->uar_lock);
mdev              294 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->dev = dev;
mdev              295 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->dma_device = &dev->persist->pdev->dev;
mdev              296 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->pdev = dev->persist->pdev;
mdev              297 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->device_up = false;
mdev              299 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
mdev              300 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (!mdev->LSO_support)
mdev              301 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
mdev              303 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
mdev              305 drivers/net/ethernet/mellanox/mlx4/en_main.c 			 0, 0, &mdev->mr)) {
mdev              306 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_err(mdev, "Failed allocating memory region\n");
mdev              309 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
mdev              310 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mlx4_err(mdev, "Failed enabling memory region\n");
mdev              315 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_en_get_profile(mdev);
mdev              318 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->port_cnt = 0;
mdev              320 drivers/net/ethernet/mellanox/mlx4/en_main.c 		mdev->port_cnt++;
mdev              323 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_en_set_num_rx_rings(mdev);
mdev              328 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->workqueue = create_singlethread_workqueue("mlx4_en");
mdev              329 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (!mdev->workqueue)
mdev              334 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mutex_init(&mdev->state_lock);
mdev              335 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mdev->device_up = true;
mdev              337 drivers/net/ethernet/mellanox/mlx4/en_main.c 	return mdev;
mdev              340 drivers/net/ethernet/mellanox/mlx4/en_main.c 	(void) mlx4_mr_free(dev, &mdev->mr);
mdev              342 drivers/net/ethernet/mellanox/mlx4/en_main.c 	if (mdev->uar_map)
mdev              343 drivers/net/ethernet/mellanox/mlx4/en_main.c 		iounmap(mdev->uar_map);
mdev              345 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_uar_free(dev, &mdev->priv_uar);
mdev              347 drivers/net/ethernet/mellanox/mlx4/en_main.c 	mlx4_pd_free(dev, mdev->priv_pdn);
mdev              349 drivers/net/ethernet/mellanox/mlx4/en_main.c 	kfree(mdev);
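
The en_main.c hits end on the goto-unwind ladder of mlx4_en_add(): every resource taken on the way down (PD, UAR, UAR mapping, MR, workqueue) has a release step that runs in reverse order when a later step fails. A trimmed sketch of that shape, with the MR/profile/workqueue middle collapsed into a comment and the PAGE_SIZE mapping length assumed, looks like:

static void *en_add_sketch(struct mlx4_dev *dev)
{
	struct mlx4_en_dev *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
		goto err_free_dev;
	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
		goto err_pd;

	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
				PAGE_SIZE);
	if (!mdev->uar_map)
		goto err_uar;

	/* ... mlx4_mr_alloc()/mlx4_mr_enable(), profile parsing and the
	 *     "mlx4_en" workqueue go here, each failure jumping to the label
	 *     right below its own cleanup ... */

	mutex_init(&mdev->state_lock);
	mdev->device_up = true;
	return mdev;

err_uar:
	mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
	mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
	kfree(mdev);
	return NULL;
}
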
mdev               74 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!mlx4_is_slave(priv->mdev->dev)) {
mdev               91 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              102 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev              136 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev              256 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
mdev              261 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
mdev              327 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
mdev              409 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(priv->mdev->workqueue, &filter->work);
mdev              475 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              484 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev              485 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->device_up && priv->port_up) {
mdev              486 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
mdev              492 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
mdev              497 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev              505 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              513 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev              514 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_unregister_vlan(mdev->dev, priv->port, vid);
mdev              516 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->device_up && priv->port_up) {
mdev              517 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
mdev              521 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev              542 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
mdev              543 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
mdev              546 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
mdev              560 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              561 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = mdev->dev;
mdev              612 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              613 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = mdev->dev;
mdev              638 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              639 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = mdev->dev;
mdev              677 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              678 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = mdev->dev;
mdev              697 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              698 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = mdev->dev;
mdev              730 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
mdev              747 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              750 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
mdev              753 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
mdev              782 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              790 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev              799 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev              887 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
mdev              891 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				     struct mlx4_en_dev *mdev)
mdev              901 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		switch (mdev->dev->caps.steering_mode) {
mdev              903 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_flow_steer_promisc_add(mdev->dev,
mdev              913 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_unicast_promisc_add(mdev->dev,
mdev              923 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_multicast_promisc_add(mdev->dev,
mdev              933 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
mdev              943 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
mdev              951 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				       struct mlx4_en_dev *mdev)
mdev              960 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	switch (mdev->dev->caps.steering_mode) {
mdev              962 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
mdev              971 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_unicast_promisc_remove(mdev->dev,
mdev              978 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_multicast_promisc_remove(mdev->dev,
mdev              988 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
mdev              999 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 struct mlx4_en_dev *mdev)
mdev             1008 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
mdev             1015 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			switch (mdev->dev->caps.steering_mode) {
mdev             1017 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_flow_steer_promisc_add(mdev->dev,
mdev             1024 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_multicast_promisc_add(mdev->dev,
mdev             1039 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			switch (mdev->dev->caps.steering_mode) {
mdev             1041 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_flow_steer_promisc_remove(mdev->dev,
mdev             1047 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_multicast_promisc_remove(mdev->dev,
mdev             1060 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
mdev             1066 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
mdev             1076 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mdev             1079 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
mdev             1090 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_multicast_detach(mdev->dev,
mdev             1099 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
mdev             1112 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				err = mlx4_multicast_attach(mdev->dev,
mdev             1132 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 struct mlx4_en_dev *mdev)
mdev             1172 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
mdev             1213 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_register_mac(mdev->dev, priv->port, mac);
mdev             1227 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				mlx4_unregister_mac(mdev->dev, priv->port, mac);
mdev             1255 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1258 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             1259 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!mdev->device_up) {
mdev             1269 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
mdev             1279 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_en_do_uc_filter(priv, dev, mdev);
mdev             1284 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_en_set_promisc_mode(priv, mdev);
mdev             1290 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_en_clear_promisc_mode(priv, mdev);
mdev             1292 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_en_do_multicast(priv, dev, mdev);
mdev             1294 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             1329 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
mdev             1354 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
mdev             1361 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
mdev             1369 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1387 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(mdev->workqueue, &priv->watchdog_task);
mdev             1519 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1522 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             1523 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->device_up) {
mdev             1525 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
mdev             1532 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
mdev             1534 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
mdev             1536 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
mdev             1538 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             1549 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1551 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             1552 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->device_up) {
mdev             1553 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mdev             1554 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_en_ptp_overflow_check(mdev);
mdev             1557 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		queue_delayed_work(mdev->workqueue, &priv->service_task,
mdev             1560 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             1567 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1570 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             1583 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             1589 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	int numa_node = priv->mdev->dev->numa_node;
mdev             1619 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1693 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev->mac_removed[priv->port] = 0;
mdev             1696 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_get_default_counter_index(mdev->dev, priv->port);
mdev             1761 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
mdev             1773 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
mdev             1781 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
mdev             1787 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
mdev             1788 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
mdev             1798 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
mdev             1805 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
mdev             1807 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_warn(mdev, "Failed setting steering rules\n");
mdev             1812 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
mdev             1815 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
mdev             1821 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(mdev->workqueue, &priv->rx_mode_task);
mdev             1823 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
mdev             1876 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1888 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_CLOSE_PORT(mdev->dev, priv->port);
mdev             1905 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
mdev             1908 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode ==
mdev             1912 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_flow_steer_promisc_remove(mdev->dev,
mdev             1915 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_flow_steer_promisc_remove(mdev->dev,
mdev             1922 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
mdev             1927 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
mdev             1936 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
mdev             1941 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mdev             1944 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
mdev             1953 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
mdev             1956 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode ==
mdev             1961 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			mlx4_flow_detach(mdev->dev, flow->id);
mdev             1981 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mdev             1989 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev             1990 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mdev->mac_removed[priv->port] = 1;
mdev             2008 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2014 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2020 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2027 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2031 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!mlx4_is_slave(mdev->dev))
mdev             2032 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
mdev             2069 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2072 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2074 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!mdev->device_up) {
mdev             2088 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2096 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2100 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2105 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2175 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
mdev             2214 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	dst->mdev = src->mdev;
mdev             2295 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		lockdep_is_held(&priv->mdev->state_lock));
mdev             2321 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2327 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
mdev             2333 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
mdev             2338 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	flush_workqueue(mdev->workqueue);
mdev             2340 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mdev             2341 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_en_remove_timestamp(mdev);
mdev             2344 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2345 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev->pndev[priv->port] = NULL;
mdev             2346 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev->upper[priv->port] = NULL;
mdev             2353 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2374 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2387 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mutex_lock(&mdev->state_lock);
mdev             2388 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!mdev->device_up) {
mdev             2398 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				queue_work(mdev->workqueue, &priv->watchdog_task);
mdev             2401 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mutex_unlock(&mdev->state_lock);
mdev             2409 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2420 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
mdev             2490 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2497 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
mdev             2523 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
mdev             2562 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2564 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
mdev             2571 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2573 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
mdev             2581 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2583 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
mdev             2590 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2592 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
mdev             2598 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2600 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
mdev             2606 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2608 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
mdev             2615 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = en_priv->mdev;
mdev             2617 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
mdev             2625 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *mdev = priv->mdev->dev;
mdev             2627 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
mdev             2646 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
mdev             2650 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
mdev             2664 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
mdev             2685 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
mdev             2696 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
mdev             2712 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
mdev             2721 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
mdev             2755 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
mdev             2770 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
mdev             2778 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2799 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mutex_lock(&mdev->state_lock);
mdev             2803 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 					lockdep_is_held(&mdev->state_lock));
mdev             2808 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mutex_unlock(&mdev->state_lock);
mdev             2827 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2857 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 					lockdep_is_held(&mdev->state_lock));
mdev             2868 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			queue_work(mdev->workqueue, &priv->watchdog_task);
mdev             2873 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2882 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             2889 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             2892 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		lockdep_is_held(&mdev->state_lock));
mdev             2895 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev             2988 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_dev *dev = bond->priv->mdev->dev;
mdev             3028 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_work(priv->mdev->workqueue, &bond->work);
mdev             3037 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev;
mdev             3048 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev = container_of(this, struct mlx4_en_dev, nb);
mdev             3049 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	dev = mdev->dev;
mdev             3056 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!port && (mdev->pndev[i] == ndev))
mdev             3058 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mdev->upper[i] = mdev->pndev[i] ?
mdev             3059 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
mdev             3061 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (!mdev->upper[i])
mdev             3066 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (mdev->upper[i] != mdev->upper[i-1])
mdev             3238 drivers/net/ethernet/mellanox/mlx4/en_netdev.c int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mdev             3254 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
mdev             3263 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
mdev             3278 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->mdev = mdev;
mdev             3279 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->ddev = &mdev->pdev->dev;
mdev             3287 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
mdev             3312 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
mdev             3313 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->cqe_size = mdev->dev->caps.cqe_size;
mdev             3317 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!mlx4_is_slave(priv->mdev->dev)) {
mdev             3333 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
mdev             3346 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
mdev             3348 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
mdev             3354 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
mdev             3360 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	} else if (mlx4_is_slave(priv->mdev->dev) &&
mdev             3361 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
mdev             3383 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
mdev             3394 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mlx4_is_master(priv->mdev->dev))
mdev             3408 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->LSO_support)
mdev             3411 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.tunnel_offload_mode ==
mdev             3437 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
mdev             3443 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mlx4_is_slave(mdev->dev)) {
mdev             3447 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = get_phv_bit(mdev->dev, port, &phv);
mdev             3452 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
mdev             3465 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
mdev             3466 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		    !(mdev->dev->caps.flags2 &
mdev             3471 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
mdev             3474 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
mdev             3477 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode ==
mdev             3479 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
mdev             3482 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mdev             3486 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
mdev             3488 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
mdev             3500 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev->pndev[port] = dev;
mdev             3501 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mdev->upper[port] = NULL;
mdev             3513 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
mdev             3523 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
mdev             3524 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
mdev             3534 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
mdev             3539 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
mdev             3542 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mdev             3543 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		mlx4_en_init_timestamp(mdev);
mdev             3545 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	queue_delayed_work(mdev->workqueue, &priv->service_task,
mdev             3548 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev             3549 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 mdev->profile.prof[priv->port].rx_ppp,
mdev             3550 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 mdev->profile.prof[priv->port].rx_pause,
mdev             3551 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 mdev->profile.prof[priv->port].tx_ppp,
mdev             3552 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				 mdev->profile.prof[priv->port].tx_pause);
mdev             3561 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
mdev             3576 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             3599 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_lock(&mdev->state_lock);
mdev             3654 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	mutex_unlock(&mdev->state_lock);
mdev               72 drivers/net/ethernet/mellanox/mlx4/en_port.c int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
mdev               75 drivers/net/ethernet/mellanox/mlx4/en_port.c 	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
mdev               80 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
mdev               83 drivers/net/ethernet/mellanox/mlx4/en_port.c 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
mdev              127 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev              153 drivers/net/ethernet/mellanox/mlx4/en_port.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              157 drivers/net/ethernet/mellanox/mlx4/en_port.c 	if (!priv->port_up || mlx4_is_master(mdev->dev))
mdev              183 drivers/net/ethernet/mellanox/mlx4/en_port.c int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
mdev              188 drivers/net/ethernet/mellanox/mlx4/en_port.c 	struct net_device *dev = mdev->pndev[port];
mdev              198 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
mdev              202 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
mdev              204 drivers/net/ethernet/mellanox/mlx4/en_port.c 		mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev              208 drivers/net/ethernet/mellanox/mlx4/en_port.c 	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
mdev              217 drivers/net/ethernet/mellanox/mlx4/en_port.c 	counter_index = mlx4_get_default_counter_index(mdev->dev, port);
mdev              218 drivers/net/ethernet/mellanox/mlx4/en_port.c 	err = mlx4_get_counter_stats(mdev->dev, counter_index,
mdev              225 drivers/net/ethernet/mellanox/mlx4/en_port.c 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
mdev              228 drivers/net/ethernet/mellanox/mlx4/en_port.c 		err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
mdev              278 drivers/net/ethernet/mellanox/mlx4/en_port.c 	if (!mlx4_is_slave(mdev->dev)) {
mdev              297 drivers/net/ethernet/mellanox/mlx4/en_port.c 		if (mlx4_is_master(mdev->dev)) {
mdev              428 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mdev              429 drivers/net/ethernet/mellanox/mlx4/en_port.c 	mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
mdev               44 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev               49 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	context->pd = cpu_to_be32(mdev->priv_pdn);
mdev               55 drivers/net/ethernet/mellanox/mlx4/en_resources.c 		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
mdev               61 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
mdev               62 drivers/net/ethernet/mellanox/mlx4/en_resources.c 					mdev->priv_uar.index));
mdev               75 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	    (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
mdev               77 drivers/net/ethernet/mellanox/mlx4/en_resources.c 			    MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
mdev               88 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	    (mdev->dev->caps.tunnel_offload_mode ==  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
mdev              104 drivers/net/ethernet/mellanox/mlx4/en_resources.c 	ret = mlx4_update_qp(priv->mdev->dev, qp->qpn,
mdev              119 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
mdev              243 drivers/net/ethernet/mellanox/mlx4/en_rx.c void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
mdev              248 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_dev *dev = mdev->dev;
mdev              253 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					 mlx4_get_eqs_per_port(mdev->dev, i),
mdev              258 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mdev->profile.prof[i].rx_ring_num =
mdev              267 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              301 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
mdev              302 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
mdev              303 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
mdev              433 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              439 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					lockdep_is_held(&mdev->state_lock));
mdev              443 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
mdev              829 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
mdev             1061 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1069 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
mdev             1082 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
mdev             1091 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
mdev             1093 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_remove(mdev->dev, qp);
mdev             1094 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_free(mdev->dev, qp);
mdev             1107 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
mdev             1114 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
mdev             1117 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
mdev             1129 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
mdev             1130 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
mdev             1131 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
mdev             1137 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1153 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
mdev             1187 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
mdev             1208 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (priv->mdev->profile.udp_rss) {
mdev             1213 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
mdev             1232 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
mdev             1240 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
mdev             1242 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mdev             1243 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
mdev             1249 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
mdev             1251 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mdev             1252 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
mdev             1254 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
mdev             1260 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev             1265 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
mdev             1268 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mdev             1269 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
mdev             1275 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
mdev             1277 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mdev             1278 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
mdev             1280 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
mdev               45 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
mdev              117 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              121 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	err = mlx4_test_async(mdev->dev);
mdev              123 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
mdev              131 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 		err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
mdev              141 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
mdev              152 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
mdev              185 drivers/net/ethernet/mellanox/mlx4/en_selftest.c 		if (priv->mdev->dev->caps.flags &
mdev               54 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev               91 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	set_dev_node(&mdev->dev->persist->pdev->dev, node);
mdev               92 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
mdev               93 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
mdev              105 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
mdev              113 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
mdev              120 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
mdev              123 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf.uar = &mdev->priv_uar;
mdev              124 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		ring->bf.uar->map = mdev->uar_map;
mdev              139 drivers/net/ethernet/mellanox/mlx4/en_tx.c 						     priv->mdev->dev->numa_node),
mdev              146 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
mdev              148 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
mdev              164 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              169 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		mlx4_bf_free(mdev->dev, &ring->bf);
mdev              170 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_remove(mdev->dev, &ring->sp_qp);
mdev              171 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_free(mdev->dev, &ring->sp_qp);
mdev              172 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
mdev              173 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
mdev              186 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              199 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	ring->mr_key = cpu_to_be32(mdev->mr.key);
mdev              205 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
mdev              208 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
mdev              220 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_en_dev *mdev = priv->mdev;
mdev              222 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
mdev              291 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp);
mdev             1162 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_dev *mdev = info->dev;
mdev             1166 drivers/net/ethernet/mellanox/mlx4/main.c 		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
mdev             1168 drivers/net/ethernet/mellanox/mlx4/main.c 	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
mdev             1179 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_dev *mdev = info->dev;
mdev             1180 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_priv *priv = mlx4_priv(mdev);
mdev             1186 drivers/net/ethernet/mellanox/mlx4/main.c 	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
mdev             1187 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_err(mdev,
mdev             1193 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_stop_sense(mdev);
mdev             1198 drivers/net/ethernet/mellanox/mlx4/main.c 	mdev->caps.possible_type[info->port] = info->tmp_type;
mdev             1200 drivers/net/ethernet/mellanox/mlx4/main.c 	for (i = 0; i < mdev->caps.num_ports; i++) {
mdev             1202 drivers/net/ethernet/mellanox/mlx4/main.c 					mdev->caps.possible_type[i+1];
mdev             1204 drivers/net/ethernet/mellanox/mlx4/main.c 			types[i] = mdev->caps.port_type[i+1];
mdev             1207 drivers/net/ethernet/mellanox/mlx4/main.c 	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
mdev             1208 drivers/net/ethernet/mellanox/mlx4/main.c 	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
mdev             1209 drivers/net/ethernet/mellanox/mlx4/main.c 		for (i = 1; i <= mdev->caps.num_ports; i++) {
mdev             1210 drivers/net/ethernet/mellanox/mlx4/main.c 			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev             1211 drivers/net/ethernet/mellanox/mlx4/main.c 				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
mdev             1217 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
mdev             1221 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_do_sense_ports(mdev, new_types, types);
mdev             1223 drivers/net/ethernet/mellanox/mlx4/main.c 	err = mlx4_check_port_params(mdev, new_types);
mdev             1230 drivers/net/ethernet/mellanox/mlx4/main.c 	for (i = 0; i < mdev->caps.num_ports; i++)
mdev             1233 drivers/net/ethernet/mellanox/mlx4/main.c 	err = mlx4_change_port_types(mdev, new_types);
mdev             1236 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_start_sense(mdev);
mdev             1248 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_dev *mdev = info->dev;
mdev             1262 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_err(mdev, "%s is not supported port type\n", buf);
mdev             1313 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_dev *mdev = info->dev;
mdev             1315 drivers/net/ethernet/mellanox/mlx4/main.c 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
mdev             1316 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
mdev             1319 drivers/net/ethernet/mellanox/mlx4/main.c 			ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
mdev             1329 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_dev *mdev = info->dev;
mdev             1330 drivers/net/ethernet/mellanox/mlx4/main.c 	struct mlx4_priv *priv = mlx4_priv(mdev);
mdev             1333 drivers/net/ethernet/mellanox/mlx4/main.c 	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
mdev             1334 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
mdev             1343 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
mdev             1347 drivers/net/ethernet/mellanox/mlx4/main.c 	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
mdev             1349 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_stop_sense(mdev);
mdev             1351 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_unregister_device(mdev);
mdev             1352 drivers/net/ethernet/mellanox/mlx4/main.c 	for (port = 1; port <= mdev->caps.num_ports; port++) {
mdev             1353 drivers/net/ethernet/mellanox/mlx4/main.c 		mlx4_CLOSE_PORT(mdev, port);
mdev             1354 drivers/net/ethernet/mellanox/mlx4/main.c 		err = mlx4_SET_PORT(mdev, port, -1);
mdev             1356 drivers/net/ethernet/mellanox/mlx4/main.c 			mlx4_err(mdev, "Failed to set port %d, aborting\n",
mdev             1361 drivers/net/ethernet/mellanox/mlx4/main.c 	err = mlx4_register_device(mdev);
mdev             1364 drivers/net/ethernet/mellanox/mlx4/main.c 	mlx4_start_sense(mdev);
mdev              217 drivers/net/ethernet/mellanox/mlx4/mlx4.h #define mlx4_dbg(mdev, format, ...)					\
mdev              221 drivers/net/ethernet/mellanox/mlx4/mlx4.h 			   &(mdev)->persist->pdev->dev, format,		\
mdev              225 drivers/net/ethernet/mellanox/mlx4/mlx4.h #define mlx4_err(mdev, format, ...)					\
mdev              226 drivers/net/ethernet/mellanox/mlx4/mlx4.h 	dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
mdev              227 drivers/net/ethernet/mellanox/mlx4/mlx4.h #define mlx4_info(mdev, format, ...)					\
mdev              228 drivers/net/ethernet/mellanox/mlx4/mlx4.h 	dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
mdev              229 drivers/net/ethernet/mellanox/mlx4/mlx4.h #define mlx4_warn(mdev, format, ...)					\
mdev              230 drivers/net/ethernet/mellanox/mlx4/mlx4.h 	dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
mdev              534 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 	struct mlx4_en_dev *mdev;
mdev              672 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mdev              724 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
mdev              768 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
mdev              769 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
mdev              785 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
mdev              804 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
mdev              807 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
mdev              808 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
mdev              836 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h #define mlx4_err(mdev, format, ...)					\
mdev              838 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
mdev              839 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h #define mlx4_info(mdev, format, ...)					\
mdev              841 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
mdev              842 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h #define mlx4_warn(mdev, format, ...)					\
mdev              844 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
mdev               44 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	return mlx5_fpga_ipsec_device_caps(mdev);
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
mdev               50 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	return mlx5_fpga_ipsec_counters_count(mdev);
mdev               53 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
mdev               59 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, daddr,
mdev               74 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
mdev               76 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	return mlx5_fpga_ipsec_init(mdev);
mdev               84 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
mdev               86 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	mlx5_fpga_ipsec_cleanup(mdev);
mdev               90 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
mdev               96 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
mdev              100 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c 	xfrm->mdev = mdev;
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
mdev               45 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
mdev               46 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h #define MLX5_IPSEC_DEV(mdev) false
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
mdev               87 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
mdev               53 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
mdev               59 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
mdev               67 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_fpga_is_tls_device(mdev) ||
mdev               68 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 		mlx5_accel_is_ktls_device(mdev);
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_fpga_tls_device_caps(mdev);
mdev               76 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_fpga_tls_init(mdev);
mdev               81 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	mlx5_fpga_tls_cleanup(mdev);
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
mdev              119 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
mdev              121 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c 	mlx5_destroy_encryption_key(mdev, key_id);
mdev               41 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
mdev               44 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
mdev               46 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 	if (!MLX5_CAP_GEN(mdev, tls_tx))
mdev               51 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 	if (!MLX5_CAP_GEN(mdev, log_max_dek))
mdev               54 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 	return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
mdev               57 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
mdev               63 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 			return MLX5_CAP_TLS(mdev,  tls_1_2_aes_gcm_128);
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
mdev               75 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
mdev              106 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev              110 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mdev              112 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
mdev              114 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
mdev              115 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
mdev              117 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
mdev              122 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mdev              128 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle,
mdev              130 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
mdev              132 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h 	return mlx5_accel_is_ktls_device(mdev);
mdev              134 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
mdev              135 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
mdev              252 drivers/net/ethernet/mellanox/mlx5/core/dev.c static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
mdev              260 drivers/net/ethernet/mellanox/mlx5/core/dev.c 			dev_ctx = mlx5_get_device(intf, &mdev->priv);
mdev              270 drivers/net/ethernet/mellanox/mlx5/core/dev.c void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
mdev              273 drivers/net/ethernet/mellanox/mlx5/core/dev.c 	if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
mdev              274 drivers/net/ethernet/mellanox/mlx5/core/dev.c 		mlx5_remove_dev_by_protocol(mdev, protocol);
mdev              275 drivers/net/ethernet/mellanox/mlx5/core/dev.c 		mlx5_add_dev_by_protocol(mdev, protocol);
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/en.h 	(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
mdev               84 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/en.h 	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
mdev               86 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
mdev               87 drivers/net/ethernet/mellanox/mlx5/core/en.h 	MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
mdev              166 drivers/net/ethernet/mellanox/mlx5/core/en.h static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
mdev              168 drivers/net/ethernet/mellanox/mlx5/core/en.h 	if (mlx5_lag_is_lacp_owner(mdev))
mdev              171 drivers/net/ethernet/mellanox/mlx5/core/en.h 	return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/en.h static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
mdev              191 drivers/net/ethernet/mellanox/mlx5/core/en.h 		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
mdev              329 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5_core_dev      *mdev;
mdev              695 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5_core_dev  *mdev;
mdev              737 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5_core_dev      *mdev;
mdev              852 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5_core_dev      *mdev;
mdev              884 drivers/net/ethernet/mellanox/mlx5/core/en.h 	int	(*init)(struct mlx5_core_dev *mdev,
mdev              941 drivers/net/ethernet/mellanox/mlx5/core/en.h bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
mdev              942 drivers/net/ethernet/mellanox/mlx5/core/en.h bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
mdev             1057 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
mdev             1058 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mdev             1068 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
mdev             1073 drivers/net/ethernet/mellanox/mlx5/core/en.h static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
mdev             1075 drivers/net/ethernet/mellanox/mlx5/core/en.h 	return MLX5_CAP_ETH(mdev, swp) &&
mdev             1076 drivers/net/ethernet/mellanox/mlx5/core/en.h 		MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
mdev             1088 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_create_tir(struct mlx5_core_dev *mdev,
mdev             1090 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
mdev             1092 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
mdev             1093 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
mdev             1114 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
mdev             1115 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
mdev             1171 drivers/net/ethernet/mellanox/mlx5/core/en.h 		      struct mlx5_core_dev *mdev,
mdev             1176 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
mdev             1182 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mdev             1187 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
mdev              105 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
mdev              257 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type);
mdev              258 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev);
mdev               45 drivers/net/ethernet/mellanox/mlx5/core/en/health.c 	err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
mdev              132 drivers/net/ethernet/mellanox/mlx5/core/en/health.c 	struct mlx5_core_dev *mdev = channel->mdev;
mdev              140 drivers/net/ethernet/mellanox/mlx5/core/en/health.c 	err = mlx5e_modify_sq(mdev, sqn, &msp);
mdev              150 drivers/net/ethernet/mellanox/mlx5/core/en/health.c 	err = mlx5e_modify_sq(mdev, sqn, &msp);
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 		mlx5_core_err(priv->mdev,
mdev              132 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
mdev               25 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               27 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
mdev               29 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
mdev               30 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	    MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <
mdev               33 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <
mdev               46 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              117 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
mdev              118 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	int num_q_counters      = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
mdev              119 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	int num_ppcnt_counters  = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
mdev              120 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 				  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
mdev              139 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              161 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c 	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/en/params.c bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
mdev              103 drivers/net/ethernet/mellanox/mlx5/core/en/params.c 	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/en/params.c u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
mdev              130 drivers/net/ethernet/mellanox/mlx5/core/en/params.c 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
mdev              133 drivers/net/ethernet/mellanox/mlx5/core/en/params.c 	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/en/params.c u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
mdev              141 drivers/net/ethernet/mellanox/mlx5/core/en/params.c 		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
mdev              144 drivers/net/ethernet/mellanox/mlx5/core/en/params.c u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
mdev              150 drivers/net/ethernet/mellanox/mlx5/core/en/params.c 		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
mdev               87 drivers/net/ethernet/mellanox/mlx5/core/en/params.h bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
mdev               92 drivers/net/ethernet/mellanox/mlx5/core/en/params.h u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
mdev               95 drivers/net/ethernet/mellanox/mlx5/core/en/params.h u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/en/params.h u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
mdev               81 drivers/net/ethernet/mellanox/mlx5/core/en/port.c static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              157 drivers/net/ethernet/mellanox/mlx5/core/en/port.c u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
mdev              166 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
mdev              173 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              181 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
mdev              186 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 		err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
mdev              190 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	*speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
mdev              198 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
mdev              208 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              209 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
mdev              213 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
mdev              222 drivers/net/ethernet/mellanox/mlx5/core/en/port.c u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
mdev              230 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
mdev              238 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out)
mdev              249 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 0);
mdev              255 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in)
mdev              266 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 1);
mdev              273 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
mdev              290 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0);
mdev              297 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 		mlx5_core_dbg(mdev, "prio %d, buffer %d\n", prio, buffer[prio]);
mdev              305 drivers/net/ethernet/mellanox/mlx5/core/en/port.c int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
mdev              323 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0);
mdev              338 drivers/net/ethernet/mellanox/mlx5/core/en/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 1);
mdev               51 drivers/net/ethernet/mellanox/mlx5/core/en/port.h u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
mdev               53 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
mdev               54 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
mdev               55 drivers/net/ethernet/mellanox/mlx5/core/en/port.h u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
mdev               59 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
mdev               60 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/en/port.h int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
mdev               37 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	err = mlx5e_port_query_pbmc(mdev, out);
mdev               91 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	err = mlx5e_port_query_pbmc(mdev, in);
mdev              119 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	err = mlx5e_port_set_pbmc(mdev, in);
mdev              134 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
mdev              238 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
mdev              243 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
mdev              253 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
mdev              291 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 		err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
mdev              303 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
mdev              354 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c 		err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h 				     MLX5_CAP_PCAM_REG(mdev, pbmc) && \
mdev               44 drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h 				     MLX5_CAP_PCAM_REG(mdev, pptb))
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5_core_dev *mdev;
mdev               67 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mdev = icosq->channel->mdev;
mdev               69 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
mdev              139 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5_core_dev *mdev;
mdev              146 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mdev = rq->mdev;
mdev              148 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
mdev              248 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
mdev              252 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
mdev              319 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
mdev              382 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct devlink *devlink = priv_to_devlink(priv->mdev);
mdev               36 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	struct mlx5_core_dev *mdev;
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	mdev = sq->channel->mdev;
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
mdev              156 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
mdev              287 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              290 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c 	devlink = priv_to_devlink(mdev);
mdev               29 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 			  mlx5_lag_is_sriov(priv->mdev));
mdev               84 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (mlx5_lag_is_multipath(mdev)) {
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		struct mlx5_eswitch *esw = mdev->priv.eswitch;
mdev              100 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
mdev              212 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
mdev              240 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
mdev              303 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
mdev              331 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
mdev              359 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
mdev              422 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
mdev               12 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	return !!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_GENEVE);
mdev              143 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_vni)) {
mdev              159 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	u8 max_tlv_option_data_len = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_option_data_len);
mdev              160 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	u8 max_tlv_options = MLX5_CAP_GEN(priv->mdev, max_geneve_tlv_options);
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
mdev              200 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
mdev              249 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	res = mlx5_geneve_tlv_option_add(priv->mdev->geneve, option_key);
mdev              288 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_geneve_oam)) {
mdev              298 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c 	if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
mdev                9 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 	return !!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap);
mdev               10 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	return !!MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap);
mdev               32 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan,
mdev               69 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c 	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
mdev               15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 			      struct mlx5_core_dev *mdev)
mdev               31 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
mdev               13 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h 			      struct mlx5_core_dev *mdev);
mdev               12 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	struct device *dev = priv->mdev->device;
mdev               39 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	struct device *dev = priv->mdev->device;
mdev              172 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c 	if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
mdev               45 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
mdev               47 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h 	return mlx5_tx_swp_supported(mdev);
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
mdev              224 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
mdev              273 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
mdev              326 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
mdev              344 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 			mlx5_accel_esp_create_hw_context(priv->mdev,
mdev              403 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	if (!MLX5_IPSEC_DEV(priv->mdev)) {
mdev              417 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
mdev              515 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              521 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
mdev              522 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	    !MLX5_CAP_ETH(mdev, swp)) {
mdev              523 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
mdev              527 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
mdev              532 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
mdev              533 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
mdev              540 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
mdev              541 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	    !MLX5_CAP_ETH(mdev, swp_lso)) {
mdev              542 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
mdev              546 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
mdev              111 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c 	ret = mlx5_accel_ipsec_counters_read(priv->mdev, (u64 *)&priv->ipsec->stats,
mdev                7 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
mdev               16 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	return mlx5e_create_tis(mdev, in, tisn);
mdev               27 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               33 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
mdev               45 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	err = mlx5e_ktls_create_tis(mdev, &tx_priv->tisn);
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	err = mlx5_ktls_create_key(mdev, crypto_info, &tx_priv->key_id);
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
mdev               86 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	if (!mlx5_accel_is_ktls_device(priv->mdev))
mdev              109 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              110 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	u32 caps = mlx5_accel_tls_device_caps(mdev);
mdev              123 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
mdev              159 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	mlx5_accel_tls_del_flow(priv->mdev, handle,
mdev              178 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
mdev              195 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	if (mlx5_accel_is_ktls_device(priv->mdev)) {
mdev              200 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	if (!mlx5_accel_is_tls_device(priv->mdev))
mdev              203 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	caps = mlx5_accel_tls_device_caps(priv->mdev);
mdev              101 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h 	if (mlx5_accel_is_ktls_device(priv->mdev))
mdev              272 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
mdev               39 drivers/net/ethernet/mellanox/mlx5/core/en_common.c int mlx5e_create_tir(struct mlx5_core_dev *mdev,
mdev               44 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_lock(&mdev->mlx5e_res.td.list_lock);
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
mdev               50 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
mdev               55 drivers/net/ethernet/mellanox/mlx5/core/en_common.c void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_lock(&mdev->mlx5e_res.td.list_lock);
mdev               59 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_destroy_tir(mdev, tir->tirn);
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
mdev               64 drivers/net/ethernet/mellanox/mlx5/core/en_common.c static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
mdev               91 drivers/net/ethernet/mellanox/mlx5/core/en_common.c int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
mdev               93 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	struct mlx5e_resources *res = &mdev->mlx5e_res;
mdev               96 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5_core_alloc_pd(mdev, &res->pdn);
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
mdev              104 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
mdev              108 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
mdev              110 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
mdev              114 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 		mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
mdev              120 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
mdev              121 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_init(&mdev->mlx5e_res.td.list_lock);
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_destroy_mkey(mdev, &res->mkey);
mdev              128 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mdev              130 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_dealloc_pd(mdev, res->pdn);
mdev              134 drivers/net/ethernet/mellanox/mlx5/core/en_common.c void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	struct mlx5e_resources *res = &mdev->mlx5e_res;
mdev              138 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_free_bfreg(mdev, &res->bfreg);
mdev              139 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_destroy_mkey(mdev, &res->mkey);
mdev              140 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mdev              141 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mlx5_core_dealloc_pd(mdev, res->pdn);
mdev              147 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              167 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_lock(&mdev->mlx5e_res.td.list_lock);
mdev              168 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
mdev              170 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 		err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
mdev              179 drivers/net/ethernet/mellanox/mlx5/core/en_common.c 	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
mdev               52 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg)  && \
mdev               53 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
mdev               54 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 				   MLX5_CAP_QCAM_REG(mdev, qpdpm))
mdev               64 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               68 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_query_port_dcbx_param(mdev, param);
mdev               76 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	return mlx5_set_port_dcbx_param(mdev, param);
mdev               84 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              109 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, ets))
mdev              112 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
mdev              114 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
mdev              118 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
mdev              122 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
mdev              243 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              246 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	int max_tc = mlx5_max_tc(mdev);
mdev              252 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
mdev              256 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_set_port_tc_group(mdev, tc_group);
mdev              260 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
mdev              318 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, ets))
mdev              336 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              340 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
mdev              346 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (MLX5_BUFFER_SUPPORTED(mdev))
mdev              349 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
mdev              356 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              364 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
mdev              366 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
mdev              369 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		mlx5_toggle_port_link(mdev);
mdev              380 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (MLX5_BUFFER_SUPPORTED(mdev)) {
mdev              414 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
mdev              446 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
mdev              447 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	    !MLX5_DSCP_SUPPORTED(priv->mdev))
mdev              499 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if  (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
mdev              500 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	     !MLX5_DSCP_SUPPORTED(priv->mdev))
mdev              542 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              548 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
mdev              554 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              577 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              586 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              608 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
mdev              615 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              621 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(mdev, ets))
mdev              651 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
mdev              683 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	mlx5_query_mac_address(priv->mdev, perm_addr);
mdev              728 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              730 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
mdev              745 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
mdev              818 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              832 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		*cap = 1 << mlx5_max_tc(mdev);
mdev              835 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		*cap = 1 << mlx5_max_tc(mdev);
mdev              861 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              866 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		*num = mlx5_max_tc(mdev) + 1;
mdev              900 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              905 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_BUFFER_SUPPORTED(mdev))
mdev              908 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5e_port_query_priority2buffer(mdev, buffer);
mdev              930 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              938 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_BUFFER_SUPPORTED(mdev))
mdev              942 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
mdev              945 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
mdev              947 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
mdev             1019 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
mdev             1035 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, ets))
mdev             1039 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
mdev             1068 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
mdev             1071 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
mdev             1104 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
mdev             1139 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_set_trust_state(priv->mdev, trust_state);
mdev             1152 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
mdev             1162 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1167 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_DSCP_SUPPORTED(mdev))
mdev             1170 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
mdev             1176 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
mdev             1189 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (!MLX5_CAP_GEN(priv->mdev, qos))
mdev             1192 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c 	if (MLX5_CAP_GEN(priv->mdev, dcbx))
mdev               38 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 			struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
mdev               40 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 	mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
mdev               51 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 	mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 	mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
mdev               41 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		 mdev->board_id);
mdev               50 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	strlcpy(drvinfo->bus_info, dev_name(mdev->device),
mdev              199 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
mdev              203 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              486 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
mdev              516 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              524 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 			mlx5_core_modify_cq_moderation(mdev,
mdev              530 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
mdev              540 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              545 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
mdev              605 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
mdev              614 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
mdev              792 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
mdev              805 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
mdev              809 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
mdev              870 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
mdev              874 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              882 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              897 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
mdev              903 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev              928 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
mdev              933 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	get_supported(mdev, eth_proto_cap, link_ksettings);
mdev              945 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
mdev              956 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = get_fec_supported_advertised(mdev, link_ksettings);
mdev             1034 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1055 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
mdev             1062 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
mdev             1069 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
mdev             1087 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
mdev             1097 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
mdev             1098 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5_toggle_port_link(mdev);
mdev             1229 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1231 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
mdev             1232 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	    !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
mdev             1235 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL);
mdev             1242 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1246 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
mdev             1247 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	    !MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
mdev             1264 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	return mlx5_set_port_stall_watermark(mdev, critical_tout,
mdev             1311 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1314 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
mdev             1333 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1336 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
mdev             1342 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_set_port_pause(mdev,
mdev             1364 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1366 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	info->phc_index = mlx5_clock_get_ptp_index(mdev);
mdev             1368 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
mdev             1393 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
mdev             1397 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_g))
mdev             1400 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_s))
mdev             1403 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_a))
mdev             1406 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_b))
mdev             1409 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_m))
mdev             1412 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_u))
mdev             1415 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (MLX5_CAP_GEN(mdev, wol_p))
mdev             1481 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1487 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	wol->supported = mlx5e_get_wol_supported(mdev);
mdev             1491 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
mdev             1501 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1502 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	__u32 wol_supported = mlx5e_get_wol_supported(mdev);
mdev             1513 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	return mlx5_set_port_wol(mdev, mlx5_wol_mode);
mdev             1520 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
mdev             1546 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1562 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5e_set_fec_mode(mdev, fec_policy);
mdev             1567 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5_toggle_port_link(mdev);
mdev             1586 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1589 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(mdev, beacon_led))
mdev             1603 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	return mlx5_set_port_beacon(mdev, beacon_duration);
mdev             1610 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *dev = priv->mdev;
mdev             1653 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1664 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
mdev             1687 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1702 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	err = mlx5_firmware_flash(mdev, fw, NULL);
mdev             1722 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1736 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
mdev             1772 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
mdev             1801 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1803 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
mdev             1820 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1824 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
mdev             1826 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
mdev             1836 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	mlx5e_set_rq_type(mdev, &new_channels.params);
mdev             1871 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1875 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 	if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
mdev              124 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
mdev              144 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
mdev              440 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
mdev              448 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
mdev              520 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
mdev              521 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
mdev              543 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
mdev              558 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
mdev              769 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
mdev              773 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
mdev              776 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
mdev              782 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
mdev              787 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
mdev              793 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
mdev              795 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	return (mlx5e_any_tunnel_proto_supported(mdev) &&
mdev              796 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
mdev              817 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
mdev              881 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
mdev              888 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 		if (!mlx5e_tunnel_proto_supported(priv->mdev,
mdev             1143 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
mdev             1171 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
mdev             1188 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
mdev             1527 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c 	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
mdev               97 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	ns = mlx5_get_flow_namespace(priv->mdev,
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
mdev               68 drivers/net/ethernet/mellanox/mlx5/core/en_main.c bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
mdev               70 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		MLX5_CAP_ETH(mdev, reg_umr_sq);
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
mdev               79 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
mdev               86 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mdev               93 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/en_main.c bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
mdev              105 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
mdev              108 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_IPSEC_DEV(mdev))
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
mdev              123 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
mdev              133 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	port_state = mlx5_query_vport_state(mdev,
mdev              224 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_notifier_register(priv->mdev, &priv->events_nb);
mdev              229 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
mdev              267 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
mdev              289 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
mdev              295 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
mdev              301 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
mdev              305 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
mdev              381 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev              397 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->clock   = &mdev->clock;
mdev              400 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mdev    = mdev;
mdev              426 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
mdev              432 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
mdev              443 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
mdev              453 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (MLX5_IPSEC_DEV(mdev)) {
mdev              467 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
mdev              471 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
mdev              473 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
mdev              475 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
mdev              485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
mdev              537 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
mdev              624 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
mdev              651 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
mdev              677 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = rq->mdev;
mdev              705 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
mdev              714 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = rq->mdev;
mdev              734 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
mdev              745 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              765 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
mdev              775 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev              794 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
mdev              803 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
mdev              900 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
mdev              996 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev             1003 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
mdev             1015 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
mdev             1069 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev             1074 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
mdev             1077 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
mdev             1138 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev             1144 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->clock     = &mdev->clock;
mdev             1149 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
mdev             1155 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
mdev             1157 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_IPSEC_DEV(c->priv->mdev))
mdev             1160 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
mdev             1169 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
mdev             1203 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
mdev             1228 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
mdev             1235 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
mdev             1243 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_sq(mdev, in, inlen, sqn);
mdev             1250 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
mdev             1272 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
mdev             1279 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
mdev             1281 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_sq(mdev, sqn);
mdev             1284 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
mdev             1292 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_sq(mdev, param, csp, sqn);
mdev             1298 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_modify_sq(mdev, *sqn, &msp);
mdev             1300 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_sq(mdev, *sqn);
mdev             1329 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
mdev             1392 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev             1397 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_sq(mdev, sq->sqn);
mdev             1400 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_rl_remove_rate(mdev, &rl);
mdev             1427 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
mdev             1456 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_sq(c->mdev, sq->sqn);
mdev             1477 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
mdev             1528 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_sq(c->mdev, sq->sqn);
mdev             1533 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
mdev             1543 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
mdev             1547 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
mdev             1568 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	cq->mdev = mdev;
mdev             1577 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->priv->mdev;
mdev             1584 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_cq_common(mdev, param, cq);
mdev             1600 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = cq->mdev;
mdev             1610 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
mdev             1629 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
mdev             1634 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
mdev             1648 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
mdev             1654 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = c->mdev;
mdev             1665 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_GEN(mdev, cq_moderation))
mdev             1666 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
mdev             1748 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1761 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_rl_remove_rate(mdev, &rl);
mdev             1768 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
mdev             1780 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
mdev             1786 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5_rl_remove_rate(mdev, &rl);
mdev             1797 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1801 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!mlx5_rl_is_supported(mdev)) {
mdev             1810 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
mdev             1828 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
mdev             1835 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
mdev             1955 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
mdev             1957 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
mdev             1959 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
mdev             1968 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
mdev             1976 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
mdev             1985 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c->mdev     = priv->mdev;
mdev             1989 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c->pdev     = priv->mdev->device;
mdev             1991 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
mdev             1996 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
mdev             2072 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
mdev             2083 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_IPSEC_DEV(mdev))
mdev             2153 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2161 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
mdev             2164 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
mdev             2170 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
mdev             2178 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
mdev             2183 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.buf_numa_node = dev_to_node(mdev->device);
mdev             2189 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2198 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.buf_numa_node = dev_to_node(mdev->device);
mdev             2208 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
mdev             2210 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
mdev             2221 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
mdev             2222 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		    !!MLX5_IPSEC_DEV(priv->mdev);
mdev             2233 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
mdev             2234 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
mdev             2243 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2250 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
mdev             2301 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
mdev             2437 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2457 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
mdev             2468 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
mdev             2478 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
mdev             2496 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err);
mdev             2555 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2571 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
mdev             2746 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2758 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
mdev             2770 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
mdev             2777 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2797 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
mdev             2804 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
mdev             2816 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
mdev             2822 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
mdev             2827 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
mdev             2831 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
mdev             2837 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
mdev             2839 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
mdev             2848 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             2852 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
mdev             2856 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_query_mtu(mdev, params, &mtu);
mdev             2869 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev  = priv->mdev;
mdev             2875 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
mdev             3073 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mdev             3076 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
mdev             3110 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
mdev             3117 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
mdev             3127 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
mdev             3135 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mdev = mdev;
mdev             3140 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
mdev             3144 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.buf_numa_node = dev_to_node(mdev->device);
mdev             3145 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	param->wq.db_numa_node  = dev_to_node(mdev->device);
mdev             3147 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5e_alloc_cq_common(mdev, param, cq);
mdev             3153 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             3161 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
mdev             3169 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
mdev             3179 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
mdev             3203 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
mdev             3207 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
mdev             3210 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.pdn);
mdev             3212 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5_lag_is_lacp_owner(mdev))
mdev             3215 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
mdev             3218 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
mdev             3220 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_tis(mdev, tisn);
mdev             3227 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
mdev             3229 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
mdev             3232 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
mdev             3234 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
mdev             3242 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
mdev             3251 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			if (mlx5e_lag_should_assign_affinity(priv->mdev))
mdev             3254 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
mdev             3265 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
mdev             3280 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mdev             3333 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
mdev             3335 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
mdev             3340 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
mdev             3348 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
mdev             3350 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
mdev             3362 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
mdev             3365 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
mdev             3391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
mdev             3399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err);
mdev             3401 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &tirs[ix]);
mdev             3414 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
mdev             3421 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
mdev             3429 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_tir(priv->mdev, &tirs[i]);
mdev             3661 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             3689 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
mdev             3690 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
mdev             3736 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             3738 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_set_port_fcs(mdev, !enable);
mdev             3884 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				   struct mlx5_core_dev *mdev)
mdev             3897 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
mdev             3947 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				    &new_channels.params, priv->mdev)) {
mdev             3953 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
mdev             3995 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
mdev             3996 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    (mlx5_clock_get_ptp_index(priv->mdev) == -1))
mdev             4063 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
mdev             4087 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4089 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
mdev             4096 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4101 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
mdev             4108 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4110 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
mdev             4116 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4118 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
mdev             4125 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4127 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
mdev             4157 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4159 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
mdev             4167 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4170 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
mdev             4181 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4183 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
mdev             4202 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
mdev             4216 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
mdev             4246 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
mdev             4259 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
mdev             4290 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
mdev             4298 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
mdev             4303 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
mdev             4393 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_IPSEC_DEV(priv->mdev)) {
mdev             4450 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_set_rq_type(priv->mdev, &new_channels.params);
mdev             4467 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
mdev             4544 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4548 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
mdev             4561 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4589 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
mdev             4635 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
mdev             4637 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
mdev             4639 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
mdev             4640 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
mdev             4641 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
mdev             4642 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
mdev             4643 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
mdev             4644 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
mdev             4645 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    MLX5_CAP_FLOWTABLE(mdev,
mdev             4648 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev,
mdev             4652 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mdev             4653 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
mdev             4654 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
mdev             4655 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
mdev             4669 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
mdev             4674 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_port_max_linkspeed(mdev, &link_speed);
mdev             4675 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
mdev             4676 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
mdev             4748 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
mdev             4754 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
mdev             4757 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
mdev             4760 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
mdev             4770 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!slow_pci_heuristic(mdev) &&
mdev             4771 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    mlx5e_striding_rq_possible(mdev, params) &&
mdev             4772 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
mdev             4775 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_set_rq_type(mdev, params);
mdev             4776 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_init_rq_type_params(mdev, params);
mdev             4794 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mdev             4814 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
mdev             4818 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
mdev             4819 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    MLX5_CAP_GEN(mdev, vport_group_manager))
mdev             4820 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
mdev             4826 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_build_rq_params(mdev, params);
mdev             4833 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
mdev             4834 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			params->lro_en = !slow_pci_heuristic(mdev);
mdev             4836 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
mdev             4839 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
mdev             4842 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mdev             4843 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mdev             4848 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
mdev             4853 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_tunnel_inner_ft_supported(mdev);
mdev             4863 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
mdev             4865 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
mdev             4867 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
mdev             4874 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             4878 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	SET_NETDEV_DEV(netdev, mdev->device);
mdev             4883 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
mdev             4907 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
mdev             4908 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    mlx5e_check_fragmented_striding_rq_cap(mdev))
mdev             4917 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
mdev             4918 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    mlx5e_any_tunnel_proto_supported(mdev)) {
mdev             4925 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
mdev             4933 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
mdev             4942 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
mdev             4956 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
mdev             4961 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_CAP_ETH(mdev, scatter_fcs))
mdev             4978 drivers/net/ethernet/mellanox/mlx5/core/en_main.c #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
mdev             5003 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             5006 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
mdev             5008 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
mdev             5012 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
mdev             5014 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
mdev             5022 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
mdev             5025 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
mdev             5028 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mdev             5037 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
mdev             5041 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
mdev             5048 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
mdev             5051 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mdev             5068 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             5075 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
mdev             5105 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
mdev             5156 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
mdev             5169 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             5175 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
mdev             5180 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_lag_add(mdev, netdev);
mdev             5204 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             5224 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_lag_remove(mdev);
mdev             5255 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		      struct mlx5_core_dev *mdev,
mdev             5260 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	priv->mdev        = mdev;
mdev             5282 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(mdev);
mdev             5293 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mdev             5305 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
mdev             5309 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = profile->init(mdev, netdev, profile, ppriv);
mdev             5311 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
mdev             5333 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	max_nch = mlx5e_get_max_num_channels(priv->mdev);
mdev             5335 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
mdev             5390 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
mdev             5399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_mdev_resources(mdev);
mdev             5405 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_destroy_mdev_resources(mdev);
mdev             5412 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
mdev             5418 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
mdev             5426 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_mdev_resources(mdev);
mdev             5429 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void *mlx5e_add(struct mlx5_core_dev *mdev)
mdev             5436 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_check_required_hca_cap(mdev);
mdev             5441 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_ESWITCH_MANAGER(mdev) &&
mdev             5442 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	    mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
mdev             5443 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_rep_register_vport_reps(mdev);
mdev             5444 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		return mdev;
mdev             5448 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	nch = mlx5e_get_max_num_channels(mdev);
mdev             5449 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
mdev             5451 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
mdev             5457 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_attach(mdev, priv);
mdev             5459 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
mdev             5465 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
mdev             5475 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_detach(mdev, priv);
mdev             5481 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
mdev             5486 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
mdev             5487 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_rep_unregister_vport_reps(mdev);
mdev             5496 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_detach(mdev, vpriv);
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		 fw_rev_maj(mdev), fw_rev_min(mdev),
mdev               81 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		 fw_rev_sub(mdev), mdev->board_id);
mdev               90 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
mdev              137 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              240 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              403 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
mdev              467 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              496 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              519 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
mdev              527 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_fc_queue_stats_work(priv->mdev,
mdev              603 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              843 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
mdev              971 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_fc_update_sampling_interval(priv->mdev,
mdev             1166 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (!mlx5_modify_vport_admin_state(priv->mdev,
mdev             1185 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_modify_vport_admin_state(priv->mdev,
mdev             1271 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
mdev             1411 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1414 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
mdev             1429 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5e_build_rq_params(mdev, params);
mdev             1432 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mdev             1438 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
mdev             1449 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1452 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		SET_NETDEV_DEV(netdev, mdev->device);
mdev             1455 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_query_mac_address(mdev, netdev->dev_addr);
mdev             1458 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		if (MLX5_CAP_GEN(mdev, qos))
mdev             1488 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
mdev             1496 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
mdev             1520 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
mdev             1539 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1558 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1565 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
mdev             1631 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
mdev             1646 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
mdev             1653 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
mdev             1727 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1732 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
mdev             1739 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_lag_add(mdev, netdev);
mdev             1741 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_notifier_register(mdev, &priv->events_nb);
mdev             1750 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1756 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_notifier_unregister(mdev, &priv->events_nb);
mdev             1758 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	mlx5_lag_remove(mdev);
mdev             1948 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_core_dev *dev = priv->mdev;
mdev             1958 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 		mlx5e_destroy_mdev_resources(priv->mdev);
mdev             1978 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
mdev             1980 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
mdev             1985 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
mdev             1987 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
mdev              186 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
mdev              211 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h 	return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv);
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	struct mlx5_core_health *health = &priv->mdev->priv.health;
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	return mlx5e_port_linkspeed(priv->mdev, &speed);
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 	err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
mdev              232 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 		err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
mdev              254 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 		mlx5_nic_vport_update_local_lb(priv->mdev, false);
mdev              263 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c 		mlx5_nic_vport_update_local_lb(priv->mdev, false);
mdev              365 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
mdev              370 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
mdev              396 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
mdev              397 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
mdev              405 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
mdev              409 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
mdev              420 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
mdev              424 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
mdev              435 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              437 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
mdev              444 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
mdev              524 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              529 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
mdev              584 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
mdev              585 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
mdev              590 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              595 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
mdev              601 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              644 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              652 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              705 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              710 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
mdev              716 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              742 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              748 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
mdev              751 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
mdev              760 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              765 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
mdev              772 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
mdev              782 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              789 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
mdev              797 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
mdev              809 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              817 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              819 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
mdev              824 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              838 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
mdev              849 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
mdev              861 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
mdev              872 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              877 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
mdev              883 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev              914 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
mdev              917 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
mdev              920 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
mdev              931 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
mdev              936 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
mdev              941 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
mdev              953 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
mdev              959 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
mdev              965 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
mdev              976 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              981 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
mdev              986 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
mdev             1013 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1015 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1024 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1027 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1046 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1049 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1069 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1075 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1083 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev             1089 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1091 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1100 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1106 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
mdev             1114 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
mdev             1194 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
mdev             1195 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
mdev             1199 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1204 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
mdev             1207 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
mdev             1214 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1219 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
mdev             1222 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
mdev             1323 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev             1329 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
mdev             1337 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		mlx5_core_access_reg(mdev, in, sz, out, sz,
mdev             1380 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
mdev              302 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev              337 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
mdev              398 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
mdev              451 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	return priv->mdev;
mdev              511 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              527 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
mdev              648 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	func_mdev = priv->mdev;
mdev              788 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
mdev              832 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
mdev              834 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
mdev              837 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
mdev              839 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
mdev              843 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
mdev              905 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *dev = priv->mdev;
mdev             1003 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5_fc_destroy(priv->mdev, counter);
mdev             1124 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = flow->priv->mdev->priv.eswitch;
mdev             1139 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = flow->priv->mdev->priv.eswitch;
mdev             1153 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1198 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		attr->dests[out_index].mdev = out_priv->mdev;
mdev             1257 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1276 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_geneve_tlv_option_del(priv->mdev->geneve);
mdev             1298 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1305 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
mdev             1310 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
mdev             1347 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
mdev             1363 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1382 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
mdev             1395 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
mdev             1508 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		esw = priv->mdev->priv.eswitch;
mdev             1561 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
mdev             1571 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1585 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             1606 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
mdev             1628 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *dev = flow->priv->mdev;
mdev             1753 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			(priv->mdev,
mdev             2083 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
mdev             2160 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *dev = priv->mdev;
mdev             2469 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
mdev             2473 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
mdev             2475 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
mdev             2489 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
mdev             2522 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
mdev             2735 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	fmdev = priv->mdev;
mdev             2736 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	pmdev = peer_priv->mdev;
mdev             2833 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			if (MLX5_CAP_FLOWTABLE(priv->mdev,
mdev             2948 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mdev             2965 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             2996 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3114 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
mdev             3131 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
mdev             3137 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
mdev             3216 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3289 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3328 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 				attr->dests[attr->out_count].mdev = out_priv->mdev;
mdev             3456 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
mdev             3493 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3570 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3583 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		esw_attr->counter_dev = priv->mdev;
mdev             3643 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
mdev             3644 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
mdev             3664 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		in_mdev = peer_priv->mdev;
mdev             3666 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		in_mdev = priv->mdev;
mdev             3697 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *in_mdev = priv->mdev;
mdev             3780 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
mdev             3883 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
mdev             3962 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	esw = priv->mdev->priv.eswitch;
mdev             4053 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
mdev             4124 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
mdev              416 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
mdev              390 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 		struct mlx5_core_dev *mdev;
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
mdev              274 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
mdev               57 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	dma_device = &conn->fdev->mdev->pdev->dev;
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	dma_device = &conn->fdev->mdev->pdev->dev;
mdev              223 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
mdev              244 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
mdev              431 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              445 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	wqp.buf_numa_node = mdev->priv.numa_node;
mdev              446 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	wqp.db_numa_node  = mdev->priv.numa_node;
mdev              448 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
mdev              466 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
mdev              483 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
mdev              516 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
mdev              523 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              526 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	wqp.buf_numa_node = mdev->priv.numa_node;
mdev              527 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	wqp.db_numa_node  = mdev->priv.numa_node;
mdev              529 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
mdev              537 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              597 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
mdev              603 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
mdev              661 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
mdev              671 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = conn->fdev->mdev;
mdev              675 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
mdev              682 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              703 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
mdev              718 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              731 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
mdev              740 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
mdev              748 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
mdev              763 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              785 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
mdev              803 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
mdev              843 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
mdev              874 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_query_mac_address(fdev->mdev, remote_mac);
mdev              887 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
mdev              894 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
mdev              938 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
mdev              957 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
mdev              963 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
mdev              966 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
mdev              976 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              983 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
mdev              984 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
mdev              991 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
mdev              993 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
mdev             1001 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_nic_vport_enable_roce(fdev->mdev);
mdev             1007 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
mdev             1016 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
mdev             1023 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
mdev             1034 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
mdev             1036 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
mdev             1038 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_nic_vport_disable_roce(fdev->mdev);
mdev             1045 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
mdev             1046 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
mdev             1047 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
mdev             1048 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	mlx5_nic_vport_disable_roce(fdev->mdev);
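
Note on the fpga/conn.c entries above: the connection code consistently reaches the core device through conn->fdev->mdev and uses mdev->priv.numa_node as the allocation hint for the CQ and QP work queues (conn.c lines 445-446 and 526-527). A minimal sketch of that pattern, assuming the driver's internal headers and the mlx5_wq_param type implied by the listing; the helper name is illustrative, not part of the driver:

/* Illustrative only: keep connection work-queue buffers and doorbells on the
 * NUMA node of the owning mlx5 core device, as conn.c does before calling
 * mlx5_cqwq_create()/mlx5_wq_qp_create(). */
static void my_conn_set_numa_hints(struct mlx5_fpga_conn *conn,
                                   struct mlx5_wq_param *wqp)
{
        struct mlx5_core_dev *mdev = conn->fdev->mdev;

        wqp->buf_numa_node = mdev->priv.numa_node;
        wqp->db_numa_node  = mdev->priv.numa_node;
}
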
mdev              104 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_fpga_query(fdev->mdev, &query);
mdev              129 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	struct mlx5_core_dev *mdev = fdev->mdev;
mdev              131 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_RESET_SANDBOX);
mdev              141 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_OFF);
mdev              165 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
mdev              167 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_fpga_caps(fdev->mdev);
mdev              184 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device);
mdev              189 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		       MLX5_CAP_FPGA(fdev->mdev, image_version),
mdev              190 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		       MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id),
mdev              191 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		       MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id),
mdev              192 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		       MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version));
mdev              194 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
mdev              201 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	err = mlx5_core_reserve_gids(mdev, max_num_qps);
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb);
mdev              208 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb);
mdev              226 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
mdev              228 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_core_unreserve_gids(mdev, max_num_qps);
mdev              236 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c int mlx5_fpga_init(struct mlx5_core_dev *mdev)
mdev              240 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	if (!MLX5_CAP_GEN(mdev, fpga)) {
mdev              241 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		mlx5_core_dbg(mdev, "FPGA capability not present\n");
mdev              245 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_core_dbg(mdev, "Initializing FPGA\n");
mdev              251 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	fdev->mdev = mdev;
mdev              252 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mdev->fpga = fdev;
mdev              257 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
mdev              259 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              276 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		err = mlx5_fpga_ctrl_op(mdev, MLX5_FPGA_CTRL_OPERATION_SANDBOX_BYPASS_ON);
mdev              283 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
mdev              284 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
mdev              286 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
mdev              287 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_core_unreserve_gids(mdev, max_num_qps);
mdev              290 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
mdev              292 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              294 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mlx5_fpga_device_stop(mdev);
mdev              296 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 	mdev->fpga = NULL;
mdev              352 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c 		mlx5_trigger_health_work(fdev->mdev);
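
Note on the fpga/core.c entries above: mlx5_fpga_init() links the FPGA state and the core device in both directions (core.c lines 236-252), and every later fdev-based helper relies on that back-pointer. A hedged sketch of the cross-linking, assuming the driver's internal types; the allocation and -ENOMEM handling beyond what the listing shows are assumptions:

/* Sketch: allocate the FPGA bookkeeping structure and cross-link it with the
 * core device, mirroring mlx5_fpga_init() in the listing. */
int my_fpga_init(struct mlx5_core_dev *mdev)
{
        struct mlx5_fpga_device *fdev;

        if (!MLX5_CAP_GEN(mdev, fpga)) {
                mlx5_core_dbg(mdev, "FPGA capability not present\n");
                return 0;               /* nothing to set up on non-FPGA parts */
        }

        fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        fdev->mdev = mdev;              /* used by every fdev-based helper above */
        mdev->fpga = fdev;              /* used by mlx5_fpga_device_start/stop */
        return 0;
}
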
mdev               46 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	struct mlx5_core_dev *mdev;
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
mdev               70 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
mdev               74 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h 	mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
mdev               87 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h int mlx5_fpga_init(struct mlx5_core_dev *mdev);
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
mdev               90 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
mdev               94 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h static inline int mlx5_fpga_init(struct mlx5_core_dev *mdev)
mdev               99 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h static inline void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev)
mdev              103 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h static inline int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
mdev              108 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
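
Note on the fpga/core.h entries above: the header pairs real prototypes with static inline no-op stubs so callers never need their own #ifdefs (core.h lines 87-108). The shape of that pattern, assuming the usual CONFIG_MLX5_FPGA Kconfig guard:

#ifdef CONFIG_MLX5_FPGA
int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
#else
/* FPGA support compiled out: init reports success, cleanup is a no-op. */
static inline int mlx5_fpga_init(struct mlx5_core_dev *mdev) { return 0; }
static inline void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev) {}
#endif
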
mdev              124 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
mdev              129 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
mdev              133 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
mdev              216 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
mdev              220 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              290 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_core_dev *dev = fdev->mdev;
mdev              324 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
mdev              326 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              329 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (mlx5_fpga_is_ipsec_device(mdev)) {
mdev              359 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
mdev              361 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              370 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
mdev              373 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              388 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	count = mlx5_fpga_ipsec_counters_count(mdev);
mdev              418 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
mdev              426 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
mdev              446 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
mdev              448 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
mdev              454 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	return mlx5_fpga_ipsec_set_caps(mdev, flags);
mdev              458 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
mdev              522 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
mdev              529 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);
mdev              665 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
mdev              675 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              685 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	sa_ctx->dev = mdev;
mdev              688 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
mdev              755 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
mdev              767 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
mdev              774 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	else if (!mlx5_is_fpga_ipsec_rule(mdev,
mdev              785 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
mdev              817 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
mdev              966 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
mdev              985 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (!MLX5_CAP_FLOWTABLE(mdev,
mdev             1247 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
mdev             1250 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev             1254 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (!mlx5_fpga_is_ipsec_device(mdev))
mdev             1295 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
mdev             1327 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
mdev             1329 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev             1331 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (!mlx5_fpga_is_ipsec_device(mdev))
mdev             1386 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
mdev             1390 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
mdev             1395 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
mdev             1400 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
mdev             1406 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
mdev             1411 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
mdev             1417 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
mdev             1422 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
mdev             1424 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
mdev             1432 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
mdev             1439 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
mdev             1443 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mdev             1444 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
mdev             1471 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_core_dev *mdev = xfrm->mdev;
mdev             1472 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev             1482 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
mdev             1483 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
mdev             1488 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 		mlx5_core_warn(mdev, "Modify esp is not supported\n");
mdev             1507 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c 	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
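
Note on the fpga/ipsec.c entries above: every entry point is gated on mlx5_fpga_is_ipsec_device(), which checks that an FPGA is present and that its vendor/product capabilities identify the IPsec sandbox image (ipsec.c lines 124-137). A sketch of that gate with the expected IDs passed in by the caller, since the listing does not show the constants being compared against:

/* Illustrative gate: does this core device carry an FPGA sandbox image with
 * the given vendor/product identity? Mirrors the checks in the listing. */
static bool my_fpga_is_sandbox(struct mlx5_core_dev *mdev,
                               u32 vendor_id, u32 product_id)
{
        if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
                return false;

        if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != vendor_id)
                return false;

        return MLX5_CAP_FPGA(mdev, sandbox_product_id) == product_id;
}
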
mdev               40 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
mdev               41 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
mdev               45 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
mdev               52 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
mdev               53 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
mdev               57 drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c 	if (!fdev->mdev)
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c 		err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
mdev              104 drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c 	if (!fdev->mdev)
mdev              110 drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c 		err = mlx5_fpga_access_reg(fdev->mdev, actual_size,
mdev              168 drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c 	return mlx5_fpga_sbu_caps(fdev->mdev, buf, size);
mdev              197 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
mdev              213 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
mdev              232 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
mdev              239 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
mdev              261 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
mdev              265 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mdev              268 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
mdev              281 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
mdev              287 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
mdev              337 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 		mlx5_fpga_tls_del_flow(fdev->mdev,
mdev              347 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev,
mdev              363 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
mdev              387 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev)
mdev              389 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
mdev              392 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
mdev              396 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
mdev              400 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0)
mdev              446 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev)
mdev              448 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              454 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	if (!mlx5_fpga_is_tls_device(mdev) || !fdev)
mdev              498 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev)
mdev              500 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	struct mlx5_fpga_device *fdev = mdev->fpga;
mdev              551 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c static int _mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev              555 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	u32 caps = mlx5_fpga_tls_device_caps(mdev);
mdev              576 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx);
mdev              584 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev              589 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
mdev              606 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c 	ret = _mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid,
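
Note on the fpga/tls.c entries above: the TLS state hangs off mdev->fpga->tls and receive flows are kept in an IDR keyed by the wire handle (tls.c lines 197-232). A minimal sketch of just the lookup step, using only the fields visible in the listing; any locking around the IDR is left to the caller here:

static void *my_tls_find_rx_flow(struct mlx5_core_dev *mdev, u32 handle)
{
        struct mlx5_fpga_tls *tls = mdev->fpga->tls;

        /* the handle arrives in network byte order, the IDR is keyed host-order */
        return idr_find(&tls->rx_idr, ntohl(handle));
}
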
mdev               54 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
mdev               59 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev);
mdev               63 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev);
mdev               64 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev);
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev)
mdev               68 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h 	return mdev->fpga->tls->caps;
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
mdev              310 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h #define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) (		\
mdev              311 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) :		\
mdev              312 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) :		\
mdev              313 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) :		\
mdev              314 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) :		\
mdev              315 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) :		\
mdev              316 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) :		\
mdev              317 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h 	(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) :		\
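
Note on the fs_core.h entries above: MLX5_CAP_FLOWTABLE_TYPE() dispatches a single capability lookup to the per-table-type macro via a chained ternary. A short usage sketch; the 'decap' field name and the fs_flow_table_type enum name are assumptions inferred from the FS_FT_* values shown:

static bool my_ft_type_supports_decap(struct mlx5_core_dev *mdev,
                                      enum fs_flow_table_type type)
{
        return MLX5_CAP_FLOWTABLE_TYPE(mdev, decap, type);
}
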
mdev              105 drivers/net/ethernet/mellanox/mlx5/core/fw.c int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
mdev              115 drivers/net/ethernet/mellanox/mlx5/core/fw.c 	err = mlx5_cmd_query_adapter(mdev, out, outlen);
mdev              181 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              185 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c 	ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1);
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5e_set_rq_type(mdev, params);
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5e_init_rq_type_params(mdev, params);
mdev               75 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c int mlx5i_init(struct mlx5_core_dev *mdev,
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
mdev               90 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
mdev               92 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5i_build_nic_params(mdev, &priv->channels.params);
mdev              161 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              177 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
mdev              179 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
mdev              184 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
mdev              186 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
mdev              190 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
mdev              192 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
mdev              200 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp);
mdev              208 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              212 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
mdev              215 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
mdev              220 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
mdev              243 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	ret = mlx5_core_create_qp(mdev, qp, in, inlen);
mdev              245 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "Failed creating IPoIB QP err : %d\n", ret);
mdev              254 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
mdev              256 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_core_destroy_qp(mdev, qp);
mdev              259 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
mdev              268 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	return mlx5e_create_tis(mdev, in, tisn);
mdev              276 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
mdev              278 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
mdev              282 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
mdev              284 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
mdev              291 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
mdev              299 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
mdev              300 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
mdev              308 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
mdev              363 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              370 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
mdev              518 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = epriv->mdev;
mdev              527 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
mdev              531 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              533 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
mdev              548 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              561 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = epriv->mdev;
mdev              574 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              589 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev  = epriv->mdev;
mdev              593 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
mdev              594 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5_core_attach_mcg(mdev, gid, ipriv->qp.qpn);
mdev              596 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
mdev              600 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
mdev              612 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev  = epriv->mdev;
mdev              616 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qp.qpn, gid->raw);
mdev              618 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	err = mlx5_core_detach_mcg(mdev, gid, ipriv->qp.qpn);
mdev              620 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
mdev              644 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
mdev              646 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
mdev              649 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
mdev              650 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
mdev              668 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		mlx5e_destroy_mdev_resources(priv->mdev);
mdev              672 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
mdev              674 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	return mdev->mlx5e_res.pdn != 0;
mdev              677 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
mdev              679 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	if (mlx5_is_sub_interface(mdev))
mdev              687 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
mdev              688 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
mdev              697 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	ipriv->sub_interface = mlx5_is_sub_interface(mdev);
mdev              701 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 			mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
mdev              706 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		err = mlx5e_create_mdev_resources(mdev);
mdev              711 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	prof->init(mdev, netdev, prof, ipriv);
mdev              735 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	mlx5e_destroy_mdev_resources(mdev);
mdev              741 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
mdev              748 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	rc = mlx5i_check_required_hca_cap(mdev);
mdev              752 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 	nch = mlx5e_get_max_num_channels(mdev);
mdev              759 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		.param = mdev,
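
Note on the ipoib/ipoib.c entries above: the underlay QP is walked through the usual RST -> INIT -> RTR -> RTS ladder with mlx5_core_qp_modify(), bailing on the first failure (ipoib.c lines 161-200). A condensed sketch of that ladder; the qpc argument is passed through opaquely, and the 2ERR error-path transition shown in the listing is omitted:

static int my_underlay_qp_to_rts(struct mlx5_core_dev *mdev, void *qpc,
                                 struct mlx5_core_qp *qp)
{
        int err;

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc, qp);
        if (err)
                return err;

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc, qp);
        if (err)
                return err;

        return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc, qp);
}
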
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn);
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h int mlx5i_init(struct mlx5_core_dev *mdev,
mdev              114 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n");
mdev              169 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(priv->mdev, "failed to get parent device\n");
mdev              194 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	struct mlx5_core_dev *mdev = epriv->mdev;
mdev              203 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n", err);
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              209 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
mdev              213 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
mdev              215 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
mdev              221 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
mdev              231 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
mdev              233 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              246 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	struct mlx5_core_dev *mdev = priv->mdev;
mdev              256 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
mdev              260 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
mdev              278 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
mdev              286 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	err = mlx5i_init(mdev, netdev, profile, ppriv);
mdev              313 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
mdev              315 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 		mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
mdev              326 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
mdev              733 drivers/net/ethernet/mellanox/mlx5/core/lag.c 	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
mdev              749 drivers/net/ethernet/mellanox/mlx5/core/lag.c 		mdev[0] = ldev->pf[0].dev;
mdev              750 drivers/net/ethernet/mellanox/mlx5/core/lag.c 		mdev[1] = ldev->pf[1].dev;
mdev              753 drivers/net/ethernet/mellanox/mlx5/core/lag.c 		mdev[0] = dev;
mdev              757 drivers/net/ethernet/mellanox/mlx5/core/lag.c 		ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
mdev               75 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
mdev               81 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_clock *clock = &mdev->clock;
mdev              106 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_set_mtpps(mdev, in, sizeof(in));
mdev              138 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_update_clock_info_page(clock->mdev);
mdev              153 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_update_clock_info_page(clock->mdev);
mdev              164 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
mdev              170 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	cycles = mlx5_read_internal_timer(mdev, sts);
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_update_clock_info_page(clock->mdev);
mdev              215 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_update_clock_info_page(clock->mdev);
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev =
mdev              236 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (!MLX5_PPS_CAP(mdev))
mdev              275 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	err = mlx5_set_mtpps(mdev, in, sizeof(in));
mdev              279 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	return mlx5_set_mtppse(mdev, pin, 0,
mdev              289 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev =
mdev              303 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (!MLX5_PPS_CAP(mdev))
mdev              331 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		cycles_now = mlx5_read_internal_timer(mdev, NULL);
mdev              355 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	err = mlx5_set_mtpps(mdev, in, sizeof(in));
mdev              359 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	return mlx5_set_mtppse(mdev, pin, 0,
mdev              440 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
mdev              442 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_clock *clock = &mdev->clock;
mdev              445 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_query_mtpps(mdev, out, sizeof(out));
mdev              468 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_core_dev *mdev = clock->mdev;
mdev              494 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		cycles_now = mlx5_read_internal_timer(mdev, NULL);
mdev              508 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
mdev              515 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c void mlx5_init_clock(struct mlx5_core_dev *mdev)
mdev              517 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_clock *clock = &mdev->clock;
mdev              523 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
mdev              525 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
mdev              535 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	clock->mdev = mdev;
mdev              555 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mdev->clock_info =
mdev              557 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (mdev->clock_info) {
mdev              558 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->nsec = clock->tc.nsec;
mdev              559 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->cycles = clock->tc.cycle_last;
mdev              560 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->mask = clock->cycles.mask;
mdev              561 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->mult = clock->nominal_c_mult;
mdev              562 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->shift = clock->cycles.shift;
mdev              563 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->frac = clock->tc.frac;
mdev              564 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info->overflow_period = clock->overflow_period;
mdev              572 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
mdev              578 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (MLX5_PPS_CAP(mdev))
mdev              579 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_get_pps_caps(mdev);
mdev              584 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 					&mdev->pdev->dev);
mdev              586 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
mdev              592 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
mdev              595 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
mdev              597 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	struct mlx5_clock *clock = &mdev->clock;
mdev              599 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
mdev              602 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
mdev              611 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (mdev->clock_info) {
mdev              612 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		free_page((unsigned long)mdev->clock_info);
mdev              613 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		mdev->clock_info = NULL;
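
Note on the lib/clock.c entries above: the cyclecounter and PTP callbacks never carry a separate mdev pointer; they recover it with container_of() from the clock member embedded in struct mlx5_core_dev (clock.c lines 72-75). The pattern in isolation:

static struct mlx5_core_dev *clock_to_mdev(struct mlx5_clock *clock)
{
        /* struct mlx5_core_dev embeds 'struct mlx5_clock clock', so a
         * callback handed the embedded member can climb back to the device. */
        return container_of(clock, struct mlx5_core_dev, clock);
}
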
mdev               37 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h void mlx5_init_clock(struct mlx5_core_dev *mdev);
mdev               38 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
mdev               40 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h 	return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
mdev               60 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
mdev                7 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
mdev               22 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c 	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
mdev               50 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c 	MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.pdn);
mdev               52 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev                9 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 	struct mlx5_core_dev *mdev;
mdev               17 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c static int mlx5_geneve_tlv_option_create(struct mlx5_core_dev *mdev,
mdev               29 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               51 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c static void mlx5_geneve_tlv_option_destroy(struct mlx5_core_dev *mdev, u16 obj_id)
mdev               60 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 			mlx5_core_warn(geneve->mdev,
mdev               93 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 		res = mlx5_geneve_tlv_option_create(geneve->mdev,
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 			mlx5_core_warn(geneve->mdev,
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 		mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
mdev              134 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev)
mdev              141 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 	geneve->mdev = mdev;
mdev              154 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c 		mlx5_geneve_tlv_option_destroy(geneve->mdev, geneve->obj_id);
mdev               14 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h struct mlx5_geneve *mlx5_geneve_create(struct mlx5_core_dev *mdev);
mdev               23 drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.h *mlx5_geneve_create(struct mlx5_core_dev *mdev) { return NULL; }
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
mdev               17 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c static void mlx5_query_port_tun_entropy(struct mlx5_core_dev *mdev,
mdev               29 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	if (!MLX5_CAP_GEN(mdev, ports_check))
mdev               32 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c static int mlx5_set_port_tun_entropy_calc(struct mlx5_core_dev *mdev, u8 enable,
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	err = mlx5_query_ports_check(mdev, in, sizeof(in));
mdev               55 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	return mlx5_set_ports_check(mdev, in, sizeof(in));
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c static int mlx5_set_port_gre_tun_entropy_calc(struct mlx5_core_dev *mdev,
mdev               64 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	err = mlx5_query_ports_check(mdev, in, sizeof(in));
mdev               70 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	return mlx5_set_ports_check(mdev, in, sizeof(in));
mdev               74 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 				struct mlx5_core_dev *mdev)
mdev               78 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	tun_entropy->mdev = mdev;
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	mlx5_query_port_tun_entropy(mdev, &entropy_flags);
mdev               93 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 	mlx5_query_port_tun_entropy(tun_entropy->mdev, &entropy_flags);
mdev              103 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev,
mdev              114 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 			mlx5_core_warn(tun_entropy->mdev,
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 		err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, enable,
mdev              132 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c 			err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, 1, 0);
mdev               10 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h 	struct mlx5_core_dev *mdev;
mdev               18 drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h 				struct mlx5_core_dev *mdev);
mdev               42 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	struct mlx5_core_dev		*mdev;
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
mdev               61 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
mdev               69 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               72 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              122 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
mdev              123 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 		mlx5_core_info(vxlan->mdev,
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 			       port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
mdev              130 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
mdev              152 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
mdev              183 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
mdev              193 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
mdev              197 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
mdev              204 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 	vxlan->mdev = mdev;
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c 		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
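
Note on the lib/vxlan.c entries above: the add-port path caps offloaded UDP ports at MLX5_CAP_ETH(mdev, max_vxlan_udp_ports), falling back to 4 when the field reads zero, before issuing the firmware command (vxlan.c lines 56-130). A sketch of just the room check; the -ENOSPC return and the log text are illustrative:

static int my_vxlan_check_room(struct mlx5_vxlan *vxlan, u16 port)
{
        struct mlx5_core_dev *mdev = vxlan->mdev;
        u8 max_ports = MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;

        if (vxlan->num_ports >= max_ports) {
                mlx5_core_info(mdev,
                               "UDP port (%d) not offloaded, max ports %d\n",
                               port, max_ports);
                return -ENOSPC;
        }
        return 0;
}
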
mdev               49 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
mdev               56 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
mdev              156 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
mdev              197 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
mdev              198 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
mdev              199 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
mdev              204 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) &&		\
mdev              205 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 			    MLX5_CAP_GEN((mdev), pps_modify) &&		\
mdev              206 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) &&	\
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
mdev              234 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static int irq_set_rmap(struct mlx5_core_dev *mdev)
mdev              140 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
mdev              148 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
mdev              155 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 				       pci_irq_vector(mdev->pdev, vecidx));
mdev              157 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
mdev              165 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	irq_clear_rmap(mdev);
mdev              173 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
mdev              179 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	irq = mlx5_irq_get(mdev, vecidx);
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	irqn = pci_irq_vector(mdev->pdev, vecidx);
mdev              182 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
mdev              186 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
mdev              190 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
mdev              196 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
mdev              202 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	irq = mlx5_irq_get(mdev, vecidx);
mdev              203 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	irqn = pci_irq_vector(mdev->pdev, vecidx);
mdev              208 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
mdev              210 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
mdev              215 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		err = set_comp_irq_affinity_hint(mdev, i);
mdev              224 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		clear_comp_irq_affinity_hint(mdev, i);
mdev              229 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
mdev              231 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
mdev              235 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c 		clear_comp_irq_affinity_hint(mdev, i);
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
mdev              107 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0);
mdev              546 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_max_tc(struct mlx5_core_dev *mdev)
mdev              548 drivers/net/ethernet/mellanox/mlx5/core/port.c 	u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
mdev              553 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out)
mdev              559 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return  mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              563 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in)
mdev              569 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sizeof(out), out,
mdev              573 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
mdev              581 drivers/net/ethernet/mellanox/mlx5/core/port.c 		if (prio_tc[i] > mlx5_max_tc(mdev))
mdev              587 drivers/net/ethernet/mellanox/mlx5/core/port.c 		err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              597 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
mdev              610 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              619 drivers/net/ethernet/mellanox/mlx5/core/port.c static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
mdev              624 drivers/net/ethernet/mellanox/mlx5/core/port.c 	if (!MLX5_CAP_GEN(mdev, ets))
mdev              627 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
mdev              631 drivers/net/ethernet/mellanox/mlx5/core/port.c static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
mdev              636 drivers/net/ethernet/mellanox/mlx5/core/port.c 	if (!MLX5_CAP_GEN(mdev, ets))
mdev              640 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
mdev              644 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
mdev              649 drivers/net/ethernet/mellanox/mlx5/core/port.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              654 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
mdev              658 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
mdev              665 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
mdev              679 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
mdev              684 drivers/net/ethernet/mellanox/mlx5/core/port.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              689 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
mdev              693 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
mdev              700 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
mdev              714 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
mdev              724 drivers/net/ethernet/mellanox/mlx5/core/port.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              734 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
mdev              738 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
mdev              747 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
mdev              751 drivers/net/ethernet/mellanox/mlx5/core/port.c 	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
mdev              764 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
mdev              772 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              776 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
mdev              783 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              791 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
mdev              796 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              800 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
mdev              804 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, inlen, out,
mdev              808 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
mdev              813 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_query_ports_check(mdev, in, sizeof(in));
mdev              818 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_set_ports_check(mdev, in, sizeof(in));
mdev              821 drivers/net/ethernet/mellanox/mlx5/core/port.c void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
mdev              829 drivers/net/ethernet/mellanox/mlx5/core/port.c 	if (!MLX5_CAP_GEN(mdev, ports_check))
mdev              832 drivers/net/ethernet/mellanox/mlx5/core/port.c 	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
mdev              839 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
mdev              843 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
mdev              847 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
mdev              851 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out,
mdev              855 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode)
mdev              863 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              874 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
mdev              883 drivers/net/ethernet/mellanox/mlx5/core/port.c 	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              887 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state)
mdev              896 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              901 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
mdev              909 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
mdev              917 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
mdev              933 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
mdev              944 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1);
mdev              954 drivers/net/ethernet/mellanox/mlx5/core/port.c int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio)
mdev              971 drivers/net/ethernet/mellanox/mlx5/core/port.c 	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
mdev              464 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 		if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
mdev              693 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 					ret = mlx5dr_cmd_query_flow_table(action->dest_tbl.fw_tbl.mdev,
mdev              985 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 					struct mlx5_core_dev *mdev)
mdev              995 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 	action->dest_tbl.fw_tbl.mdev = mdev;
mdev             1083 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 		ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
mdev             1573 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c 		mlx5dr_cmd_destroy_reformat_ctx((action->reformat.dmn)->mdev,
mdev                6 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
mdev               21 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               34 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
mdev               54 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
mdev               70 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
mdev               76 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
mdev               79 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_ESW_FLOWTABLE(mdev,
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
mdev               91 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->prio_tag_required	= MLX5_CAP_GEN(mdev, prio_tag_required);
mdev               92 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->eswitch_manager	= MLX5_CAP_GEN(mdev, eswitch_manager);
mdev               93 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->gvmi		= MLX5_CAP_GEN(mdev, vhca_id);
mdev               94 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->flex_protocols	= MLX5_CAP_GEN(mdev, flex_parser_protocols);
mdev               97 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
mdev              103 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
mdev              105 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 			MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
mdev              109 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
mdev              111 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
mdev              113 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
mdev              115 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
mdev              116 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
mdev              118 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
mdev              120 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
mdev              122 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
mdev              124 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
mdev              159 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
mdev              166 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              169 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
mdev              213 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
mdev              224 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
mdev              252 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
mdev              263 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
mdev              274 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              277 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
mdev              295 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
mdev              306 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
mdev              319 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              322 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
mdev              363 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              377 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
mdev              389 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              392 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
mdev              423 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
mdev              433 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
mdev              444 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev              447 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
mdev              460 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               60 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
mdev               66 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	dmn->uar = mlx5_get_uars_page(dmn->mdev);
mdev              100 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	mlx5_put_uars_page(dmn->mdev, dmn->uar);
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
mdev              112 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	mlx5_put_uars_page(dmn->mdev, dmn->uar);
mdev              113 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
mdev              125 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
mdev              133 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
mdev              171 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
mdev              179 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
mdev              209 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
mdev              215 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
mdev              220 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
mdev              222 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
mdev              226 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	ret = dr_domain_query_fdb_caps(mdev, dmn);
mdev              285 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
mdev              297 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	dmn->mdev = mdev;
mdev              302 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	if (dr_domain_caps_init(mdev, dmn)) {
mdev              359 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 		ret = mlx5dr_cmd_sync_steering(dmn->mdev);
mdev              370 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c 	mlx5dr_cmd_sync_steering(dmn->mdev);
mdev               19 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	ret = mlx5dr_cmd_create_flow_table(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
mdev               27 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
mdev               40 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,
mdev               48 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id);
mdev               67 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_destroy_flow_group(dmn->mdev,
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB);
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_del_flow_table_entry(dmn->mdev,
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id);
mdev               84 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_destroy_flow_group(dmn->mdev,
mdev               88 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c 	mlx5dr_cmd_destroy_flow_table(dmn->mdev,
mdev               70 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
mdev               94 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
mdev              102 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5_core_dev *mdev = pool->dmn->mdev;
mdev              120 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
mdev              128 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
mdev              150 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
mdev              159 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
mdev              163 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
mdev              164 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
mdev              468 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
mdev              108 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
mdev              123 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	wqp.buf_numa_node = mdev->priv.numa_node;
mdev              124 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	wqp.db_numa_node = mdev->priv.numa_node;
mdev              136 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
mdev              139 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		mlx5_core_info(mdev, "Can't create QP WQ\n");
mdev              148 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		mlx5_core_warn(mdev, "Can't allocate wqe head\n");
mdev              177 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
mdev              183 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		mlx5_core_warn(mdev, " Can't create QP\n");
mdev              204 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_destroy_qp(struct mlx5_core_dev *mdev,
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
mdev              574 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
mdev              588 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
mdev              592 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
mdev              607 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, qpc,
mdev              611 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
mdev              639 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
mdev              654 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
mdev              659 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
mdev              670 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
mdev              679 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
mdev              698 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
mdev              721 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	wqp.buf_numa_node = mdev->priv.numa_node;
mdev              722 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	wqp.db_numa_node = mdev->priv.numa_node;
mdev              724 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
mdev              740 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
mdev              741 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
mdev              761 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
mdev              790 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
mdev              792 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	mlx5_core_destroy_cq(mdev, &cq->mcq);
mdev              798 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey)
mdev              815 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
mdev              818 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
mdev              829 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dma_device = &mdev->pdev->dev;
mdev              834 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		mlx5_core_warn(mdev, "Can't dma buf\n");
mdev              839 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = dr_create_mkey(mdev, pdn, &mr->mkey);
mdev              841 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		mlx5_core_warn(mdev, "Can't create mkey\n");
mdev              855 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
mdev              857 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	mlx5_core_destroy_mkey(mdev, &mr->mkey);
mdev              858 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
mdev              875 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
mdev              886 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
mdev              920 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
mdev              927 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
mdev              938 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
mdev              942 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
mdev              944 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
mdev              954 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_destroy_qp(dmn->mdev, send_ring->qp);
mdev              955 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_destroy_cq(dmn->mdev, send_ring->cq);
mdev              956 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
mdev              957 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_dereg_mr(dmn->mdev, send_ring->mr);
mdev              207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c 	return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
mdev              224 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c 	ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev,
mdev               21 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
mdev               22 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
mdev               23 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
mdev              649 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	struct mlx5_core_dev *mdev;
mdev              741 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 					struct mlx5_core_dev *mdev;
mdev              863 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
mdev              865 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
mdev              869 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
mdev              871 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
mdev              873 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
mdev              874 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
mdev              880 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
mdev              883 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
mdev              888 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
mdev              890 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
mdev              894 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
mdev              898 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
mdev              907 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
mdev              914 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
mdev              919 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
mdev              941 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
mdev              971 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	struct mlx5_core_dev *mdev;
mdev              993 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	struct mlx5_core_dev *mdev;
mdev             1001 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	struct mlx5_core_dev *mdev;
mdev               39 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
mdev               79 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h 					struct mlx5_core_dev *mdev);
mdev              121 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) { return NULL; }
mdev              169 drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h 					struct mlx5_core_dev *mdev) { return NULL; }
mdev              305 drivers/net/ethernet/mellanox/mlx5/core/transobj.c static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
mdev              321 drivers/net/ethernet/mellanox/mlx5/core/transobj.c 	return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
mdev              324 drivers/net/ethernet/mellanox/mlx5/core/transobj.c static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
mdev              339 drivers/net/ethernet/mellanox/mlx5/core/transobj.c 	return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
mdev               65 drivers/net/ethernet/mellanox/mlx5/core/uar.c static int uars_per_sys_page(struct mlx5_core_dev *mdev)
mdev               67 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	if (MLX5_CAP_GEN(mdev, uar_4k))
mdev               68 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/uar.c static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
mdev               77 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	if (MLX5_CAP_GEN(mdev, uar_4k))
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
mdev               91 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	if (mlx5_cmd_free_uar(up->mdev, up->index))
mdev               92 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
mdev               98 drivers/net/ethernet/mellanox/mlx5/core/uar.c static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
mdev              107 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
mdev              112 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	up->mdev = mdev;
mdev              131 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	err = mlx5_cmd_alloc_uar(mdev, &up->index);
mdev              133 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
mdev              137 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	pfn = uar2pfn(mdev, up->index);
mdev              152 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
mdev              157 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	if (mlx5_cmd_free_uar(mdev, up->index))
mdev              158 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
mdev              166 drivers/net/ethernet/mellanox/mlx5/core/uar.c struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
mdev              170 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
mdev              171 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
mdev              172 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
mdev              177 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	ret = alloc_uars_page(mdev, false);
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
mdev              182 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
mdev              188 drivers/net/ethernet/mellanox/mlx5/core/uar.c void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
mdev              190 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
mdev              192 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
mdev              196 drivers/net/ethernet/mellanox/mlx5/core/uar.c static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
mdev              203 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
mdev              206 drivers/net/ethernet/mellanox/mlx5/core/uar.c static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
mdev              217 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	bfregs = &mdev->priv.bfregs;
mdev              227 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		up = alloc_uars_page(mdev, map_wc);
mdev              250 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	bfreg->map = up->map + map_offset(mdev, dbi);
mdev              259 drivers/net/ethernet/mellanox/mlx5/core/uar.c int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
mdev              264 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
mdev              269 drivers/net/ethernet/mellanox/mlx5/core/uar.c 		return alloc_bfreg(mdev, bfreg, false, fast_path);
mdev              291 drivers/net/ethernet/mellanox/mlx5/core/uar.c void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
mdev              302 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	bfregs = &mdev->priv.bfregs;
mdev              311 drivers/net/ethernet/mellanox/mlx5/core/uar.c 	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
mdev               43 drivers/net/ethernet/mellanox/mlx5/core/vport.c static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
mdev               55 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
mdev               58 drivers/net/ethernet/mellanox/mlx5/core/vport.c u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
mdev               67 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
mdev               80 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev               83 drivers/net/ethernet/mellanox/mlx5/core/vport.c static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
mdev               94 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
mdev               97 drivers/net/ethernet/mellanox/mlx5/core/vport.c static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
mdev              104 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
mdev              107 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
mdev              113 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
mdev              121 drivers/net/ethernet/mellanox/mlx5/core/vport.c void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
mdev              124 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
mdev              139 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
mdev              156 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              159 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
mdev              180 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
mdev              189 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
mdev              191 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
mdev              195 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
mdev              220 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              228 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
mdev              238 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
mdev              248 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
mdev              261 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              435 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
mdev              445 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
mdev              456 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
mdev              465 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
mdev              476 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
mdev              486 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
mdev              502 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              509 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
mdev              519 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
mdev              777 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
mdev              791 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
mdev              808 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
mdev              829 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              842 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
mdev              848 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
mdev              849 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
mdev              861 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
mdev              865 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
mdev              869 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              872 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		mlx5_core_dbg(mdev, "%s local_lb\n",
mdev              880 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
mdev              891 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
mdev              914 drivers/net/ethernet/mellanox/mlx5/core/vport.c static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
mdev              929 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
mdev              936 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
mdev              941 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (!mdev->roce.roce_en)
mdev              942 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
mdev              945 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		mdev->roce.roce_en++;
mdev              952 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
mdev              957 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (mdev->roce.roce_en) {
mdev              958 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		mdev->roce.roce_en--;
mdev              959 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		if (mdev->roce.roce_en == 0)
mdev              960 drivers/net/ethernet/mellanox/mlx5/core/vport.c 			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
mdev              963 drivers/net/ethernet/mellanox/mlx5/core/vport.c 			mdev->roce.roce_en++;
mdev             1007 drivers/net/ethernet/mellanox/mlx5/core/vport.c int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
mdev             1021 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
mdev             1152 drivers/net/ethernet/mellanox/mlx5/core/vport.c u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
mdev             1154 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
mdev             1157 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	if (mdev->sys_image_guid)
mdev             1158 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		return mdev->sys_image_guid;
mdev             1161 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
mdev             1163 drivers/net/ethernet/mellanox/mlx5/core/vport.c 		mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
mdev             1165 drivers/net/ethernet/mellanox/mlx5/core/vport.c 	mdev->sys_image_guid = tmp;
mdev               62 drivers/net/ethernet/mellanox/mlx5/core/wq.c int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev               71 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
mdev               73 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
mdev               79 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
mdev               82 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
mdev               89 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	wq_ctrl->mdev = mdev;
mdev               94 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_db_free(mdev, &wq_ctrl->db);
mdev              106 drivers/net/ethernet/mellanox/mlx5/core/wq.c int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev              120 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
mdev              122 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
mdev              126 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_frag_buf_alloc_node(mdev,
mdev              131 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
mdev              156 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	wq_ctrl->mdev = mdev;
mdev              161 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_db_free(mdev, &wq_ctrl->db);
mdev              166 drivers/net/ethernet/mellanox/mlx5/core/wq.c int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev              175 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
mdev              177 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
mdev              183 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
mdev              187 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
mdev              194 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	wq_ctrl->mdev = mdev;
mdev              199 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_db_free(mdev, &wq_ctrl->db);
mdev              217 drivers/net/ethernet/mellanox/mlx5/core/wq.c int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev              226 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
mdev              228 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
mdev              234 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
mdev              237 drivers/net/ethernet/mellanox/mlx5/core/wq.c 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
mdev              244 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	wq_ctrl->mdev = mdev;
mdev              249 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_db_free(mdev, &wq_ctrl->db);
mdev              265 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
mdev              266 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
mdev               46 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	struct mlx5_core_dev	*mdev;
mdev               79 drivers/net/ethernet/mellanox/mlx5/core/wq.h int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev               85 drivers/net/ethernet/mellanox/mlx5/core/wq.h int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev               90 drivers/net/ethernet/mellanox/mlx5/core/wq.h int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev               96 drivers/net/ethernet/mellanox/mlx5/core/wq.h int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mdev               50 drivers/net/wireless/intersil/hostap/hostap_main.c 	struct net_device *dev, *mdev;
mdev               64 drivers/net/wireless/intersil/hostap/hostap_main.c 	mdev = local->dev;
mdev               65 drivers/net/wireless/intersil/hostap/hostap_main.c 	eth_hw_addr_inherit(dev, mdev);
mdev               66 drivers/net/wireless/intersil/hostap/hostap_main.c 	dev->base_addr = mdev->base_addr;
mdev               67 drivers/net/wireless/intersil/hostap/hostap_main.c 	dev->irq = mdev->irq;
mdev               68 drivers/net/wireless/intersil/hostap/hostap_main.c 	dev->mem_start = mdev->mem_start;
mdev               69 drivers/net/wireless/intersil/hostap/hostap_main.c 	dev->mem_end = mdev->mem_end;
mdev               78 drivers/net/wireless/intersil/hostap/hostap_main.c 	SET_NETDEV_DEV(dev, mdev->dev.parent);
mdev               92 drivers/net/wireless/intersil/hostap/hostap_main.c 	       mdev->name, dev->name);
mdev               28 drivers/net/wireless/intersil/orinoco/airport.c 	struct macio_dev *mdev;
mdev               36 drivers/net/wireless/intersil/orinoco/airport.c airport_suspend(struct macio_dev *mdev, pm_message_t state)
mdev               38 drivers/net/wireless/intersil/orinoco/airport.c 	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
mdev               58 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(mdev), 0, 0);
mdev               64 drivers/net/wireless/intersil/orinoco/airport.c airport_resume(struct macio_dev *mdev)
mdev               66 drivers/net/wireless/intersil/orinoco/airport.c 	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
mdev               75 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(mdev), 0, 1);
mdev               88 drivers/net/wireless/intersil/orinoco/airport.c airport_detach(struct macio_dev *mdev)
mdev               90 drivers/net/wireless/intersil/orinoco/airport.c 	struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev);
mdev              105 drivers/net/wireless/intersil/orinoco/airport.c 	macio_release_resource(mdev, 0);
mdev              108 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(mdev), 0, 0);
mdev              111 drivers/net/wireless/intersil/orinoco/airport.c 	macio_set_drvdata(mdev, NULL);
mdev              134 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(card->mdev), 0, 0);
mdev              137 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(card->mdev), 0, 1);
mdev              148 drivers/net/wireless/intersil/orinoco/airport.c airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
mdev              155 drivers/net/wireless/intersil/orinoco/airport.c 	if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
mdev              161 drivers/net/wireless/intersil/orinoco/airport.c 	priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
mdev              170 drivers/net/wireless/intersil/orinoco/airport.c 	card->mdev = mdev;
mdev              172 drivers/net/wireless/intersil/orinoco/airport.c 	if (macio_request_resource(mdev, 0, DRIVER_NAME)) {
mdev              178 drivers/net/wireless/intersil/orinoco/airport.c 	macio_set_drvdata(mdev, priv);
mdev              181 drivers/net/wireless/intersil/orinoco/airport.c 	card->irq = macio_irq(mdev, 0);
mdev              182 drivers/net/wireless/intersil/orinoco/airport.c 	phys_addr = macio_resource_start(mdev, 0);  /* Physical address */
mdev              194 drivers/net/wireless/intersil/orinoco/airport.c 			  macio_get_of_node(mdev), 0, 1);
mdev              220 drivers/net/wireless/intersil/orinoco/airport.c 	airport_detach(mdev);
mdev                5 drivers/net/wireless/mediatek/mt76/mt7603/core.c void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
mdev                7 drivers/net/wireless/mediatek/mt76/mt7603/core.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               83 drivers/net/wireless/mediatek/mt76/mt7603/dma.c void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev               86 drivers/net/wireless/mediatek/mt76/mt7603/dma.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              416 drivers/net/wireless/mediatek/mt76/mt7603/init.c static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
mdev              418 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              421 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	return dev->bus_ops->rr(mdev, addr);
mdev              424 drivers/net/wireless/mediatek/mt76/mt7603/init.c static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
mdev              426 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              429 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	dev->bus_ops->wr(mdev, addr, val);
mdev              432 drivers/net/wireless/mediatek/mt76/mt7603/init.c static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
mdev              434 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              437 drivers/net/wireless/mediatek/mt76/mt7603/init.c 	return dev->bus_ops->rmw(mdev, addr, mask, val);
mdev              907 drivers/net/wireless/mediatek/mt76/mt7603/mac.c int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mdev              912 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              930 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mdev             1085 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct mt76_dev *mdev = &dev->mt76;
mdev             1092 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	mt76_tx_status_lock(mdev, &list);
mdev             1093 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
mdev             1102 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 		mt76_tx_status_skb_done(mdev, skb, &list);
mdev             1104 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	mt76_tx_status_unlock(mdev, &list);
mdev             1153 drivers/net/wireless/mediatek/mt76/mt7603/mac.c void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev             1156 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev             1167 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	mt76_tx_complete_skb(mdev, skb);
mdev             1462 drivers/net/wireless/mediatek/mt76/mt7603/mac.c void mt7603_update_channel(struct mt76_dev *mdev)
mdev             1464 drivers/net/wireless/mediatek/mt76/mt7603/mac.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              314 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              317 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              342 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              345 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              351 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              354 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev              377 drivers/net/wireless/mediatek/mt76/mt7603/main.c mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mdev              379 drivers/net/wireless/mediatek/mt76/mt7603/main.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               21 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	struct mt76_dev *mdev = &dev->mt76;
mdev               25 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
mdev               27 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 		seq = ++mdev->mmio.mcu.msg_seq & 0xf;
mdev               57 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
mdev               60 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               70 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	mutex_lock(&mdev->mmio.mcu.mutex);
mdev               81 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 			dev_err(mdev->dev,
mdev              100 drivers/net/wireless/mediatek/mt76/mt7603/mcu.c 	mutex_unlock(&mdev->mmio.mcu.mutex);
mdev              227 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mdev              232 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev              235 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev              237 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
mdev              238 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
mdev              239 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              241 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              243 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              248 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_update_channel(struct mt76_dev *mdev);
mdev               18 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	struct mt76_dev *mdev;
mdev               35 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
mdev               37 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	if (!mdev)
mdev               40 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               41 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mdev               43 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
mdev               45 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mdev               47 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
mdev               65 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	struct mt76_dev *mdev = pci_get_drvdata(pdev);
mdev               66 drivers/net/wireless/mediatek/mt76/mt7603/pci.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               14 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	struct mt76_dev *mdev;
mdev               28 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
mdev               30 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	if (!mdev)
mdev               33 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               34 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	mt76_mmio_init(mdev, mem_base);
mdev               36 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
mdev               38 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mdev               40 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
mdev               58 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	struct mt76_dev *mdev = platform_get_drvdata(pdev);
mdev               59 drivers/net/wireless/mediatek/mt76/mt7603/soc.c 	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
mdev               59 drivers/net/wireless/mediatek/mt76/mt7615/dma.c void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev               62 drivers/net/wireless/mediatek/mt76/mt7615/dma.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              218 drivers/net/wireless/mediatek/mt76/mt7615/mac.c void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
mdev              222 drivers/net/wireless/mediatek/mt76/mt7615/mac.c void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev              236 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              237 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		txp = mt7615_txwi_to_txp(mdev, e->txwi);
mdev              246 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_tx_complete_skb(mdev, e->skb);
mdev              762 drivers/net/wireless/mediatek/mt76/mt7615/mac.c int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mdev              768 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              781 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mdev              819 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
mdev              976 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt76_dev *mdev = &dev->mt76;
mdev              983 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	mt76_tx_status_lock(mdev, &list);
mdev              984 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
mdev              993 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_tx_status_skb_done(mdev, skb, &list);
mdev              995 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	mt76_tx_status_unlock(mdev, &list);
mdev             1047 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt76_dev *mdev = &dev->mt76;
mdev             1060 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt7615_txp_skb_unmap(mdev, txwi);
mdev             1062 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 			mt76_tx_complete_skb(mdev, txwi->skb);
mdev             1066 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 		mt76_put_txwi(mdev, txwi);
mdev             1240 drivers/net/wireless/mediatek/mt76/mt7615/mac.c void mt7615_update_channel(struct mt76_dev *mdev)
mdev             1242 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev             1247 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	if (!test_bit(MT76_STATE_RUNNING, &mdev->state))
mdev             1250 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	state = mt76_channel_state(mdev, mdev->chandef.chan);
mdev             1254 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	spin_lock_bh(&mdev->cc_lock);
mdev             1258 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 						  mdev->survey_time));
mdev             1259 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	mdev->survey_time = cur_time;
mdev             1260 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	spin_unlock_bh(&mdev->cc_lock);
mdev              339 drivers/net/wireless/mediatek/mt76/mt7615/main.c int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              342 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              361 drivers/net/wireless/mediatek/mt76/mt7615/main.c void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              364 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              370 drivers/net/wireless/mediatek/mt76/mt7615/main.c void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              373 drivers/net/wireless/mediatek/mt76/mt7615/main.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              143 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
mdev              146 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              155 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mutex_lock(&mdev->mmio.mcu.mutex);
mdev              162 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 		skb = mt76_mcu_get_response(mdev, expires);
mdev              164 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 			dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
mdev              176 drivers/net/wireless/mediatek/mt76/mt7615/mcu.c 	mutex_unlock(&mdev->mmio.mcu.mutex);
mdev              231 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_update_channel(struct mt76_dev *mdev);
mdev              253 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mdev              258 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev              261 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev              263 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
mdev              264 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              266 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              268 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev               31 drivers/net/wireless/mediatek/mt76/mt7615/pci.c mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
mdev               33 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev               87 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	struct mt76_dev *mdev;
mdev              104 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
mdev              106 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	if (!mdev)
mdev              109 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              112 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
mdev              114 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
mdev              116 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
mdev              133 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	struct mt76_dev *mdev = pci_get_drvdata(pdev);
mdev              134 drivers/net/wireless/mediatek/mt76/mt7615/pci.c 	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mdev              169 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	struct mt76_dev *mdev;
mdev              186 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
mdev              188 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	if (!mdev)
mdev              191 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              194 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mdev              196 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev              197 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
mdev              199 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
mdev              228 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	struct mt76_dev *mdev = pci_get_drvdata(pdev);
mdev              229 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              231 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	mt76_unregister_device(mdev);
mdev              233 drivers/net/wireless/mediatek/mt76/mt76x0/pci.c 	mt76_free_device(mdev);
mdev              225 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	struct mt76_dev *mdev;
mdev              229 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
mdev              231 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	if (!mdev)
mdev              234 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              246 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	mt76x02u_init_mcu(mdev);
mdev              247 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	ret = mt76u_init(mdev, usb_intf);
mdev              254 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	if (!mt76x02_wait_for_mac(mdev)) {
mdev              259 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev              261 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
mdev              262 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 		 mdev->rev, mac_rev);
mdev              270 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 		dev_warn(mdev->dev, "Warning: eFUSE not present\n");
mdev              282 drivers/net/wireless/mediatek/mt76/mt76x0/usb.c 	ieee80211_free_hw(mdev->hw);
mdev              136 drivers/net/wireless/mediatek/mt76/mt76x02.h int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              138 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              169 drivers/net/wireless/mediatek/mt76/mt76x02.h bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
mdev              170 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev              172 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
mdev              176 drivers/net/wireless/mediatek/mt76/mt76x02.h int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
mdev              521 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct mt76_dev *mdev = &dev->mt76;
mdev              540 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76_tx_status_lock(mdev, &list);
mdev              544 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			status.skb = mt76_tx_status_skb_get(mdev, wcid,
mdev              551 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		mt76_tx_status_unlock(mdev, &list);
mdev              567 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 			mt76_tx_status_unlock(mdev, &list);
mdev              584 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 		mt76_tx_status_skb_done(mdev, status.skb, &list);
mdev              585 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76_tx_status_unlock(mdev, &list);
mdev              826 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev              829 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              840 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
mdev              844 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	mt76_tx_complete_skb(mdev, e->skb);
mdev              947 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c void mt76x02_update_channel(struct mt76_dev *mdev)
mdev              949 drivers/net/wireless/mediatek/mt76/mt76x02_mac.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              190 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev              192 drivers/net/wireless/mediatek/mt76/mt76x02_mac.h void mt76x02_update_channel(struct mt76_dev *mdev);
mdev               13 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
mdev               16 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               27 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	mutex_lock(&mdev->mmio.mcu.mutex);
mdev               29 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	seq = ++mdev->mmio.mcu.msg_seq & 0xf;
mdev               31 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 		seq = ++mdev->mmio.mcu.msg_seq & 0xf;
mdev               49 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 			dev_err(mdev->dev,
mdev               68 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c 	mutex_unlock(&mdev->mmio.mcu.mutex);
mdev               96 drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
mdev              247 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
mdev              251 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               35 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mdev               38 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               53 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	mt76_rx(mdev, q, skb);
mdev              124 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
mdev              126 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              138 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mdev              143 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              156 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mdev               17 drivers/net/wireless/mediatek/mt76/mt76x02_usb.h int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mdev               21 drivers/net/wireless/mediatek/mt76/mt76x02_usb.h void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev               18 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
mdev               22 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	mt76_tx_complete_skb(mdev, e->skb);
mdev               63 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
mdev               68 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               69 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
mdev               81 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mdev               92 drivers/net/wireless/mediatek/mt76/mt76x02_util.c mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
mdev               95 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
mdev              103 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
mdev              104 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
mdev              106 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
mdev              107 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	      MT_LED_CTRL_KICK(mdev->led_pin);
mdev              108 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	if (mdev->led_al)
mdev              109 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 		val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
mdev              118 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
mdev              125 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	mt76x02_led_set_config(mdev, delta_on, delta_off);
mdev              134 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
mdev              138 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 		mt76x02_led_set_config(mdev, 0, 0xff);
mdev              140 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 		mt76x02_led_set_config(mdev, 0xff, 0);
mdev              247 drivers/net/wireless/mediatek/mt76/mt76x02_util.c int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              250 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              275 drivers/net/wireless/mediatek/mt76/mt76x02_util.c void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mdev              278 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev              619 drivers/net/wireless/mediatek/mt76/mt76x02_util.c void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
mdev              622 drivers/net/wireless/mediatek/mt76/mt76x02_util.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               35 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	struct mt76_dev *mdev;
mdev               52 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops,
mdev               54 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	if (!mdev)
mdev               57 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               58 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mdev               61 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev               62 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
mdev               64 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
mdev               96 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	struct mt76_dev *mdev = pci_get_drvdata(pdev);
mdev               97 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               99 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	mt76_unregister_device(mdev);
mdev              101 drivers/net/wireless/mediatek/mt76/mt76x2/pci.c 	mt76_free_device(mdev);
mdev              158 drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c mt76pci_mcu_restart(struct mt76_dev *mdev)
mdev              163 drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               40 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	struct mt76_dev *mdev;
mdev               43 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
mdev               45 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	if (!mdev)
mdev               48 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev               55 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	mt76x02u_init_mcu(mdev);
mdev               56 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	err = mt76u_init(mdev, intf);
mdev               60 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
mdev               61 drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 	dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
mdev              163 drivers/platform/chrome/cros_ec_chardev.c 	struct miscdevice *mdev = filp->private_data;
mdev              164 drivers/platform/chrome/cros_ec_chardev.c 	struct cros_ec_dev *ec_dev = dev_get_drvdata(mdev->parent);
mdev              100 drivers/s390/cio/vfio_ccw_cp.c static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
mdev              104 drivers/s390/cio/vfio_ccw_cp.c 	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
mdev              110 drivers/s390/cio/vfio_ccw_cp.c 		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
mdev              124 drivers/s390/cio/vfio_ccw_cp.c static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
mdev              128 drivers/s390/cio/vfio_ccw_cp.c 		vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
mdev              192 drivers/s390/cio/vfio_ccw_cp.c static long copy_from_iova(struct device *mdev,
mdev              205 drivers/s390/cio/vfio_ccw_cp.c 	ret = pfn_array_pin(&pa, mdev);
mdev              207 drivers/s390/cio/vfio_ccw_cp.c 		pfn_array_unpin_free(&pa, mdev);
mdev              228 drivers/s390/cio/vfio_ccw_cp.c 	pfn_array_unpin_free(&pa, mdev);
mdev              429 drivers/s390/cio/vfio_ccw_cp.c 	len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
mdev              528 drivers/s390/cio/vfio_ccw_cp.c 		ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
mdev              557 drivers/s390/cio/vfio_ccw_cp.c 		ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len);
mdev              576 drivers/s390/cio/vfio_ccw_cp.c 		ret = pfn_array_pin(pa, cp->mdev);
mdev              592 drivers/s390/cio/vfio_ccw_cp.c 	pfn_array_unpin_free(pa, cp->mdev);
mdev              635 drivers/s390/cio/vfio_ccw_cp.c int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
mdev              648 drivers/s390/cio/vfio_ccw_cp.c 	cp->mdev = mdev;
mdev              685 drivers/s390/cio/vfio_ccw_cp.c 			pfn_array_unpin_free(chain->ch_pa + i, cp->mdev);
mdev               39 drivers/s390/cio/vfio_ccw_cp.h 	struct device *mdev;
mdev               44 drivers/s390/cio/vfio_ccw_cp.h extern int cp_init(struct channel_program *cp, struct device *mdev,
mdev              101 drivers/s390/cio/vfio_ccw_drv.c 	if (private->mdev && is_final)
mdev              250 drivers/s390/cio/vfio_ccw_drv.c 		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
mdev              248 drivers/s390/cio/vfio_ccw_fsm.c 	struct mdev_device *mdev = private->mdev;
mdev              263 drivers/s390/cio/vfio_ccw_fsm.c 					   mdev_uuid(mdev), schid.cssid,
mdev              268 drivers/s390/cio/vfio_ccw_fsm.c 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
mdev              273 drivers/s390/cio/vfio_ccw_fsm.c 					   mdev_uuid(mdev), schid.cssid,
mdev              284 drivers/s390/cio/vfio_ccw_fsm.c 					   mdev_uuid(mdev), schid.cssid,
mdev              297 drivers/s390/cio/vfio_ccw_fsm.c 					   mdev_uuid(mdev), schid.cssid,
mdev              308 drivers/s390/cio/vfio_ccw_fsm.c 				   mdev_uuid(mdev), schid.cssid,
mdev              316 drivers/s390/cio/vfio_ccw_fsm.c 				   mdev_uuid(mdev), schid.cssid,
mdev               20 drivers/s390/cio/vfio_ccw_ops.c static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
mdev               26 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev               64 drivers/s390/cio/vfio_ccw_ops.c 		if (vfio_ccw_mdev_reset(private->mdev))
mdev              113 drivers/s390/cio/vfio_ccw_ops.c static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              116 drivers/s390/cio/vfio_ccw_ops.c 		dev_get_drvdata(mdev_parent_dev(mdev));
mdev              124 drivers/s390/cio/vfio_ccw_ops.c 	private->mdev = mdev;
mdev              128 drivers/s390/cio/vfio_ccw_ops.c 			   mdev_uuid(mdev), private->sch->schid.cssid,
mdev              135 drivers/s390/cio/vfio_ccw_ops.c static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
mdev              138 drivers/s390/cio/vfio_ccw_ops.c 		dev_get_drvdata(mdev_parent_dev(mdev));
mdev              141 drivers/s390/cio/vfio_ccw_ops.c 			   mdev_uuid(mdev), private->sch->schid.cssid,
mdev              153 drivers/s390/cio/vfio_ccw_ops.c 	private->mdev = NULL;
mdev              159 drivers/s390/cio/vfio_ccw_ops.c static int vfio_ccw_mdev_open(struct mdev_device *mdev)
mdev              162 drivers/s390/cio/vfio_ccw_ops.c 		dev_get_drvdata(mdev_parent_dev(mdev));
mdev              168 drivers/s390/cio/vfio_ccw_ops.c 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev              175 drivers/s390/cio/vfio_ccw_ops.c 		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev              180 drivers/s390/cio/vfio_ccw_ops.c static void vfio_ccw_mdev_release(struct mdev_device *mdev)
mdev              183 drivers/s390/cio/vfio_ccw_ops.c 		dev_get_drvdata(mdev_parent_dev(mdev));
mdev              188 drivers/s390/cio/vfio_ccw_ops.c 		if (!vfio_ccw_mdev_reset(mdev))
mdev              194 drivers/s390/cio/vfio_ccw_ops.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev              226 drivers/s390/cio/vfio_ccw_ops.c static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
mdev              234 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev              281 drivers/s390/cio/vfio_ccw_ops.c static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
mdev              289 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev              307 drivers/s390/cio/vfio_ccw_ops.c 					 struct mdev_device *mdev)
mdev              311 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev              320 drivers/s390/cio/vfio_ccw_ops.c 					 struct mdev_device *mdev,
mdev              326 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev              396 drivers/s390/cio/vfio_ccw_ops.c static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
mdev              406 drivers/s390/cio/vfio_ccw_ops.c 	private = dev_get_drvdata(mdev_parent_dev(mdev));
mdev              485 drivers/s390/cio/vfio_ccw_ops.c static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
mdev              505 drivers/s390/cio/vfio_ccw_ops.c 		ret = vfio_ccw_mdev_get_device_info(&info, mdev);
mdev              523 drivers/s390/cio/vfio_ccw_ops.c 		ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
mdev              568 drivers/s390/cio/vfio_ccw_ops.c 		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
mdev              571 drivers/s390/cio/vfio_ccw_ops.c 		return vfio_ccw_mdev_reset(mdev);
mdev               83 drivers/s390/cio/vfio_ccw_private.h 	struct mdev_device	*mdev;
mdev               27 drivers/s390/crypto/vfio_ap_ops.c static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
mdev              125 drivers/s390/crypto/vfio_ap_ops.c 		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
mdev              212 drivers/s390/crypto/vfio_ap_ops.c 	ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1,
mdev              241 drivers/s390/crypto/vfio_ap_ops.c 		vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1);
mdev              324 drivers/s390/crypto/vfio_ap_ops.c static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              337 drivers/s390/crypto/vfio_ap_ops.c 	matrix_mdev->mdev = mdev;
mdev              339 drivers/s390/crypto/vfio_ap_ops.c 	mdev_set_drvdata(mdev, matrix_mdev);
mdev              349 drivers/s390/crypto/vfio_ap_ops.c static int vfio_ap_mdev_remove(struct mdev_device *mdev)
mdev              351 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              357 drivers/s390/crypto/vfio_ap_ops.c 	vfio_ap_mdev_reset_queues(mdev);
mdev              362 drivers/s390/crypto/vfio_ap_ops.c 	mdev_set_drvdata(mdev, NULL);
mdev              605 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              606 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              671 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              672 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              751 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              752 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              813 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              814 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              857 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              858 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              906 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              907 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              936 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              937 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev              955 drivers/s390/crypto/vfio_ap_ops.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              956 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev             1079 drivers/s390/crypto/vfio_ap_ops.c 		vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1);
mdev             1161 drivers/s390/crypto/vfio_ap_ops.c static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
mdev             1166 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev             1187 drivers/s390/crypto/vfio_ap_ops.c static int vfio_ap_mdev_open(struct mdev_device *mdev)
mdev             1189 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev             1200 drivers/s390/crypto/vfio_ap_ops.c 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
mdev             1209 drivers/s390/crypto/vfio_ap_ops.c 	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev             1214 drivers/s390/crypto/vfio_ap_ops.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
mdev             1220 drivers/s390/crypto/vfio_ap_ops.c static void vfio_ap_mdev_release(struct mdev_device *mdev)
mdev             1222 drivers/s390/crypto/vfio_ap_ops.c 	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
mdev             1228 drivers/s390/crypto/vfio_ap_ops.c 		vfio_ap_mdev_reset_queues(mdev);
mdev             1234 drivers/s390/crypto/vfio_ap_ops.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
mdev             1236 drivers/s390/crypto/vfio_ap_ops.c 	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
mdev             1261 drivers/s390/crypto/vfio_ap_ops.c static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
mdev             1272 drivers/s390/crypto/vfio_ap_ops.c 		ret = vfio_ap_mdev_reset_queues(mdev);
mdev               88 drivers/s390/crypto/vfio_ap_private.h 	struct mdev_device *mdev;
mdev               59 drivers/scsi/mac53c94.c 	struct macio_dev *mdev;
mdev              410 drivers/scsi/mac53c94.c static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
mdev              412 drivers/scsi/mac53c94.c 	struct device_node *node = macio_get_of_node(mdev);
mdev              413 drivers/scsi/mac53c94.c 	struct pci_dev *pdev = macio_get_pci_dev(mdev);
mdev              420 drivers/scsi/mac53c94.c 	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
mdev              423 drivers/scsi/mac53c94.c 		       macio_resource_count(mdev), macio_irq_count(mdev));
mdev              427 drivers/scsi/mac53c94.c 	if (macio_request_resources(mdev, "mac53c94") != 0) {
mdev              440 drivers/scsi/mac53c94.c 	macio_set_drvdata(mdev, state);
mdev              443 drivers/scsi/mac53c94.c 	state->mdev = mdev;
mdev              446 drivers/scsi/mac53c94.c 		ioremap(macio_resource_start(mdev, 0), 0x1000);
mdev              447 drivers/scsi/mac53c94.c 	state->intr = macio_irq(mdev, 0);
mdev              449 drivers/scsi/mac53c94.c 		ioremap(macio_resource_start(mdev, 1), 0x1000);
mdev              450 drivers/scsi/mac53c94.c 	state->dmaintr = macio_irq(mdev, 1);
mdev              490 drivers/scsi/mac53c94.c 	rc = scsi_add_host(host, &mdev->ofdev.dev);
mdev              508 drivers/scsi/mac53c94.c 	macio_release_resources(mdev);
mdev              513 drivers/scsi/mac53c94.c static int mac53c94_remove(struct macio_dev *mdev)
mdev              515 drivers/scsi/mac53c94.c 	struct fsc_state *fp = (struct fsc_state *)macio_get_drvdata(mdev);
mdev              530 drivers/scsi/mac53c94.c 	macio_release_resources(mdev);
mdev              174 drivers/scsi/mesh.c 	struct macio_dev *mdev;
mdev             1745 drivers/scsi/mesh.c 		pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1);
mdev             1748 drivers/scsi/mesh.c 		pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0);
mdev             1755 drivers/scsi/mesh.c static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
mdev             1757 drivers/scsi/mesh.c 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
mdev             1786 drivers/scsi/mesh.c static int mesh_resume(struct macio_dev *mdev)
mdev             1788 drivers/scsi/mesh.c 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
mdev             1812 drivers/scsi/mesh.c static int mesh_shutdown(struct macio_dev *mdev)
mdev             1814 drivers/scsi/mesh.c 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
mdev             1845 drivers/scsi/mesh.c static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
mdev             1847 drivers/scsi/mesh.c 	struct device_node *mesh = macio_get_of_node(mdev);
mdev             1848 drivers/scsi/mesh.c 	struct pci_dev* pdev = macio_get_pci_dev(mdev);
mdev             1856 drivers/scsi/mesh.c 	switch (mdev->bus->chip->type) {
mdev             1866 drivers/scsi/mesh.c 	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
mdev             1868 drivers/scsi/mesh.c 	       	       " (got %d,%d)\n", macio_resource_count(mdev),
mdev             1869 drivers/scsi/mesh.c 		       macio_irq_count(mdev));
mdev             1873 drivers/scsi/mesh.c 	if (macio_request_resources(mdev, "mesh") != 0) {
mdev             1888 drivers/scsi/mesh.c 	mesh_host->base = macio_resource_start(mdev, 0);
mdev             1889 drivers/scsi/mesh.c 	mesh_host->irq = macio_irq(mdev, 0);
mdev             1891 drivers/scsi/mesh.c 	macio_set_drvdata(mdev, ms);
mdev             1893 drivers/scsi/mesh.c 	ms->mdev = mdev;
mdev             1896 drivers/scsi/mesh.c 	ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000);
mdev             1901 drivers/scsi/mesh.c 	ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
mdev             1908 drivers/scsi/mesh.c        	ms->meshintr = macio_irq(mdev, 0);
mdev             1909 drivers/scsi/mesh.c        	ms->dmaintr = macio_irq(mdev, 1);
mdev             1919 drivers/scsi/mesh.c 	dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
mdev             1965 drivers/scsi/mesh.c 	if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
mdev             1977 drivers/scsi/mesh.c 	mesh_shutdown(mdev);
mdev             1979 drivers/scsi/mesh.c 	dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
mdev             1987 drivers/scsi/mesh.c 	macio_release_resources(mdev);
mdev             1992 drivers/scsi/mesh.c static int mesh_remove(struct macio_dev *mdev)
mdev             1994 drivers/scsi/mesh.c 	struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev);
mdev             2002 drivers/scsi/mesh.c 	mesh_shutdown(mdev);
mdev             2012 drivers/scsi/mesh.c 	dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
mdev             2016 drivers/scsi/mesh.c 	macio_release_resources(mdev);
mdev              139 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              142 drivers/sh/maple/maple.c 	mdev = to_maple_dev(dev);
mdev              143 drivers/sh/maple/maple.c 	mq = mdev->mq;
mdev              146 drivers/sh/maple/maple.c 	kfree(mdev);
mdev              157 drivers/sh/maple/maple.c int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
mdev              172 drivers/sh/maple/maple.c 	mdev->mq->command = command;
mdev              173 drivers/sh/maple/maple.c 	mdev->mq->length = length;
mdev              176 drivers/sh/maple/maple.c 	mdev->mq->sendbuf = sendbuf;
mdev              179 drivers/sh/maple/maple.c 	list_add_tail(&mdev->mq->list, &maple_waitq);
mdev              186 drivers/sh/maple/maple.c static struct mapleq *maple_allocq(struct maple_device *mdev)
mdev              195 drivers/sh/maple/maple.c 	mq->dev = mdev;
mdev              206 drivers/sh/maple/maple.c 	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
mdev              207 drivers/sh/maple/maple.c 		mdev->port, mdev->unit);
mdev              213 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              218 drivers/sh/maple/maple.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              219 drivers/sh/maple/maple.c 	if (!mdev)
mdev              222 drivers/sh/maple/maple.c 	mdev->port = port;
mdev              223 drivers/sh/maple/maple.c 	mdev->unit = unit;
mdev              225 drivers/sh/maple/maple.c 	mdev->mq = maple_allocq(mdev);
mdev              227 drivers/sh/maple/maple.c 	if (!mdev->mq) {
mdev              228 drivers/sh/maple/maple.c 		kfree(mdev);
mdev              231 drivers/sh/maple/maple.c 	mdev->dev.bus = &maple_bus_type;
mdev              232 drivers/sh/maple/maple.c 	mdev->dev.parent = &maple_bus;
mdev              233 drivers/sh/maple/maple.c 	init_waitqueue_head(&mdev->maple_wait);
mdev              234 drivers/sh/maple/maple.c 	return mdev;
mdev              237 drivers/sh/maple/maple.c static void maple_free_dev(struct maple_device *mdev)
mdev              239 drivers/sh/maple/maple.c 	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
mdev              240 drivers/sh/maple/maple.c 	kfree(mdev->mq);
mdev              241 drivers/sh/maple/maple.c 	kfree(mdev);
mdev              316 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              318 drivers/sh/maple/maple.c 	mdev = devptr;
mdev              320 drivers/sh/maple/maple.c 	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
mdev              325 drivers/sh/maple/maple.c static void maple_detach_driver(struct maple_device *mdev)
mdev              327 drivers/sh/maple/maple.c 	device_unregister(&mdev->dev);
mdev              331 drivers/sh/maple/maple.c static void maple_attach_driver(struct maple_device *mdev)
mdev              337 drivers/sh/maple/maple.c 	recvbuf = mdev->mq->recvbuf->buf;
mdev              340 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
mdev              341 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
mdev              342 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
mdev              343 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
mdev              344 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
mdev              345 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
mdev              346 drivers/sh/maple/maple.c 	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
mdev              347 drivers/sh/maple/maple.c 	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
mdev              348 drivers/sh/maple/maple.c 	mdev->product_name[30] = '\0';
mdev              349 drivers/sh/maple/maple.c 	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
mdev              350 drivers/sh/maple/maple.c 	mdev->product_licence[60] = '\0';
mdev              352 drivers/sh/maple/maple.c 	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
mdev              357 drivers/sh/maple/maple.c 	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
mdev              363 drivers/sh/maple/maple.c 	function = be32_to_cpu(mdev->devinfo.function);
mdev              365 drivers/sh/maple/maple.c 	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
mdev              366 drivers/sh/maple/maple.c 		mdev->product_name, function, mdev->port, mdev->unit);
mdev              371 drivers/sh/maple/maple.c 		mdev->driver = &maple_unsupported_device;
mdev              372 drivers/sh/maple/maple.c 		dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
mdev              375 drivers/sh/maple/maple.c 			bus_for_each_drv(&maple_bus_type, NULL, mdev,
mdev              380 drivers/sh/maple/maple.c 			dev_info(&mdev->dev, "no driver found\n");
mdev              381 drivers/sh/maple/maple.c 			mdev->driver = &maple_unsupported_device;
mdev              383 drivers/sh/maple/maple.c 		dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
mdev              384 drivers/sh/maple/maple.c 			     mdev->unit, function);
mdev              387 drivers/sh/maple/maple.c 	mdev->function = function;
mdev              388 drivers/sh/maple/maple.c 	mdev->dev.release = &maple_release_device;
mdev              390 drivers/sh/maple/maple.c 	atomic_set(&mdev->busy, 0);
mdev              391 drivers/sh/maple/maple.c 	error = device_register(&mdev->dev);
mdev              393 drivers/sh/maple/maple.c 		dev_warn(&mdev->dev, "could not register device at"
mdev              394 drivers/sh/maple/maple.c 			" (%d, %d), with error 0x%X\n", mdev->unit,
mdev              395 drivers/sh/maple/maple.c 			mdev->port, error);
mdev              396 drivers/sh/maple/maple.c 		maple_free_dev(mdev);
mdev              397 drivers/sh/maple/maple.c 		mdev = NULL;
mdev              410 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              413 drivers/sh/maple/maple.c 	mdev = to_maple_dev(device);
mdev              414 drivers/sh/maple/maple.c 	if (mdev->port == ds->port && mdev->unit == ds->unit)
mdev              422 drivers/sh/maple/maple.c 	struct maple_device *mdev = to_maple_dev(device);
mdev              423 drivers/sh/maple/maple.c 	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
mdev              424 drivers/sh/maple/maple.c 		time_after(jiffies, mdev->when)) {
mdev              426 drivers/sh/maple/maple.c 		add = maple_add_packet(mdev,
mdev              427 drivers/sh/maple/maple.c 			be32_to_cpu(mdev->devinfo.function),
mdev              430 drivers/sh/maple/maple.c 			mdev->when = jiffies + mdev->interval;
mdev              436 drivers/sh/maple/maple.c 			if (atomic_read(&mdev->busy) == 0) {
mdev              437 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 1);
mdev              438 drivers/sh/maple/maple.c 				maple_add_packet(mdev, 0,
mdev              449 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              473 drivers/sh/maple/maple.c 				mdev = baseunits[x];
mdev              474 drivers/sh/maple/maple.c 				if (!mdev)
mdev              476 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 1);
mdev              477 drivers/sh/maple/maple.c 				locking = maple_add_packet(mdev, 0,
mdev              492 drivers/sh/maple/maple.c static void maple_map_subunits(struct maple_device *mdev, int submask)
mdev              498 drivers/sh/maple/maple.c 	ds.port = mdev->port;
mdev              510 drivers/sh/maple/maple.c 			mdev_add = maple_alloc_dev(mdev->port, k + 1);
mdev              524 drivers/sh/maple/maple.c static void maple_clean_submap(struct maple_device *mdev)
mdev              528 drivers/sh/maple/maple.c 	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
mdev              531 drivers/sh/maple/maple.c 	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
mdev              535 drivers/sh/maple/maple.c static void maple_response_none(struct maple_device *mdev)
mdev              537 drivers/sh/maple/maple.c 	maple_clean_submap(mdev);
mdev              539 drivers/sh/maple/maple.c 	if (likely(mdev->unit != 0)) {
mdev              549 drivers/sh/maple/maple.c 		if (mdev->can_unload) {
mdev              550 drivers/sh/maple/maple.c 			if (!mdev->can_unload(mdev)) {
mdev              551 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 2);
mdev              552 drivers/sh/maple/maple.c 				wake_up(&mdev->maple_wait);
mdev              557 drivers/sh/maple/maple.c 		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
mdev              558 drivers/sh/maple/maple.c 			mdev->port, mdev->unit);
mdev              559 drivers/sh/maple/maple.c 		maple_detach_driver(mdev);
mdev              563 drivers/sh/maple/maple.c 			if (checked[mdev->port] == false) {
mdev              564 drivers/sh/maple/maple.c 				checked[mdev->port] = true;
mdev              565 drivers/sh/maple/maple.c 				empty[mdev->port] = true;
mdev              566 drivers/sh/maple/maple.c 				dev_info(&mdev->dev, "no devices"
mdev              567 drivers/sh/maple/maple.c 					" to port %d\n", mdev->port);
mdev              573 drivers/sh/maple/maple.c 	atomic_set(&mdev->busy, 0);
mdev              577 drivers/sh/maple/maple.c static void maple_response_devinfo(struct maple_device *mdev,
mdev              582 drivers/sh/maple/maple.c 		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
mdev              583 drivers/sh/maple/maple.c 			checked[mdev->port] = true;
mdev              584 drivers/sh/maple/maple.c 			maple_attach_driver(mdev);
mdev              586 drivers/sh/maple/maple.c 			if (mdev->unit != 0)
mdev              587 drivers/sh/maple/maple.c 				maple_attach_driver(mdev);
mdev              588 drivers/sh/maple/maple.c 			if (mdev->unit == 0) {
mdev              589 drivers/sh/maple/maple.c 				empty[mdev->port] = false;
mdev              590 drivers/sh/maple/maple.c 				maple_attach_driver(mdev);
mdev              594 drivers/sh/maple/maple.c 	if (mdev->unit == 0) {
mdev              596 drivers/sh/maple/maple.c 		if (submask ^ subdevice_map[mdev->port]) {
mdev              597 drivers/sh/maple/maple.c 			maple_map_subunits(mdev, submask);
mdev              598 drivers/sh/maple/maple.c 			subdevice_map[mdev->port] = submask;
mdev              603 drivers/sh/maple/maple.c static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
mdev              605 drivers/sh/maple/maple.c 	if (mdev->fileerr_handler) {
mdev              606 drivers/sh/maple/maple.c 		mdev->fileerr_handler(mdev, recvbuf);
mdev              609 drivers/sh/maple/maple.c 	dev_warn(&mdev->dev, "device at (%d, %d) reports "
mdev              610 drivers/sh/maple/maple.c 			"file error 0x%X\n", mdev->port, mdev->unit,
mdev              617 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              623 drivers/sh/maple/maple.c 			mdev = baseunits[i];
mdev              624 drivers/sh/maple/maple.c 			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
mdev              634 drivers/sh/maple/maple.c 	struct maple_device *mdev;
mdev              643 drivers/sh/maple/maple.c 			mdev = mq->dev;
mdev              652 drivers/sh/maple/maple.c 				maple_response_none(mdev);
mdev              656 drivers/sh/maple/maple.c 				maple_response_devinfo(mdev, recvbuf);
mdev              657 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              661 drivers/sh/maple/maple.c 				if (mdev->callback)
mdev              662 drivers/sh/maple/maple.c 					mdev->callback(mq);
mdev              663 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              664 drivers/sh/maple/maple.c 				wake_up(&mdev->maple_wait);
mdev              668 drivers/sh/maple/maple.c 				maple_response_fileerr(mdev, recvbuf);
mdev              669 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              670 drivers/sh/maple/maple.c 				wake_up(&mdev->maple_wait);
mdev              676 drivers/sh/maple/maple.c 				dev_warn(&mdev->dev, "non-fatal error"
mdev              678 drivers/sh/maple/maple.c 					mdev->port, mdev->unit);
mdev              679 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              683 drivers/sh/maple/maple.c 				dev_notice(&mdev->dev, "extended"
mdev              685 drivers/sh/maple/maple.c 				" but call is not supported\n", mdev->port,
mdev              686 drivers/sh/maple/maple.c 				mdev->unit);
mdev              687 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              691 drivers/sh/maple/maple.c 				atomic_set(&mdev->busy, 0);
mdev              692 drivers/sh/maple/maple.c 				wake_up(&mdev->maple_wait);
mdev              798 drivers/sh/maple/maple.c 	struct maple_device *mdev[MAPLE_PORTS];
mdev              848 drivers/sh/maple/maple.c 		mdev[i] = maple_alloc_dev(i, 0);
mdev              849 drivers/sh/maple/maple.c 		if (!mdev[i]) {
mdev              851 drivers/sh/maple/maple.c 				maple_free_dev(mdev[i]);
mdev              854 drivers/sh/maple/maple.c 		baseunits[i] = mdev[i];
mdev              855 drivers/sh/maple/maple.c 		atomic_set(&mdev[i]->busy, 1);
mdev              856 drivers/sh/maple/maple.c 		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
mdev              104 drivers/ssb/driver_mipscore.c 	struct ssb_device *mdev = bus->mipscore.dev;
mdev              120 drivers/ssb/driver_mipscore.c 		if ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))
mdev              145 drivers/ssb/driver_mipscore.c 	struct ssb_device *mdev = bus->mipscore.dev;
mdev              154 drivers/ssb/driver_mipscore.c 		ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC)));
mdev              160 drivers/ssb/driver_mipscore.c 		ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) | ssb_read32(mdev, SSB_INTVEC)));
mdev              162 drivers/ssb/driver_mipscore.c 		u32 ipsflag = ssb_read32(mdev, SSB_IPSFLAG);
mdev              171 drivers/ssb/driver_mipscore.c 		ssb_write32(mdev, SSB_IPSFLAG, irqflag);
mdev              190 drivers/staging/media/hantro/hantro.h 	struct media_device mdev;
mdev              491 drivers/staging/media/hantro/hantro_drv.c static int hantro_register_entity(struct media_device *mdev,
mdev              506 drivers/staging/media/hantro/hantro_drv.c 	name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
mdev              518 drivers/staging/media/hantro/hantro_drv.c 	ret = media_device_register_entity(mdev, entity);
mdev              528 drivers/staging/media/hantro/hantro_drv.c 	struct media_device *mdev = &vpu->mdev;
mdev              534 drivers/staging/media/hantro/hantro_drv.c 	ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
mdev              542 drivers/staging/media/hantro/hantro_drv.c 	ret = hantro_register_entity(mdev, &func->proc, "proc",
mdev              549 drivers/staging/media/hantro/hantro_drv.c 	ret = hantro_register_entity(mdev, &func->sink, "sink",
mdev              569 drivers/staging/media/hantro/hantro_drv.c 	func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
mdev              843 drivers/staging/media/hantro/hantro_drv.c 	vpu->mdev.dev = vpu->dev;
mdev              844 drivers/staging/media/hantro/hantro_drv.c 	strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
mdev              845 drivers/staging/media/hantro/hantro_drv.c 	strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
mdev              846 drivers/staging/media/hantro/hantro_drv.c 		sizeof(vpu->mdev.bus_info));
mdev              847 drivers/staging/media/hantro/hantro_drv.c 	media_device_init(&vpu->mdev);
mdev              848 drivers/staging/media/hantro/hantro_drv.c 	vpu->mdev.ops = &hantro_m2m_media_ops;
mdev              849 drivers/staging/media/hantro/hantro_drv.c 	vpu->v4l2_dev.mdev = &vpu->mdev;
mdev              863 drivers/staging/media/hantro/hantro_drv.c 	ret = media_device_register(&vpu->mdev);
mdev              876 drivers/staging/media/hantro/hantro_drv.c 	media_device_cleanup(&vpu->mdev);
mdev              893 drivers/staging/media/hantro/hantro_drv.c 	media_device_unregister(&vpu->mdev);
mdev              896 drivers/staging/media/hantro/hantro_drv.c 	media_device_cleanup(&vpu->mdev);
mdev              741 drivers/staging/media/imx/imx-media-capture.c 	priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
mdev              263 drivers/staging/media/imx/imx-media-dev-common.c 	struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
mdev              372 drivers/staging/media/imx/imx-media-dev-common.c 	imxmd->v4l2_dev.mdev = &imxmd->md;
mdev             1362 drivers/staging/media/ipu3/ipu3-v4l2.c 	imgu->v4l2_dev.mdev = &imgu->media_dev;
mdev              999 drivers/staging/media/omap4iss/iss.c 	iss->v4l2_dev.mdev = &iss->media_dev;
mdev              210 drivers/staging/media/omap4iss/iss_video.c 	struct media_device *mdev = entity->graph_obj.mdev;
mdev              213 drivers/staging/media/omap4iss/iss_video.c 	mutex_lock(&mdev->graph_mutex);
mdev              215 drivers/staging/media/omap4iss/iss_video.c 	if (media_graph_walk_init(&graph, mdev)) {
mdev              216 drivers/staging/media/omap4iss/iss_video.c 		mutex_unlock(&mdev->graph_mutex);
mdev              236 drivers/staging/media/omap4iss/iss_video.c 	mutex_unlock(&mdev->graph_mutex);
mdev              800 drivers/staging/media/omap4iss/iss_video.c 	return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
mdev              878 drivers/staging/media/omap4iss/iss_video.c 	ret = media_entity_enum_init(&pipe->ent_enum, entity->graph_obj.mdev);
mdev              882 drivers/staging/media/omap4iss/iss_video.c 	ret = media_graph_walk_init(&graph, entity->graph_obj.mdev);
mdev              358 drivers/staging/media/sunxi/cedrus/cedrus.c 	dev->mdev.dev = &pdev->dev;
mdev              359 drivers/staging/media/sunxi/cedrus/cedrus.c 	strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
mdev              360 drivers/staging/media/sunxi/cedrus/cedrus.c 	strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
mdev              361 drivers/staging/media/sunxi/cedrus/cedrus.c 		sizeof(dev->mdev.bus_info));
mdev              363 drivers/staging/media/sunxi/cedrus/cedrus.c 	media_device_init(&dev->mdev);
mdev              364 drivers/staging/media/sunxi/cedrus/cedrus.c 	dev->mdev.ops = &cedrus_m2m_media_ops;
mdev              365 drivers/staging/media/sunxi/cedrus/cedrus.c 	dev->v4l2_dev.mdev = &dev->mdev;
mdev              384 drivers/staging/media/sunxi/cedrus/cedrus.c 	ret = media_device_register(&dev->mdev);
mdev              410 drivers/staging/media/sunxi/cedrus/cedrus.c 	if (media_devnode_is_registered(dev->mdev.devnode)) {
mdev              411 drivers/staging/media/sunxi/cedrus/cedrus.c 		media_device_unregister(&dev->mdev);
mdev              413 drivers/staging/media/sunxi/cedrus/cedrus.c 		media_device_cleanup(&dev->mdev);
mdev              135 drivers/staging/media/sunxi/cedrus/cedrus.h 	struct media_device	mdev;
mdev              615 drivers/staging/most/configfs.c void most_interface_register_notify(const char *mdev)
mdev              621 drivers/staging/most/configfs.c 		if (!strcmp(mdev_link->device, mdev)) {
mdev              570 drivers/staging/most/core.c static struct most_channel *get_channel(char *mdev, char *mdev_ch)
mdev              576 drivers/staging/most/core.c 	dev = bus_find_device_by_name(&mc.bus, NULL, mdev);
mdev              613 drivers/staging/most/core.c int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
mdev              615 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              623 drivers/staging/most/core.c int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
mdev              625 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              633 drivers/staging/most/core.c int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
mdev              635 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              643 drivers/staging/most/core.c int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
mdev              645 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              653 drivers/staging/most/core.c int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
mdev              656 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              672 drivers/staging/most/core.c int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
mdev              674 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              693 drivers/staging/most/core.c int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
mdev              695 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              714 drivers/staging/most/core.c int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
mdev              717 drivers/staging/most/core.c 	struct most_channel *c = get_channel(mdev, mdev_ch);
mdev              742 drivers/staging/most/core.c 	char *mdev;
mdev              749 drivers/staging/most/core.c 	ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
mdev              755 drivers/staging/most/core.c 	c = get_channel(mdev, mdev_ch);
mdev              768 drivers/staging/most/core.c int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
mdev              776 drivers/staging/most/core.c 	c = get_channel(mdev, mdev_ch);
mdev              326 drivers/staging/most/core.h int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
mdev              328 drivers/staging/most/core.h int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
mdev              329 drivers/staging/most/core.h int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
mdev              330 drivers/staging/most/core.h int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
mdev              331 drivers/staging/most/core.h int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
mdev              332 drivers/staging/most/core.h int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
mdev              333 drivers/staging/most/core.h int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
mdev              334 drivers/staging/most/core.h int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
mdev              335 drivers/staging/most/core.h int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
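The most_set_cfg_* prototypes listed above from drivers/staging/most/core.h form the channel-configuration surface that the MOST configfs code calls into. As a hedged illustration only (not taken from the indexed sources), a caller could chain them as below; the interface name "usb-mdev0", the channel name "ep8e", and the "control"/"rx" value strings are assumptions made for the example.

/* Hedged sketch: drive the most_set_cfg_* helpers declared in core.h.
 * The names and value strings below are illustrative assumptions, not
 * taken from the indexed sources.
 */
static int example_configure_most_channel(void)
{
	static char dev[] = "usb-mdev0";	/* assumed MOST interface name */
	static char ch[] = "ep8e";		/* assumed channel name */
	static char datatype[] = "control";	/* assumed accepted datatype */
	static char dir[] = "rx";		/* assumed accepted direction */
	int ret;

	ret = most_set_cfg_buffer_size(dev, ch, 1024);
	if (ret)
		return ret;
	ret = most_set_cfg_num_buffers(dev, ch, 16);
	if (ret)
		return ret;
	ret = most_set_cfg_datatype(dev, ch, datatype);
	if (ret)
		return ret;
	ret = most_set_cfg_direction(dev, ch, dir);
	if (ret)
		return ret;
	return most_set_cfg_packets_xact(dev, ch, 2);
}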
mdev               78 drivers/staging/most/usb/usb.c 	struct most_dev *mdev;
mdev              230 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(iface);
mdev              235 drivers/staging/most/usb/usb.c 		dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
mdev              239 drivers/staging/most/usb/usb.c 		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
mdev              243 drivers/staging/most/usb/usb.c 	lock = mdev->channel_lock + channel;
mdev              245 drivers/staging/most/usb/usb.c 	mdev->is_channel_healthy[channel] = false;
mdev              248 drivers/staging/most/usb/usb.c 	cancel_work_sync(&mdev->clear_work[channel].ws);
mdev              250 drivers/staging/most/usb/usb.c 	mutex_lock(&mdev->io_mutex);
mdev              251 drivers/staging/most/usb/usb.c 	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
mdev              252 drivers/staging/most/usb/usb.c 	if (mdev->padding_active[channel])
mdev              253 drivers/staging/most/usb/usb.c 		mdev->padding_active[channel] = false;
mdev              255 drivers/staging/most/usb/usb.c 	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
mdev              256 drivers/staging/most/usb/usb.c 		del_timer_sync(&mdev->link_stat_timer);
mdev              257 drivers/staging/most/usb/usb.c 		cancel_work_sync(&mdev->poll_work_obj);
mdev              259 drivers/staging/most/usb/usb.c 	mutex_unlock(&mdev->io_mutex);
mdev              272 drivers/staging/most/usb/usb.c static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
mdev              274 drivers/staging/most/usb/usb.c 	struct most_channel_config *conf = &mdev->conf[channel];
mdev              283 drivers/staging/most/usb/usb.c 		dev_err(&mdev->usb_device->dev,
mdev              305 drivers/staging/most/usb/usb.c static int hdm_remove_padding(struct most_dev *mdev, int channel,
mdev              308 drivers/staging/most/usb/usb.c 	struct most_channel_config *const conf = &mdev->conf[channel];
mdev              339 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(mbo->ifp);
mdev              341 drivers/staging/most/usb/usb.c 	spinlock_t *lock = mdev->channel_lock + channel;
mdev              348 drivers/staging/most/usb/usb.c 	if (likely(mdev->is_channel_healthy[channel])) {
mdev              356 drivers/staging/most/usb/usb.c 			dev_warn(&mdev->usb_device->dev,
mdev              358 drivers/staging/most/usb/usb.c 				 mdev->ep_address[channel]);
mdev              359 drivers/staging/most/usb/usb.c 			mdev->is_channel_healthy[channel] = false;
mdev              360 drivers/staging/most/usb/usb.c 			mdev->clear_work[channel].pipe = urb->pipe;
mdev              361 drivers/staging/most/usb/usb.c 			schedule_work(&mdev->clear_work[channel].ws);
mdev              488 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(mbo->ifp);
mdev              490 drivers/staging/most/usb/usb.c 	struct device *dev = &mdev->usb_device->dev;
mdev              491 drivers/staging/most/usb/usb.c 	spinlock_t *lock = mdev->channel_lock + channel;
mdev              498 drivers/staging/most/usb/usb.c 	if (likely(mdev->is_channel_healthy[channel])) {
mdev              504 drivers/staging/most/usb/usb.c 			if (mdev->padding_active[channel] &&
mdev              505 drivers/staging/most/usb/usb.c 			    hdm_remove_padding(mdev, channel, mbo)) {
mdev              512 drivers/staging/most/usb/usb.c 				 mdev->ep_address[channel]);
mdev              513 drivers/staging/most/usb/usb.c 			mdev->is_channel_healthy[channel] = false;
mdev              514 drivers/staging/most/usb/usb.c 			mdev->clear_work[channel].pipe = urb->pipe;
mdev              515 drivers/staging/most/usb/usb.c 			schedule_work(&mdev->clear_work[channel].ws);
mdev              523 drivers/staging/most/usb/usb.c 				 mdev->ep_address[channel]);
mdev              553 drivers/staging/most/usb/usb.c 	struct most_dev *mdev;
mdev              565 drivers/staging/most/usb/usb.c 	mdev = to_mdev(iface);
mdev              566 drivers/staging/most/usb/usb.c 	conf = &mdev->conf[channel];
mdev              568 drivers/staging/most/usb/usb.c 	mutex_lock(&mdev->io_mutex);
mdev              569 drivers/staging/most/usb/usb.c 	if (!mdev->usb_device) {
mdev              580 drivers/staging/most/usb/usb.c 	if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
mdev              581 drivers/staging/most/usb/usb.c 	    hdm_add_padding(mdev, channel, mbo)) {
mdev              591 drivers/staging/most/usb/usb.c 		usb_fill_bulk_urb(urb, mdev->usb_device,
mdev              592 drivers/staging/most/usb/usb.c 				  usb_sndbulkpipe(mdev->usb_device,
mdev              593 drivers/staging/most/usb/usb.c 						  mdev->ep_address[channel]),
mdev              602 drivers/staging/most/usb/usb.c 		usb_fill_bulk_urb(urb, mdev->usb_device,
mdev              603 drivers/staging/most/usb/usb.c 				  usb_rcvbulkpipe(mdev->usb_device,
mdev              604 drivers/staging/most/usb/usb.c 						  mdev->ep_address[channel]),
mdev              612 drivers/staging/most/usb/usb.c 	usb_anchor_urb(urb, &mdev->busy_urbs[channel]);
mdev              616 drivers/staging/most/usb/usb.c 		dev_err(&mdev->usb_device->dev,
mdev              627 drivers/staging/most/usb/usb.c 	mutex_unlock(&mdev->io_mutex);
mdev              633 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(mbo->ifp);
mdev              635 drivers/staging/most/usb/usb.c 	return usb_alloc_coherent(mdev->usb_device, size, GFP_KERNEL,
mdev              641 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(mbo->ifp);
mdev              643 drivers/staging/most/usb/usb.c 	usb_free_coherent(mdev->usb_device, size, mbo->virt_address,
mdev              667 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev(iface);
mdev              668 drivers/staging/most/usb/usb.c 	struct device *dev = &mdev->usb_device->dev;
mdev              670 drivers/staging/most/usb/usb.c 	mdev->is_channel_healthy[channel] = true;
mdev              671 drivers/staging/most/usb/usb.c 	mdev->clear_work[channel].channel = channel;
mdev              672 drivers/staging/most/usb/usb.c 	mdev->clear_work[channel].mdev = mdev;
mdev              673 drivers/staging/most/usb/usb.c 	INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
mdev              691 drivers/staging/most/usb/usb.c 		mdev->padding_active[channel] = false;
mdev              700 drivers/staging/most/usb/usb.c 	mdev->padding_active[channel] = true;
mdev              715 drivers/staging/most/usb/usb.c 			 mdev->suffix[channel], old_size, conf->buffer_size);
mdev              722 drivers/staging/most/usb/usb.c 	mdev->conf[channel] = *conf;
mdev              724 drivers/staging/most/usb/usb.c 		u16 ep = mdev->ep_address[channel];
mdev              726 drivers/staging/most/usb/usb.c 		if (start_sync_ep(mdev->usb_device, ep) < 0)
mdev              746 drivers/staging/most/usb/usb.c 	struct most_dev *mdev;
mdev              749 drivers/staging/most/usb/usb.c 	mdev = to_mdev(iface);
mdev              750 drivers/staging/most/usb/usb.c 	mdev->on_netinfo = on_netinfo;
mdev              754 drivers/staging/most/usb/usb.c 	mdev->link_stat_timer.expires = jiffies + HZ;
mdev              755 drivers/staging/most/usb/usb.c 	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
mdev              767 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
mdev              769 drivers/staging/most/usb/usb.c 	schedule_work(&mdev->poll_work_obj);
mdev              770 drivers/staging/most/usb/usb.c 	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev              771 drivers/staging/most/usb/usb.c 	add_timer(&mdev->link_stat_timer);
mdev              782 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = to_mdev_from_work(wq_obj);
mdev              783 drivers/staging/most/usb/usb.c 	struct usb_device *usb_device = mdev->usb_device;
mdev              815 drivers/staging/most/usb/usb.c 	if (mdev->on_netinfo)
mdev              816 drivers/staging/most/usb/usb.c 		mdev->on_netinfo(&mdev->iface, link, hw_addr);
mdev              828 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = clear_work->mdev;
mdev              832 drivers/staging/most/usb/usb.c 	mutex_lock(&mdev->io_mutex);
mdev              833 drivers/staging/most/usb/usb.c 	most_stop_enqueue(&mdev->iface, channel);
mdev              834 drivers/staging/most/usb/usb.c 	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
mdev              835 drivers/staging/most/usb/usb.c 	if (usb_clear_halt(mdev->usb_device, pipe))
mdev              836 drivers/staging/most/usb/usb.c 		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
mdev              846 drivers/staging/most/usb/usb.c 	if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
mdev              847 drivers/staging/most/usb/usb.c 	    mdev->conf[channel].direction == MOST_CH_RX) {
mdev              849 drivers/staging/most/usb/usb.c 		int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
mdev              850 drivers/staging/most/usb/usb.c 					       mdev->ep_address[peer]);
mdev              851 drivers/staging/most/usb/usb.c 		usb_clear_halt(mdev->usb_device, snd_pipe);
mdev              853 drivers/staging/most/usb/usb.c 	mdev->is_channel_healthy[channel] = true;
mdev              854 drivers/staging/most/usb/usb.c 	most_resume_enqueue(&mdev->iface, channel);
mdev              855 drivers/staging/most/usb/usb.c 	mutex_unlock(&mdev->io_mutex);
mdev             1043 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev             1050 drivers/staging/most/usb/usb.c 	if (!mdev)
mdev             1053 drivers/staging/most/usb/usb.c 	usb_set_intfdata(interface, mdev);
mdev             1055 drivers/staging/most/usb/usb.c 	mutex_init(&mdev->io_mutex);
mdev             1056 drivers/staging/most/usb/usb.c 	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
mdev             1057 drivers/staging/most/usb/usb.c 	timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev             1059 drivers/staging/most/usb/usb.c 	mdev->usb_device = usb_dev;
mdev             1060 drivers/staging/most/usb/usb.c 	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
mdev             1062 drivers/staging/most/usb/usb.c 	mdev->iface.mod = hdm_usb_fops.owner;
mdev             1063 drivers/staging/most/usb/usb.c 	mdev->iface.driver_dev = &interface->dev;
mdev             1064 drivers/staging/most/usb/usb.c 	mdev->iface.interface = ITYPE_USB;
mdev             1065 drivers/staging/most/usb/usb.c 	mdev->iface.configure = hdm_configure_channel;
mdev             1066 drivers/staging/most/usb/usb.c 	mdev->iface.request_netinfo = hdm_request_netinfo;
mdev             1067 drivers/staging/most/usb/usb.c 	mdev->iface.enqueue = hdm_enqueue;
mdev             1068 drivers/staging/most/usb/usb.c 	mdev->iface.poison_channel = hdm_poison_channel;
mdev             1069 drivers/staging/most/usb/usb.c 	mdev->iface.dma_alloc = hdm_dma_alloc;
mdev             1070 drivers/staging/most/usb/usb.c 	mdev->iface.dma_free = hdm_dma_free;
mdev             1071 drivers/staging/most/usb/usb.c 	mdev->iface.description = mdev->description;
mdev             1072 drivers/staging/most/usb/usb.c 	mdev->iface.num_channels = num_endpoints;
mdev             1074 drivers/staging/most/usb/usb.c 	snprintf(mdev->description, sizeof(mdev->description),
mdev             1081 drivers/staging/most/usb/usb.c 	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
mdev             1082 drivers/staging/most/usb/usb.c 	if (!mdev->conf)
mdev             1085 drivers/staging/most/usb/usb.c 	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
mdev             1086 drivers/staging/most/usb/usb.c 	if (!mdev->cap)
mdev             1089 drivers/staging/most/usb/usb.c 	mdev->iface.channel_vector = mdev->cap;
mdev             1090 drivers/staging/most/usb/usb.c 	mdev->ep_address =
mdev             1091 drivers/staging/most/usb/usb.c 		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
mdev             1092 drivers/staging/most/usb/usb.c 	if (!mdev->ep_address)
mdev             1095 drivers/staging/most/usb/usb.c 	mdev->busy_urbs =
mdev             1096 drivers/staging/most/usb/usb.c 		kcalloc(num_endpoints, sizeof(*mdev->busy_urbs), GFP_KERNEL);
mdev             1097 drivers/staging/most/usb/usb.c 	if (!mdev->busy_urbs)
mdev             1100 drivers/staging/most/usb/usb.c 	tmp_cap = mdev->cap;
mdev             1103 drivers/staging/most/usb/usb.c 		mdev->ep_address[i] = ep_desc->bEndpointAddress;
mdev             1104 drivers/staging/most/usb/usb.c 		mdev->padding_active[i] = false;
mdev             1105 drivers/staging/most/usb/usb.c 		mdev->is_channel_healthy[i] = true;
mdev             1107 drivers/staging/most/usb/usb.c 		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
mdev             1108 drivers/staging/most/usb/usb.c 			 mdev->ep_address[i]);
mdev             1110 drivers/staging/most/usb/usb.c 		tmp_cap->name_suffix = &mdev->suffix[i][0];
mdev             1122 drivers/staging/most/usb/usb.c 		init_usb_anchor(&mdev->busy_urbs[i]);
mdev             1123 drivers/staging/most/usb/usb.c 		spin_lock_init(&mdev->channel_lock[i]);
mdev             1137 drivers/staging/most/usb/usb.c 	ret = most_register_interface(&mdev->iface);
mdev             1141 drivers/staging/most/usb/usb.c 	mutex_lock(&mdev->io_mutex);
mdev             1145 drivers/staging/most/usb/usb.c 		mdev->dci = kzalloc(sizeof(*mdev->dci), GFP_KERNEL);
mdev             1146 drivers/staging/most/usb/usb.c 		if (!mdev->dci) {
mdev             1147 drivers/staging/most/usb/usb.c 			mutex_unlock(&mdev->io_mutex);
mdev             1148 drivers/staging/most/usb/usb.c 			most_deregister_interface(&mdev->iface);
mdev             1153 drivers/staging/most/usb/usb.c 		mdev->dci->dev.init_name = "dci";
mdev             1154 drivers/staging/most/usb/usb.c 		mdev->dci->dev.parent = &mdev->iface.dev;
mdev             1155 drivers/staging/most/usb/usb.c 		mdev->dci->dev.groups = dci_attr_groups;
mdev             1156 drivers/staging/most/usb/usb.c 		mdev->dci->dev.release = release_dci;
mdev             1157 drivers/staging/most/usb/usb.c 		if (device_register(&mdev->dci->dev)) {
mdev             1158 drivers/staging/most/usb/usb.c 			mutex_unlock(&mdev->io_mutex);
mdev             1159 drivers/staging/most/usb/usb.c 			most_deregister_interface(&mdev->iface);
mdev             1163 drivers/staging/most/usb/usb.c 		mdev->dci->usb_device = mdev->usb_device;
mdev             1165 drivers/staging/most/usb/usb.c 	mutex_unlock(&mdev->io_mutex);
mdev             1168 drivers/staging/most/usb/usb.c 	kfree(mdev->dci);
mdev             1170 drivers/staging/most/usb/usb.c 	kfree(mdev->busy_urbs);
mdev             1172 drivers/staging/most/usb/usb.c 	kfree(mdev->ep_address);
mdev             1174 drivers/staging/most/usb/usb.c 	kfree(mdev->cap);
mdev             1176 drivers/staging/most/usb/usb.c 	kfree(mdev->conf);
mdev             1178 drivers/staging/most/usb/usb.c 	kfree(mdev);
mdev             1198 drivers/staging/most/usb/usb.c 	struct most_dev *mdev = usb_get_intfdata(interface);
mdev             1200 drivers/staging/most/usb/usb.c 	mutex_lock(&mdev->io_mutex);
mdev             1202 drivers/staging/most/usb/usb.c 	mdev->usb_device = NULL;
mdev             1203 drivers/staging/most/usb/usb.c 	mutex_unlock(&mdev->io_mutex);
mdev             1205 drivers/staging/most/usb/usb.c 	del_timer_sync(&mdev->link_stat_timer);
mdev             1206 drivers/staging/most/usb/usb.c 	cancel_work_sync(&mdev->poll_work_obj);
mdev             1208 drivers/staging/most/usb/usb.c 	device_unregister(&mdev->dci->dev);
mdev             1209 drivers/staging/most/usb/usb.c 	most_deregister_interface(&mdev->iface);
mdev             1211 drivers/staging/most/usb/usb.c 	kfree(mdev->busy_urbs);
mdev             1212 drivers/staging/most/usb/usb.c 	kfree(mdev->cap);
mdev             1213 drivers/staging/most/usb/usb.c 	kfree(mdev->conf);
mdev             1214 drivers/staging/most/usb/usb.c 	kfree(mdev->ep_address);
mdev             1215 drivers/staging/most/usb/usb.c 	kfree(mdev);
mdev               52 drivers/staging/most/video/video.c 	struct most_video_dev *mdev;
mdev               59 drivers/staging/most/video/video.c static inline bool data_ready(struct most_video_dev *mdev)
mdev               61 drivers/staging/most/video/video.c 	return !list_empty(&mdev->pending_mbos);
mdev               64 drivers/staging/most/video/video.c static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
mdev               66 drivers/staging/most/video/video.c 	return list_first_entry(&mdev->pending_mbos, struct mbo, list);
mdev               73 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = video_drvdata(filp);
mdev               87 drivers/staging/most/video/video.c 	if (!atomic_inc_and_test(&mdev->access_ref)) {
mdev               88 drivers/staging/most/video/video.c 		v4l2_err(&mdev->v4l2_dev, "too many clients\n");
mdev               93 drivers/staging/most/video/video.c 	fh->mdev = mdev;
mdev               99 drivers/staging/most/video/video.c 	ret = most_start_channel(mdev->iface, mdev->ch_idx, &comp);
mdev              101 drivers/staging/most/video/video.c 		v4l2_err(&mdev->v4l2_dev, "most_start_channel() failed\n");
mdev              112 drivers/staging/most/video/video.c 	atomic_dec(&mdev->access_ref);
mdev              120 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              132 drivers/staging/most/video/video.c 	spin_lock_irq(&mdev->list_lock);
mdev              133 drivers/staging/most/video/video.c 	mdev->mute = true;
mdev              134 drivers/staging/most/video/video.c 	list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
mdev              136 drivers/staging/most/video/video.c 		spin_unlock_irq(&mdev->list_lock);
mdev              138 drivers/staging/most/video/video.c 		spin_lock_irq(&mdev->list_lock);
mdev              140 drivers/staging/most/video/video.c 	spin_unlock_irq(&mdev->list_lock);
mdev              141 drivers/staging/most/video/video.c 	most_stop_channel(mdev->iface, mdev->ch_idx, &comp);
mdev              142 drivers/staging/most/video/video.c 	mdev->mute = false;
mdev              147 drivers/staging/most/video/video.c 	atomic_dec(&mdev->access_ref);
mdev              156 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              162 drivers/staging/most/video/video.c 	if (!mdev)
mdev              167 drivers/staging/most/video/video.c 		if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
mdev              171 drivers/staging/most/video/video.c 	if (!data_ready(mdev))
mdev              174 drivers/staging/most/video/video.c 	while (count > 0 && data_ready(mdev)) {
mdev              175 drivers/staging/most/video/video.c 		struct mbo *const mbo = get_top_mbo(mdev);
mdev              180 drivers/staging/most/video/video.c 			v4l2_err(&mdev->v4l2_dev, "read: copy_to_user failed\n");
mdev              193 drivers/staging/most/video/video.c 			spin_lock_irq(&mdev->list_lock);
mdev              195 drivers/staging/most/video/video.c 			spin_unlock_irq(&mdev->list_lock);
mdev              205 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              209 drivers/staging/most/video/video.c 	if (!data_ready(mdev))
mdev              210 drivers/staging/most/video/video.c 		poll_wait(filp, &mdev->wait_data, wait);
mdev              211 drivers/staging/most/video/video.c 	if (data_ready(mdev))
mdev              229 drivers/staging/most/video/video.c static int comp_set_format(struct most_video_dev *mdev, unsigned int cmd,
mdev              247 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              252 drivers/staging/most/video/video.c 		 "%s", mdev->iface->description);
mdev              281 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              283 drivers/staging/most/video/video.c 	return comp_set_format(mdev, VIDIOC_TRY_FMT, f);
mdev              290 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              292 drivers/staging/most/video/video.c 	return comp_set_format(mdev, VIDIOC_S_FMT, f);
mdev              305 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              314 drivers/staging/most/video/video.c 	input->std = mdev->vdev->tvnorms;
mdev              322 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              323 drivers/staging/most/video/video.c 	*i = mdev->ctrl_input;
mdev              330 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = fh->mdev;
mdev              334 drivers/staging/most/video/video.c 	mdev->ctrl_input = index;
mdev              372 drivers/staging/most/video/video.c 	struct most_video_dev *mdev;
mdev              376 drivers/staging/most/video/video.c 	list_for_each_entry(mdev, &video_devices, list) {
mdev              377 drivers/staging/most/video/video.c 		if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
mdev              379 drivers/staging/most/video/video.c 			return mdev;
mdev              389 drivers/staging/most/video/video.c 	struct most_video_dev *mdev =
mdev              392 drivers/staging/most/video/video.c 	if (!mdev)
mdev              395 drivers/staging/most/video/video.c 	spin_lock_irqsave(&mdev->list_lock, flags);
mdev              396 drivers/staging/most/video/video.c 	if (unlikely(mdev->mute)) {
mdev              397 drivers/staging/most/video/video.c 		spin_unlock_irqrestore(&mdev->list_lock, flags);
mdev              401 drivers/staging/most/video/video.c 	list_add_tail(&mbo->list, &mdev->pending_mbos);
mdev              402 drivers/staging/most/video/video.c 	spin_unlock_irqrestore(&mdev->list_lock, flags);
mdev              403 drivers/staging/most/video/video.c 	wake_up_interruptible(&mdev->wait_data);
mdev              407 drivers/staging/most/video/video.c static int comp_register_videodev(struct most_video_dev *mdev)
mdev              411 drivers/staging/most/video/video.c 	init_waitqueue_head(&mdev->wait_data);
mdev              414 drivers/staging/most/video/video.c 	mdev->vdev = video_device_alloc();
mdev              415 drivers/staging/most/video/video.c 	if (!mdev->vdev)
mdev              419 drivers/staging/most/video/video.c 	*mdev->vdev = comp_videodev_template;
mdev              420 drivers/staging/most/video/video.c 	mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
mdev              421 drivers/staging/most/video/video.c 	mdev->vdev->lock = &mdev->lock;
mdev              422 drivers/staging/most/video/video.c 	snprintf(mdev->vdev->name, sizeof(mdev->vdev->name), "MOST: %s",
mdev              423 drivers/staging/most/video/video.c 		 mdev->v4l2_dev.name);
mdev              426 drivers/staging/most/video/video.c 	video_set_drvdata(mdev->vdev, mdev);
mdev              427 drivers/staging/most/video/video.c 	ret = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
mdev              429 drivers/staging/most/video/video.c 		v4l2_err(&mdev->v4l2_dev, "video_register_device failed (%d)\n",
mdev              431 drivers/staging/most/video/video.c 		video_device_release(mdev->vdev);
mdev              437 drivers/staging/most/video/video.c static void comp_unregister_videodev(struct most_video_dev *mdev)
mdev              439 drivers/staging/most/video/video.c 	video_unregister_device(mdev->vdev);
mdev              444 drivers/staging/most/video/video.c 	struct most_video_dev *mdev =
mdev              448 drivers/staging/most/video/video.c 	kfree(mdev);
mdev              456 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
mdev              458 drivers/staging/most/video/video.c 	if (mdev) {
mdev              474 drivers/staging/most/video/video.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              475 drivers/staging/most/video/video.c 	if (!mdev)
mdev              478 drivers/staging/most/video/video.c 	mutex_init(&mdev->lock);
mdev              479 drivers/staging/most/video/video.c 	atomic_set(&mdev->access_ref, -1);
mdev              480 drivers/staging/most/video/video.c 	spin_lock_init(&mdev->list_lock);
mdev              481 drivers/staging/most/video/video.c 	INIT_LIST_HEAD(&mdev->pending_mbos);
mdev              482 drivers/staging/most/video/video.c 	mdev->iface = iface;
mdev              483 drivers/staging/most/video/video.c 	mdev->ch_idx = channel_idx;
mdev              484 drivers/staging/most/video/video.c 	mdev->v4l2_dev.release = comp_v4l2_dev_release;
mdev              487 drivers/staging/most/video/video.c 	strlcpy(mdev->v4l2_dev.name, name, sizeof(mdev->v4l2_dev.name));
mdev              488 drivers/staging/most/video/video.c 	ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
mdev              491 drivers/staging/most/video/video.c 		kfree(mdev);
mdev              495 drivers/staging/most/video/video.c 	ret = comp_register_videodev(mdev);
mdev              500 drivers/staging/most/video/video.c 	list_add(&mdev->list, &video_devices);
mdev              505 drivers/staging/most/video/video.c 	v4l2_device_disconnect(&mdev->v4l2_dev);
mdev              506 drivers/staging/most/video/video.c 	v4l2_device_put(&mdev->v4l2_dev);
mdev              513 drivers/staging/most/video/video.c 	struct most_video_dev *mdev = get_comp_dev(iface, channel_idx);
mdev              515 drivers/staging/most/video/video.c 	if (!mdev) {
mdev              521 drivers/staging/most/video/video.c 	list_del(&mdev->list);
mdev              524 drivers/staging/most/video/video.c 	comp_unregister_videodev(mdev);
mdev              525 drivers/staging/most/video/video.c 	v4l2_device_disconnect(&mdev->v4l2_dev);
mdev              526 drivers/staging/most/video/video.c 	v4l2_device_put(&mdev->v4l2_dev);
mdev              554 drivers/staging/most/video/video.c 	struct most_video_dev *mdev, *tmp;
mdev              563 drivers/staging/most/video/video.c 	list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
mdev              564 drivers/staging/most/video/video.c 		list_del(&mdev->list);
mdev              567 drivers/staging/most/video/video.c 		comp_unregister_videodev(mdev);
mdev              568 drivers/staging/most/video/video.c 		v4l2_device_disconnect(&mdev->v4l2_dev);
mdev              569 drivers/staging/most/video/video.c 		v4l2_device_put(&mdev->v4l2_dev);
mdev               29 drivers/tty/serial/8250/8250_men_mcb.c static u32 men_lookup_uartclk(struct mcb_device *mdev)
mdev               34 drivers/tty/serial/8250/8250_men_mcb.c 	dev_info(&mdev->dev, "%s on board %s\n",
mdev               35 drivers/tty/serial/8250/8250_men_mcb.c 		dev_name(&mdev->dev),
mdev               36 drivers/tty/serial/8250/8250_men_mcb.c 		mdev->bus->name);
mdev               37 drivers/tty/serial/8250/8250_men_mcb.c 	if (strncmp(mdev->bus->name, "F075", 4) == 0)
mdev               39 drivers/tty/serial/8250/8250_men_mcb.c 	else if (strncmp(mdev->bus->name, "F216", 4) == 0)
mdev               41 drivers/tty/serial/8250/8250_men_mcb.c 	else if (strncmp(mdev->bus->name, "G215", 4) == 0)
mdev               43 drivers/tty/serial/8250/8250_men_mcb.c 	else if (strncmp(mdev->bus->name, "F210", 4) == 0)
mdev               46 drivers/tty/serial/8250/8250_men_mcb.c 		dev_info(&mdev->dev,
mdev               54 drivers/tty/serial/8250/8250_men_mcb.c static unsigned int get_num_ports(struct mcb_device *mdev,
mdev               57 drivers/tty/serial/8250/8250_men_mcb.c 	switch (mdev->id) {
mdev               65 drivers/tty/serial/8250/8250_men_mcb.c 		dev_err(&mdev->dev, "no supported device!\n");
mdev               70 drivers/tty/serial/8250/8250_men_mcb.c static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
mdev               79 drivers/tty/serial/8250/8250_men_mcb.c 	mem = mcb_get_resource(mdev, IORESOURCE_MEM);
mdev               82 drivers/tty/serial/8250/8250_men_mcb.c 	membase = devm_ioremap_resource(&mdev->dev, mem);
mdev               86 drivers/tty/serial/8250/8250_men_mcb.c 	num_ports = get_num_ports(mdev, membase);
mdev               88 drivers/tty/serial/8250/8250_men_mcb.c 	dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
mdev               89 drivers/tty/serial/8250/8250_men_mcb.c 		mdev->id, num_ports);
mdev               92 drivers/tty/serial/8250/8250_men_mcb.c 		dev_err(&mdev->dev, "unexpected number of ports: %u\n",
mdev               97 drivers/tty/serial/8250/8250_men_mcb.c 	data = devm_kcalloc(&mdev->dev, num_ports,
mdev              103 drivers/tty/serial/8250/8250_men_mcb.c 	mcb_set_drvdata(mdev, data);
mdev              106 drivers/tty/serial/8250/8250_men_mcb.c 		data[i].uart.port.dev = mdev->dma_dev;
mdev              113 drivers/tty/serial/8250/8250_men_mcb.c 		data[i].uart.port.uartclk = men_lookup_uartclk(mdev);
mdev              115 drivers/tty/serial/8250/8250_men_mcb.c 		data[i].uart.port.irq = mcb_get_irq(mdev);
mdev              125 drivers/tty/serial/8250/8250_men_mcb.c 			dev_err(&mdev->dev, "unable to register UART port\n");
mdev              128 drivers/tty/serial/8250/8250_men_mcb.c 		dev_info(&mdev->dev, "found MCB UART: ttyS%d\n", data[i].line);
mdev              134 drivers/tty/serial/8250/8250_men_mcb.c static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
mdev              137 drivers/tty/serial/8250/8250_men_mcb.c 	struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
mdev              142 drivers/tty/serial/8250/8250_men_mcb.c 	num_ports = get_num_ports(mdev, data[0].uart.port.membase);
mdev              144 drivers/tty/serial/8250/8250_men_mcb.c 		dev_err(&mdev->dev, "error retrieving number of ports!\n");
mdev              121 drivers/tty/serial/men_z135_uart.c 	struct mcb_device *mdev;
mdev              264 drivers/tty/serial/men_z135_uart.c 		dev_warn(&uart->mdev->dev,
mdev              278 drivers/tty/serial/men_z135_uart.c 		dev_warn(&uart->mdev->dev,
mdev              329 drivers/tty/serial/men_z135_uart.c 		dev_err(&uart->mdev->dev,
mdev              409 drivers/tty/serial/men_z135_uart.c 			dev_dbg(&uart->mdev->dev, "Character Timeout Indication\n");
mdev              438 drivers/tty/serial/men_z135_uart.c 	struct device *dev = &uart->mdev->dev;
mdev              743 drivers/tty/serial/men_z135_uart.c 	struct mcb_device *mdev = uart->mdev;
mdev              746 drivers/tty/serial/men_z135_uart.c 	mem = mcb_request_mem(uart->mdev, dev_name(&mdev->dev));
mdev              809 drivers/tty/serial/men_z135_uart.c static int men_z135_probe(struct mcb_device *mdev,
mdev              817 drivers/tty/serial/men_z135_uart.c 	dev = &mdev->dev;
mdev              827 drivers/tty/serial/men_z135_uart.c 	mem = &mdev->mem;
mdev              829 drivers/tty/serial/men_z135_uart.c 	mcb_set_drvdata(mdev, uart);
mdev              835 drivers/tty/serial/men_z135_uart.c 	uart->port.irq = mcb_get_irq(mdev);
mdev              843 drivers/tty/serial/men_z135_uart.c 	uart->mdev = mdev;
mdev              865 drivers/tty/serial/men_z135_uart.c static void men_z135_remove(struct mcb_device *mdev)
mdev              867 drivers/tty/serial/men_z135_uart.c 	struct men_z135_port *uart = mcb_get_drvdata(mdev);
mdev             1544 drivers/tty/serial/pmac_zilog.c static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
mdev             1552 drivers/tty/serial/pmac_zilog.c 		if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
mdev             1559 drivers/tty/serial/pmac_zilog.c 	uap->dev = mdev;
mdev             1560 drivers/tty/serial/pmac_zilog.c 	uap->port.dev = &mdev->ofdev.dev;
mdev             1561 drivers/tty/serial/pmac_zilog.c 	dev_set_drvdata(&mdev->ofdev.dev, uap);
mdev             1580 drivers/tty/serial/pmac_zilog.c static int pmz_detach(struct macio_dev *mdev)
mdev             1582 drivers/tty/serial/pmac_zilog.c 	struct uart_pmac_port	*uap = dev_get_drvdata(&mdev->ofdev.dev);
mdev             1593 drivers/tty/serial/pmac_zilog.c 	dev_set_drvdata(&mdev->ofdev.dev, NULL);
mdev             1601 drivers/tty/serial/pmac_zilog.c static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
mdev             1603 drivers/tty/serial/pmac_zilog.c 	struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
mdev             1616 drivers/tty/serial/pmac_zilog.c static int pmz_resume(struct macio_dev *mdev)
mdev             1618 drivers/tty/serial/pmac_zilog.c 	struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
mdev               30 drivers/vfio/mdev/mdev_core.c struct device *mdev_parent_dev(struct mdev_device *mdev)
mdev               32 drivers/vfio/mdev/mdev_core.c 	return mdev->parent->dev;
mdev               36 drivers/vfio/mdev/mdev_core.c void *mdev_get_drvdata(struct mdev_device *mdev)
mdev               38 drivers/vfio/mdev/mdev_core.c 	return mdev->driver_data;
mdev               42 drivers/vfio/mdev/mdev_core.c void mdev_set_drvdata(struct mdev_device *mdev, void *data)
mdev               44 drivers/vfio/mdev/mdev_core.c 	mdev->driver_data = data;
mdev               48 drivers/vfio/mdev/mdev_core.c struct device *mdev_dev(struct mdev_device *mdev)
mdev               50 drivers/vfio/mdev/mdev_core.c 	return &mdev->dev;
mdev               60 drivers/vfio/mdev/mdev_core.c const guid_t *mdev_uuid(struct mdev_device *mdev)
mdev               62 drivers/vfio/mdev/mdev_core.c 	return &mdev->uuid;
mdev              103 drivers/vfio/mdev/mdev_core.c static void mdev_device_remove_common(struct mdev_device *mdev)
mdev              109 drivers/vfio/mdev/mdev_core.c 	type = to_mdev_type(mdev->type_kobj);
mdev              110 drivers/vfio/mdev/mdev_core.c 	mdev_remove_sysfs_files(&mdev->dev, type);
mdev              111 drivers/vfio/mdev/mdev_core.c 	device_del(&mdev->dev);
mdev              112 drivers/vfio/mdev/mdev_core.c 	parent = mdev->parent;
mdev              114 drivers/vfio/mdev/mdev_core.c 	ret = parent->ops->remove(mdev);
mdev              116 drivers/vfio/mdev/mdev_core.c 		dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
mdev              119 drivers/vfio/mdev/mdev_core.c 	put_device(&mdev->dev);
mdev              126 drivers/vfio/mdev/mdev_core.c 		struct mdev_device *mdev;
mdev              128 drivers/vfio/mdev/mdev_core.c 		mdev = to_mdev_device(dev);
mdev              129 drivers/vfio/mdev/mdev_core.c 		mdev_device_remove_common(mdev);
mdev              255 drivers/vfio/mdev/mdev_core.c static void mdev_device_free(struct mdev_device *mdev)
mdev              258 drivers/vfio/mdev/mdev_core.c 	list_del(&mdev->next);
mdev              261 drivers/vfio/mdev/mdev_core.c 	dev_dbg(&mdev->dev, "MDEV: destroying\n");
mdev              262 drivers/vfio/mdev/mdev_core.c 	kfree(mdev);
mdev              267 drivers/vfio/mdev/mdev_core.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev              269 drivers/vfio/mdev/mdev_core.c 	mdev_device_free(mdev);
mdev              276 drivers/vfio/mdev/mdev_core.c 	struct mdev_device *mdev, *tmp;
mdev              295 drivers/vfio/mdev/mdev_core.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              296 drivers/vfio/mdev/mdev_core.c 	if (!mdev) {
mdev              302 drivers/vfio/mdev/mdev_core.c 	guid_copy(&mdev->uuid, uuid);
mdev              303 drivers/vfio/mdev/mdev_core.c 	list_add(&mdev->next, &mdev_list);
mdev              306 drivers/vfio/mdev/mdev_core.c 	mdev->parent = parent;
mdev              310 drivers/vfio/mdev/mdev_core.c 		mdev_device_free(mdev);
mdev              315 drivers/vfio/mdev/mdev_core.c 	device_initialize(&mdev->dev);
mdev              316 drivers/vfio/mdev/mdev_core.c 	mdev->dev.parent  = dev;
mdev              317 drivers/vfio/mdev/mdev_core.c 	mdev->dev.bus     = &mdev_bus_type;
mdev              318 drivers/vfio/mdev/mdev_core.c 	mdev->dev.release = mdev_device_release;
mdev              319 drivers/vfio/mdev/mdev_core.c 	dev_set_name(&mdev->dev, "%pUl", uuid);
mdev              320 drivers/vfio/mdev/mdev_core.c 	mdev->dev.groups = parent->ops->mdev_attr_groups;
mdev              321 drivers/vfio/mdev/mdev_core.c 	mdev->type_kobj = kobj;
mdev              323 drivers/vfio/mdev/mdev_core.c 	ret = parent->ops->create(kobj, mdev);
mdev              327 drivers/vfio/mdev/mdev_core.c 	ret = device_add(&mdev->dev);
mdev              331 drivers/vfio/mdev/mdev_core.c 	ret = mdev_create_sysfs_files(&mdev->dev, type);
mdev              335 drivers/vfio/mdev/mdev_core.c 	mdev->active = true;
mdev              336 drivers/vfio/mdev/mdev_core.c 	dev_dbg(&mdev->dev, "MDEV: created\n");
mdev              342 drivers/vfio/mdev/mdev_core.c 	device_del(&mdev->dev);
mdev              344 drivers/vfio/mdev/mdev_core.c 	parent->ops->remove(mdev);
mdev              347 drivers/vfio/mdev/mdev_core.c 	put_device(&mdev->dev);
mdev              355 drivers/vfio/mdev/mdev_core.c 	struct mdev_device *mdev, *tmp;
mdev              358 drivers/vfio/mdev/mdev_core.c 	mdev = to_mdev_device(dev);
mdev              362 drivers/vfio/mdev/mdev_core.c 		if (tmp == mdev)
mdev              366 drivers/vfio/mdev/mdev_core.c 	if (tmp != mdev) {
mdev              371 drivers/vfio/mdev/mdev_core.c 	if (!mdev->active) {
mdev              376 drivers/vfio/mdev/mdev_core.c 	mdev->active = false;
mdev              379 drivers/vfio/mdev/mdev_core.c 	parent = mdev->parent;
mdev              384 drivers/vfio/mdev/mdev_core.c 	mdev_device_remove_common(mdev);
mdev              391 drivers/vfio/mdev/mdev_core.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev              393 drivers/vfio/mdev/mdev_core.c 	mdev->iommu_device = iommu_device;
mdev              401 drivers/vfio/mdev/mdev_core.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev              403 drivers/vfio/mdev/mdev_core.c 	return mdev->iommu_device;
mdev               16 drivers/vfio/mdev/mdev_driver.c static int mdev_attach_iommu(struct mdev_device *mdev)
mdev               25 drivers/vfio/mdev/mdev_driver.c 	ret = iommu_group_add_device(group, &mdev->dev);
mdev               27 drivers/vfio/mdev/mdev_driver.c 		dev_info(&mdev->dev, "MDEV: group_id = %d\n",
mdev               34 drivers/vfio/mdev/mdev_driver.c static void mdev_detach_iommu(struct mdev_device *mdev)
mdev               36 drivers/vfio/mdev/mdev_driver.c 	iommu_group_remove_device(&mdev->dev);
mdev               37 drivers/vfio/mdev/mdev_driver.c 	dev_info(&mdev->dev, "MDEV: detaching iommu\n");
mdev               43 drivers/vfio/mdev/mdev_driver.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev               46 drivers/vfio/mdev/mdev_driver.c 	ret = mdev_attach_iommu(mdev);
mdev               53 drivers/vfio/mdev/mdev_driver.c 			mdev_detach_iommu(mdev);
mdev               62 drivers/vfio/mdev/mdev_driver.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev               67 drivers/vfio/mdev/mdev_driver.c 	mdev_detach_iommu(mdev);
mdev               26 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               27 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               36 drivers/vfio/mdev/vfio_mdev.c 	ret = parent->ops->open(mdev);
mdev               45 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               46 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               49 drivers/vfio/mdev/vfio_mdev.c 		parent->ops->release(mdev);
mdev               57 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               58 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               63 drivers/vfio/mdev/vfio_mdev.c 	return parent->ops->ioctl(mdev, cmd, arg);
mdev               69 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               70 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               75 drivers/vfio/mdev/vfio_mdev.c 	return parent->ops->read(mdev, buf, count, ppos);
mdev               81 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               82 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               87 drivers/vfio/mdev/vfio_mdev.c 	return parent->ops->write(mdev, buf, count, ppos);
mdev               92 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = device_data;
mdev               93 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_parent *parent = mdev->parent;
mdev               98 drivers/vfio/mdev/vfio_mdev.c 	return parent->ops->mmap(mdev, vma);
mdev              113 drivers/vfio/mdev/vfio_mdev.c 	struct mdev_device *mdev = to_mdev_device(dev);
mdev              115 drivers/vfio/mdev/vfio_mdev.c 	return vfio_add_group_dev(dev, &vfio_mdev_dev_ops, mdev);
mdev               93 drivers/w1/masters/mxc_w1.c 	struct mxc_w1_device *mdev;
mdev               98 drivers/w1/masters/mxc_w1.c 	mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
mdev              100 drivers/w1/masters/mxc_w1.c 	if (!mdev)
mdev              103 drivers/w1/masters/mxc_w1.c 	mdev->clk = devm_clk_get(&pdev->dev, NULL);
mdev              104 drivers/w1/masters/mxc_w1.c 	if (IS_ERR(mdev->clk))
mdev              105 drivers/w1/masters/mxc_w1.c 		return PTR_ERR(mdev->clk);
mdev              107 drivers/w1/masters/mxc_w1.c 	err = clk_prepare_enable(mdev->clk);
mdev              111 drivers/w1/masters/mxc_w1.c 	clkrate = clk_get_rate(mdev->clk);
mdev              122 drivers/w1/masters/mxc_w1.c 	mdev->regs = devm_platform_ioremap_resource(pdev, 0);
mdev              123 drivers/w1/masters/mxc_w1.c 	if (IS_ERR(mdev->regs)) {
mdev              124 drivers/w1/masters/mxc_w1.c 		err = PTR_ERR(mdev->regs);
mdev              129 drivers/w1/masters/mxc_w1.c 	writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
mdev              130 drivers/w1/masters/mxc_w1.c 	writeb(0, mdev->regs + MXC_W1_RESET);
mdev              132 drivers/w1/masters/mxc_w1.c 	writeb(clkdiv - 1, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev              134 drivers/w1/masters/mxc_w1.c 	mdev->bus_master.data = mdev;
mdev              135 drivers/w1/masters/mxc_w1.c 	mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus;
mdev              136 drivers/w1/masters/mxc_w1.c 	mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit;
mdev              138 drivers/w1/masters/mxc_w1.c 	platform_set_drvdata(pdev, mdev);
mdev              140 drivers/w1/masters/mxc_w1.c 	err = w1_add_master_device(&mdev->bus_master);
mdev              147 drivers/w1/masters/mxc_w1.c 	clk_disable_unprepare(mdev->clk);
mdev              156 drivers/w1/masters/mxc_w1.c 	struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
mdev              158 drivers/w1/masters/mxc_w1.c 	w1_remove_master_device(&mdev->bus_master);
mdev              160 drivers/w1/masters/mxc_w1.c 	clk_disable_unprepare(mdev->clk);
mdev               42 include/linux/lp.h 	unsigned int mdev;
mdev               72 include/linux/maple.h 	void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf);
mdev               73 include/linux/maple.h 	int (*can_unload)(struct maple_device *mdev);
mdev               96 include/linux/maple.h int maple_add_packet(struct maple_device *mdev, u32 function,
mdev               98 include/linux/maple.h void maple_clear_dev(struct maple_device *mdev);
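maple_add_packet() and the busy flag seen in the drivers/sh/maple/maple.c entries above follow a simple pattern: mark the device busy, then queue a packet. The sketch below is a hedged illustration only; the trailing (command, length, data) arguments of maple_add_packet() are inferred from the call sites excerpted above (e.g. maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL)), not from a full prototype.

#include <linux/maple.h>

/* Hedged sketch: queue a DEVINFO request, mirroring the busy-flag usage
 * in maple.c above.  Argument order past "function" is inferred from the
 * excerpted call sites.
 */
static int example_query_devinfo(struct maple_device *mdev)
{
	if (atomic_read(&mdev->busy) != 0)
		return -EBUSY;		/* a transfer is already in flight */

	atomic_set(&mdev->busy, 1);
	return maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
}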
mdev               96 include/linux/mcb.h 	int (*probe)(struct mcb_device *mdev, const struct mcb_device_id *id);
mdev               97 include/linux/mcb.h 	void (*remove)(struct mcb_device *mdev);
mdev               98 include/linux/mcb.h 	void (*shutdown)(struct mcb_device *mdev);
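The probe/remove/shutdown callbacks listed above from include/linux/mcb.h are what an MCB (MEN Chameleon Bus) driver fills in, as the 8250_men_mcb and men_z135_uart entries earlier illustrate. The skeleton below is a hedged sketch only; the struct mcb_driver layout and the module_mcb_driver() registration helper are assumptions about surrounding mcb.h definitions that are not part of this excerpt.

#include <linux/mcb.h>
#include <linux/module.h>

/* Hedged sketch of a minimal MCB driver; struct mcb_driver layout and
 * module_mcb_driver() are assumed, only the callback signatures come
 * from the excerpt above.
 */
static int example_mcb_probe(struct mcb_device *mdev,
			     const struct mcb_device_id *id)
{
	dev_info(&mdev->dev, "probing 16z%03u\n", mdev->id);
	return 0;
}

static void example_mcb_remove(struct mcb_device *mdev)
{
	dev_info(&mdev->dev, "removing\n");
}

static struct mcb_driver example_mcb_driver = {
	.driver		= {
		.name	= "example-mcb",
	},
	.probe		= example_mcb_probe,
	.remove		= example_mcb_remove,
};
module_mcb_driver(example_mcb_driver);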
mdev               84 include/linux/mdev.h 	int     (*create)(struct kobject *kobj, struct mdev_device *mdev);
mdev               85 include/linux/mdev.h 	int     (*remove)(struct mdev_device *mdev);
mdev               86 include/linux/mdev.h 	int     (*open)(struct mdev_device *mdev);
mdev               87 include/linux/mdev.h 	void    (*release)(struct mdev_device *mdev);
mdev               88 include/linux/mdev.h 	ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
mdev               90 include/linux/mdev.h 	ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
mdev               92 include/linux/mdev.h 	long	(*ioctl)(struct mdev_device *mdev, unsigned int cmd,
mdev               94 include/linux/mdev.h 	int	(*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
mdev              132 include/linux/mdev.h void *mdev_get_drvdata(struct mdev_device *mdev);
mdev              133 include/linux/mdev.h void mdev_set_drvdata(struct mdev_device *mdev, void *data);
mdev              134 include/linux/mdev.h const guid_t *mdev_uuid(struct mdev_device *mdev);
mdev              144 include/linux/mdev.h struct device *mdev_parent_dev(struct mdev_device *mdev);
mdev              145 include/linux/mdev.h struct device *mdev_dev(struct mdev_device *mdev);
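Note: the include/linux/mdev.h callbacks above belong to the mediated-device parent ops of this API generation (struct mdev_parent_ops), which a vendor driver fills and registers against its parent device. A minimal sketch; the my_* names are hypothetical, the .owner field and mdev_register_device() call are assumed from this API generation (they do not appear in the listing), and the mandatory supported_type_groups / sysfs attribute setup is omitted:

#include <linux/errno.h>
#include <linux/mdev.h>
#include <linux/module.h>
#include <linux/slab.h>

static int my_create(struct kobject *kobj, struct mdev_device *mdev)
{
        void *state = kzalloc(64, GFP_KERNEL);  /* per-instance state */

        if (!state)
                return -ENOMEM;
        mdev_set_drvdata(mdev, state);
        return 0;
}

static int my_remove(struct mdev_device *mdev)
{
        kfree(mdev_get_drvdata(mdev));
        mdev_set_drvdata(mdev, NULL);
        return 0;
}

static ssize_t my_read(struct mdev_device *mdev, char __user *buf,
                       size_t count, loff_t *ppos)
{
        return -EINVAL;         /* real drivers decode *ppos into a region + offset */
}

static const struct mdev_parent_ops my_mdev_ops = {
        .owner  = THIS_MODULE,
        .create = my_create,
        .remove = my_remove,
        .read   = my_read,
};

/* in the parent driver's probe:  err = mdev_register_device(dev, &my_mdev_ops); */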
mdev             1135 include/linux/mlx4/device.h void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
mdev               98 include/linux/mlx5/accel.h 	struct mlx5_core_dev  *mdev;
mdev              119 include/linux/mlx5/accel.h u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
mdev              122 include/linux/mlx5/accel.h mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
mdev              131 include/linux/mlx5/accel.h static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
mdev              134 include/linux/mlx5/accel.h mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
mdev             1138 include/linux/mlx5/device.h #define MLX5_CAP_GEN(mdev, cap) \
mdev             1139 include/linux/mlx5/device.h 	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
mdev             1141 include/linux/mlx5/device.h #define MLX5_CAP_GEN_64(mdev, cap) \
mdev             1142 include/linux/mlx5/device.h 	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
mdev             1144 include/linux/mlx5/device.h #define MLX5_CAP_GEN_MAX(mdev, cap) \
mdev             1145 include/linux/mlx5/device.h 	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
mdev             1147 include/linux/mlx5/device.h #define MLX5_CAP_ETH(mdev, cap) \
mdev             1149 include/linux/mlx5/device.h 		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
mdev             1151 include/linux/mlx5/device.h #define MLX5_CAP_ETH_MAX(mdev, cap) \
mdev             1153 include/linux/mlx5/device.h 		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
mdev             1155 include/linux/mlx5/device.h #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
mdev             1157 include/linux/mlx5/device.h 		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
mdev             1159 include/linux/mlx5/device.h #define MLX5_CAP_ROCE(mdev, cap) \
mdev             1160 include/linux/mlx5/device.h 	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
mdev             1162 include/linux/mlx5/device.h #define MLX5_CAP_ROCE_MAX(mdev, cap) \
mdev             1163 include/linux/mlx5/device.h 	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
mdev             1165 include/linux/mlx5/device.h #define MLX5_CAP_ATOMIC(mdev, cap) \
mdev             1166 include/linux/mlx5/device.h 	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
mdev             1168 include/linux/mlx5/device.h #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
mdev             1169 include/linux/mlx5/device.h 	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
mdev             1171 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE(mdev, cap) \
mdev             1172 include/linux/mlx5/device.h 	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
mdev             1174 include/linux/mlx5/device.h #define MLX5_CAP64_FLOWTABLE(mdev, cap) \
mdev             1175 include/linux/mlx5/device.h 	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
mdev             1177 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
mdev             1178 include/linux/mlx5/device.h 	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
mdev             1180 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
mdev             1181 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
mdev             1183 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
mdev             1184 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
mdev             1186 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
mdev             1187 include/linux/mlx5/device.h 		MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
mdev             1189 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
mdev             1190 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
mdev             1192 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
mdev             1193 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
mdev             1195 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
mdev             1196 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
mdev             1198 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
mdev             1199 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
mdev             1201 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
mdev             1202 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
mdev             1204 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
mdev             1205 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
mdev             1207 include/linux/mlx5/device.h #define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
mdev             1208 include/linux/mlx5/device.h 	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
mdev             1210 include/linux/mlx5/device.h #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
mdev             1212 include/linux/mlx5/device.h 		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
mdev             1214 include/linux/mlx5/device.h #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
mdev             1216 include/linux/mlx5/device.h 		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
mdev             1218 include/linux/mlx5/device.h #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
mdev             1219 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
mdev             1221 include/linux/mlx5/device.h #define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
mdev             1222 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
mdev             1224 include/linux/mlx5/device.h #define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
mdev             1225 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
mdev             1227 include/linux/mlx5/device.h #define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
mdev             1228 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
mdev             1230 include/linux/mlx5/device.h #define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
mdev             1231 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
mdev             1233 include/linux/mlx5/device.h #define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
mdev             1234 include/linux/mlx5/device.h 	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
mdev             1236 include/linux/mlx5/device.h #define MLX5_CAP_ESW(mdev, cap) \
mdev             1238 include/linux/mlx5/device.h 		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
mdev             1240 include/linux/mlx5/device.h #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
mdev             1242 include/linux/mlx5/device.h 		(mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
mdev             1244 include/linux/mlx5/device.h #define MLX5_CAP_ESW_MAX(mdev, cap) \
mdev             1246 include/linux/mlx5/device.h 		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
mdev             1248 include/linux/mlx5/device.h #define MLX5_CAP_ODP(mdev, cap)\
mdev             1249 include/linux/mlx5/device.h 	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
mdev             1251 include/linux/mlx5/device.h #define MLX5_CAP_ODP_MAX(mdev, cap)\
mdev             1252 include/linux/mlx5/device.h 	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
mdev             1254 include/linux/mlx5/device.h #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
mdev             1256 include/linux/mlx5/device.h 		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
mdev             1258 include/linux/mlx5/device.h #define MLX5_CAP_QOS(mdev, cap)\
mdev             1259 include/linux/mlx5/device.h 	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
mdev             1261 include/linux/mlx5/device.h #define MLX5_CAP_DEBUG(mdev, cap)\
mdev             1262 include/linux/mlx5/device.h 	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
mdev             1264 include/linux/mlx5/device.h #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
mdev             1265 include/linux/mlx5/device.h 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
mdev             1267 include/linux/mlx5/device.h #define MLX5_CAP_PCAM_REG(mdev, reg) \
mdev             1268 include/linux/mlx5/device.h 	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
mdev             1270 include/linux/mlx5/device.h #define MLX5_CAP_MCAM_REG(mdev, reg) \
mdev             1271 include/linux/mlx5/device.h 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
mdev             1273 include/linux/mlx5/device.h #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
mdev             1274 include/linux/mlx5/device.h 	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
mdev             1276 include/linux/mlx5/device.h #define MLX5_CAP_QCAM_REG(mdev, fld) \
mdev             1277 include/linux/mlx5/device.h 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
mdev             1279 include/linux/mlx5/device.h #define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
mdev             1280 include/linux/mlx5/device.h 	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
mdev             1282 include/linux/mlx5/device.h #define MLX5_CAP_FPGA(mdev, cap) \
mdev             1283 include/linux/mlx5/device.h 	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
mdev             1285 include/linux/mlx5/device.h #define MLX5_CAP64_FPGA(mdev, cap) \
mdev             1286 include/linux/mlx5/device.h 	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
mdev             1288 include/linux/mlx5/device.h #define MLX5_CAP_DEV_MEM(mdev, cap)\
mdev             1289 include/linux/mlx5/device.h 	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
mdev             1291 include/linux/mlx5/device.h #define MLX5_CAP64_DEV_MEM(mdev, cap)\
mdev             1292 include/linux/mlx5/device.h 	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
mdev             1294 include/linux/mlx5/device.h #define MLX5_CAP_TLS(mdev, cap) \
mdev             1295 include/linux/mlx5/device.h 	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
mdev             1297 include/linux/mlx5/device.h #define MLX5_CAP_DEV_EVENT(mdev, cap)\
mdev             1298 include/linux/mlx5/device.h 	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
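Note: the plain MLX5_CAP_* macros above read the currently enabled capability set (hca_cur) while the *_MAX variants read the firmware maximum (hca_max). A tiny usage sketch; my_is_eth_eswitch_manager is a hypothetical helper, the capability field names are taken from the macros in this listing:

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

static bool my_is_eth_eswitch_manager(struct mlx5_core_dev *mdev)
{
        /* current (enabled) general HCA caps, read through MLX5_CAP_GEN() */
        return MLX5_CAP_GEN(mdev, eswitch_manager) &&
               MLX5_CAP_GEN(mdev, vport_group_manager) &&
               MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH;
}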
mdev              409 include/linux/mlx5/driver.h 	struct mlx5_core_dev   *mdev;
mdev              651 include/linux/mlx5/driver.h 	struct mlx5_core_dev      *mdev;
mdev             1012 include/linux/mlx5/driver.h int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
mdev             1014 include/linux/mlx5/driver.h void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
mdev             1077 include/linux/mlx5/driver.h int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
mdev             1090 include/linux/mlx5/driver.h struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
mdev             1091 include/linux/mlx5/driver.h void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
mdev             1098 include/linux/mlx5/driver.h struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
mdev             1103 include/linux/mlx5/driver.h int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
mdev               12 include/linux/mlx5/eswitch.h #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
mdev               81 include/linux/mlx5/fs_helpers.h static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
mdev               85 include/linux/mlx5/fs_helpers.h 	int match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
mdev              119 include/linux/mlx5/fs_helpers.h mlx5_fs_is_outer_ipv4_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
mdev              122 include/linux/mlx5/fs_helpers.h 	return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
mdev              127 include/linux/mlx5/fs_helpers.h mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
mdev              130 include/linux/mlx5/fs_helpers.h 	return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v,
mdev              166 include/linux/mlx5/port.h int mlx5_max_tc(struct mlx5_core_dev *mdev);
mdev              168 include/linux/mlx5/port.h int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
mdev              169 include/linux/mlx5/port.h int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
mdev              171 include/linux/mlx5/port.h int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
mdev              172 include/linux/mlx5/port.h int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
mdev              174 include/linux/mlx5/port.h int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
mdev              175 include/linux/mlx5/port.h int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
mdev              177 include/linux/mlx5/port.h int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
mdev              180 include/linux/mlx5/port.h int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
mdev              183 include/linux/mlx5/port.h int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
mdev              184 include/linux/mlx5/port.h int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
mdev              186 include/linux/mlx5/port.h int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
mdev              187 include/linux/mlx5/port.h int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
mdev              188 include/linux/mlx5/port.h int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
mdev              189 include/linux/mlx5/port.h void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
mdev              194 include/linux/mlx5/port.h int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
mdev              195 include/linux/mlx5/port.h int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
mdev              197 include/linux/mlx5/port.h int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
mdev              198 include/linux/mlx5/port.h int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
mdev              199 include/linux/mlx5/port.h int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
mdev              200 include/linux/mlx5/port.h int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
mdev               41 include/linux/mlx5/vport.h #define MLX5_VPORT_ECPF_PLACEHOLDER(mdev)	(mlx5_ecpf_vport_exists(mdev))
mdev               43 include/linux/mlx5/vport.h #define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER +		\
mdev               45 include/linux/mlx5/vport.h 				   MLX5_VPORT_ECPF_PLACEHOLDER(mdev))
mdev               47 include/linux/mlx5/vport.h #define MLX5_VPORT_MANAGER(mdev)					\
mdev               48 include/linux/mlx5/vport.h 	(MLX5_CAP_GEN(mdev, vport_group_manager) &&			\
mdev               49 include/linux/mlx5/vport.h 	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&	\
mdev               50 include/linux/mlx5/vport.h 	 mlx5_core_is_pf(mdev))
mdev               66 include/linux/mlx5/vport.h u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
mdev               67 include/linux/mlx5/vport.h int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
mdev               69 include/linux/mlx5/vport.h int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
mdev               71 include/linux/mlx5/vport.h int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
mdev               72 include/linux/mlx5/vport.h int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
mdev               74 include/linux/mlx5/vport.h void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
mdev               75 include/linux/mlx5/vport.h int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
mdev               79 include/linux/mlx5/vport.h int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
mdev               80 include/linux/mlx5/vport.h int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
mdev               81 include/linux/mlx5/vport.h int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
mdev               83 include/linux/mlx5/vport.h int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
mdev               84 include/linux/mlx5/vport.h int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
mdev               86 include/linux/mlx5/vport.h int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
mdev              111 include/linux/mlx5/vport.h int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
mdev              116 include/linux/mlx5/vport.h int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
mdev              124 include/linux/mlx5/vport.h int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
mdev              125 include/linux/mlx5/vport.h int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
mdev              126 include/linux/mlx5/vport.h int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
mdev              136 include/linux/mlx5/vport.h int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
mdev              137 include/linux/mlx5/vport.h int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
mdev              143 include/linux/mlx5/vport.h u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
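Note: a short sketch chaining two of the NIC-vport query helpers declared above; my_log_vport and the pr_info() output are illustrative only:

#include <linux/etherdevice.h>
#include <linux/printk.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

static int my_log_vport(struct mlx5_core_dev *mdev)
{
        u8 mac[ETH_ALEN];
        u16 mtu;
        int err;

        err = mlx5_query_mac_address(mdev, mac);        /* NIC vport MAC */
        if (err)
                return err;

        err = mlx5_query_nic_vport_mtu(mdev, &mtu);
        if (err)
                return err;

        pr_info("vport mac %pM mtu %u\n", mac, mtu);
        return 0;
}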
mdev              119 include/media/dvbdev.h 	struct media_device *mdev;
mdev              286 include/media/dvbdev.h 						 struct media_device *mdev)
mdev              288 include/media/dvbdev.h 	adap->mdev = mdev;
mdev              299 include/media/dvbdev.h 	return adap->mdev;
mdev               52 include/media/media-dev-allocator.h void media_device_delete(struct media_device *mdev, const char *module_name,
mdev               60 include/media/media-dev-allocator.h 			struct media_device *mdev, const char *module_name,
mdev               68 include/media/media-device.h 	struct media_request *(*req_alloc)(struct media_device *mdev);
mdev              203 include/media/media-device.h 	struct media_entity_enum *ent_enum, struct media_device *mdev)
mdev              206 include/media/media-device.h 					mdev->entity_internal_idx_max + 1);
mdev              223 include/media/media-device.h void media_device_init(struct media_device *mdev);
mdev              233 include/media/media-device.h void media_device_cleanup(struct media_device *mdev);
mdev              279 include/media/media-device.h int __must_check __media_device_register(struct media_device *mdev,
mdev              291 include/media/media-device.h #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)
mdev              301 include/media/media-device.h void media_device_unregister(struct media_device *mdev);
mdev              339 include/media/media-device.h int __must_check media_device_register_entity(struct media_device *mdev,
mdev              376 include/media/media-device.h int __must_check media_device_register_entity_notify(struct media_device *mdev,
mdev              387 include/media/media-device.h void media_device_unregister_entity_notify(struct media_device *mdev,
mdev              391 include/media/media-device.h #define media_device_for_each_entity(entity, mdev)			\
mdev              392 include/media/media-device.h 	list_for_each_entry(entity, &(mdev)->entities, graph_obj.list)
mdev              395 include/media/media-device.h #define media_device_for_each_intf(intf, mdev)			\
mdev              396 include/media/media-device.h 	list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list)
mdev              399 include/media/media-device.h #define media_device_for_each_pad(pad, mdev)			\
mdev              400 include/media/media-device.h 	list_for_each_entry(pad, &(mdev)->pads, graph_obj.list)
mdev              403 include/media/media-device.h #define media_device_for_each_link(link, mdev)			\
mdev              404 include/media/media-device.h 	list_for_each_entry(link, &(mdev)->links, graph_obj.list)
mdev              415 include/media/media-device.h void media_device_pci_init(struct media_device *mdev,
mdev              435 include/media/media-device.h void __media_device_usb_init(struct media_device *mdev,
mdev              441 include/media/media-device.h static inline int media_device_register(struct media_device *mdev)
mdev              445 include/media/media-device.h static inline void media_device_unregister(struct media_device *mdev)
mdev              448 include/media/media-device.h static inline int media_device_register_entity(struct media_device *mdev,
mdev              457 include/media/media-device.h 					struct media_device *mdev,
mdev              463 include/media/media-device.h 					struct media_device *mdev,
mdev              468 include/media/media-device.h static inline void media_device_pci_init(struct media_device *mdev,
mdev              474 include/media/media-device.h static inline void __media_device_usb_init(struct media_device *mdev,
mdev              496 include/media/media-device.h #define media_device_usb_init(mdev, udev, name) \
mdev              497 include/media/media-device.h 	__media_device_usb_init(mdev, udev, name, KBUILD_MODNAME)
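Note: the media-device.h declarations above form an init/register/iterate/unregister/cleanup lifecycle. A minimal sketch for a USB driver embedding a struct media_device; my_media_* are hypothetical, and registering entities/interfaces before iterating is assumed to happen elsewhere in the driver:

#include <linux/device.h>
#include <linux/usb.h>
#include <media/media-device.h>

static int my_media_init(struct media_device *mdev, struct usb_device *udev)
{
        struct media_entity *entity;
        int ret;

        /* fills dev/model from the USB device and initialises mdev */
        media_device_usb_init(mdev, udev, "my-card");

        ret = media_device_register(mdev);
        if (ret)
                return ret;

        media_device_for_each_entity(entity, mdev)
                dev_dbg(mdev->dev, "entity: %s\n", entity->name);

        return 0;
}

static void my_media_exit(struct media_device *mdev)
{
        media_device_unregister(mdev);
        media_device_cleanup(mdev);
}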
mdev              114 include/media/media-devnode.h int __must_check media_devnode_register(struct media_device *mdev,
mdev               58 include/media/media-entity.h 	struct media_device	*mdev;
mdev              612 include/media/media-entity.h void media_gobj_create(struct media_device *mdev,
mdev              758 include/media/media-entity.h int media_create_pad_links(const struct media_device *mdev,
mdev              889 include/media/media-entity.h 	struct media_graph *graph, struct media_device *mdev);
mdev             1001 include/media/media-entity.h __must_check media_devnode_create(struct media_device *mdev,
mdev               63 include/media/media-request.h 	struct media_device *mdev;
mdev              208 include/media/media-request.h media_request_get_by_fd(struct media_device *mdev, int request_fd);
mdev              218 include/media/media-request.h int media_request_alloc(struct media_device *mdev,
mdev              232 include/media/media-request.h media_request_get_by_fd(struct media_device *mdev, int request_fd)
mdev             1278 include/media/v4l2-ctrls.h 		     struct media_device *mdev, struct v4l2_ext_controls *c);
mdev             1293 include/media/v4l2-ctrls.h 		       struct media_device *mdev,
mdev             1310 include/media/v4l2-ctrls.h 		     struct media_device *mdev,
mdev               49 include/media/v4l2-device.h 	struct media_device *mdev;
mdev              209 include/media/v4l2-device.h 	return v4l2_dev->mdev && v4l2_dev->mdev->ops &&
mdev              210 include/media/v4l2-device.h 	       v4l2_dev->mdev->ops->req_queue;
mdev               39 include/media/v4l2-mc.h int v4l2_mc_create_media_graph(struct media_device *mdev);
mdev              128 include/media/v4l2-mc.h static inline int v4l2_mc_create_media_graph(struct media_device *mdev)
mdev               57 include/media/videobuf2-dvb.h 			 struct media_device *mdev,
mdev              118 include/media/videobuf2-v4l2.h int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
mdev              144 include/media/videobuf2-v4l2.h int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
mdev              872 net/ipv6/ndisc.c 			struct net_device *mdev;
mdev              874 net/ipv6/ndisc.c 			mdev = netdev_master_upper_dev_get_rcu(dev);
mdev              875 net/ipv6/ndisc.c 			if (mdev) {
mdev              876 net/ipv6/ndisc.c 				ifp = ipv6_get_ifaddr(net, &msg->target, mdev, 1);
mdev               56 net/mac802154/llsec.c 		struct mac802154_llsec_device *mdev;
mdev               58 net/mac802154/llsec.c 		mdev = container_of(dev, struct mac802154_llsec_device, dev);
mdev               60 net/mac802154/llsec.c 		llsec_dev_free(mdev);
mdev              134 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev              137 net/mpls/af_mpls.c 		mdev = mpls_dev_get(dev);
mdev              138 net/mpls/af_mpls.c 		if (mdev)
mdev              139 net/mpls/af_mpls.c 			MPLS_INC_STATS_LEN(mdev, skb->len,
mdev              351 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev              359 net/mpls/af_mpls.c 	mdev = mpls_dev_get(dev);
mdev              360 net/mpls/af_mpls.c 	if (!mdev)
mdev              363 net/mpls/af_mpls.c 	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
mdev              366 net/mpls/af_mpls.c 	if (!mdev->input_enabled) {
mdev              367 net/mpls/af_mpls.c 		MPLS_INC_STATS(mdev, rx_dropped);
mdev              386 net/mpls/af_mpls.c 		MPLS_INC_STATS(mdev, rx_noroute);
mdev              471 net/mpls/af_mpls.c 	MPLS_INC_STATS(mdev, rx_errors);
mdev             1067 net/mpls/af_mpls.c static void mpls_get_stats(struct mpls_dev *mdev,
mdev             1079 net/mpls/af_mpls.c 		p = per_cpu_ptr(mdev->stats, i);
mdev             1101 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1104 net/mpls/af_mpls.c 	mdev = mpls_dev_get(dev);
mdev             1105 net/mpls/af_mpls.c 	if (!mdev)
mdev             1115 net/mpls/af_mpls.c 	mpls_get_stats(mdev, stats);
mdev             1122 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1124 net/mpls/af_mpls.c 	mdev = mpls_dev_get(dev);
mdev             1125 net/mpls/af_mpls.c 	if (!mdev)
mdev             1131 net/mpls/af_mpls.c static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
mdev             1150 net/mpls/af_mpls.c 	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
mdev             1155 net/mpls/af_mpls.c 			mdev->input_enabled) < 0)
mdev             1182 net/mpls/af_mpls.c 					int type, struct mpls_dev *mdev)
mdev             1191 net/mpls/af_mpls.c 	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, event, 0, type);
mdev             1257 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1275 net/mpls/af_mpls.c 	mdev = mpls_dev_get(dev);
mdev             1276 net/mpls/af_mpls.c 	if (!mdev)
mdev             1284 net/mpls/af_mpls.c 	err = mpls_netconf_fill_devconf(skb, mdev,
mdev             1306 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1336 net/mpls/af_mpls.c 			mdev = mpls_dev_get(dev);
mdev             1337 net/mpls/af_mpls.c 			if (!mdev)
mdev             1339 net/mpls/af_mpls.c 			if (mpls_netconf_fill_devconf(skb, mdev,
mdev             1372 net/mpls/af_mpls.c 		struct mpls_dev *mdev = ctl->extra1;
mdev             1373 net/mpls/af_mpls.c 		int i = (int *)ctl->data - (int *)mdev;
mdev             1380 net/mpls/af_mpls.c 						    NETCONFA_INPUT, mdev);
mdev             1399 net/mpls/af_mpls.c 				    struct mpls_dev *mdev)
mdev             1414 net/mpls/af_mpls.c 		table[i].data = (char *)mdev + (uintptr_t)table[i].data;
mdev             1415 net/mpls/af_mpls.c 		table[i].extra1 = mdev;
mdev             1421 net/mpls/af_mpls.c 	mdev->sysctl = register_net_sysctl(net, path, table);
mdev             1422 net/mpls/af_mpls.c 	if (!mdev->sysctl)
mdev             1425 net/mpls/af_mpls.c 	mpls_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL, mdev);
mdev             1435 net/mpls/af_mpls.c 				       struct mpls_dev *mdev)
mdev             1440 net/mpls/af_mpls.c 	table = mdev->sysctl->ctl_table_arg;
mdev             1441 net/mpls/af_mpls.c 	unregister_net_sysctl_table(mdev->sysctl);
mdev             1444 net/mpls/af_mpls.c 	mpls_netconf_notify_devconf(net, RTM_DELNETCONF, 0, mdev);
mdev             1449 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1455 net/mpls/af_mpls.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev             1456 net/mpls/af_mpls.c 	if (!mdev)
mdev             1459 net/mpls/af_mpls.c 	mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
mdev             1460 net/mpls/af_mpls.c 	if (!mdev->stats)
mdev             1466 net/mpls/af_mpls.c 		mpls_stats = per_cpu_ptr(mdev->stats, i);
mdev             1470 net/mpls/af_mpls.c 	mdev->dev = dev;
mdev             1472 net/mpls/af_mpls.c 	err = mpls_dev_sysctl_register(dev, mdev);
mdev             1476 net/mpls/af_mpls.c 	rcu_assign_pointer(dev->mpls_ptr, mdev);
mdev             1478 net/mpls/af_mpls.c 	return mdev;
mdev             1481 net/mpls/af_mpls.c 	free_percpu(mdev->stats);
mdev             1482 net/mpls/af_mpls.c 	kfree(mdev);
mdev             1488 net/mpls/af_mpls.c 	struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
mdev             1490 net/mpls/af_mpls.c 	free_percpu(mdev->stats);
mdev             1491 net/mpls/af_mpls.c 	kfree(mdev);
mdev             1584 net/mpls/af_mpls.c 	struct mpls_dev *mdev;
mdev             1598 net/mpls/af_mpls.c 			mdev = mpls_add_dev(dev);
mdev             1599 net/mpls/af_mpls.c 			if (IS_ERR(mdev))
mdev             1600 net/mpls/af_mpls.c 				return notifier_from_errno(PTR_ERR(mdev));
mdev             1605 net/mpls/af_mpls.c 	mdev = mpls_dev_get(dev);
mdev             1606 net/mpls/af_mpls.c 	if (!mdev)
mdev             1629 net/mpls/af_mpls.c 		mdev = mpls_dev_get(dev);
mdev             1630 net/mpls/af_mpls.c 		if (mdev) {
mdev             1631 net/mpls/af_mpls.c 			mpls_dev_sysctl_unregister(dev, mdev);
mdev             1633 net/mpls/af_mpls.c 			call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
mdev             1637 net/mpls/af_mpls.c 		mdev = mpls_dev_get(dev);
mdev             1638 net/mpls/af_mpls.c 		if (mdev) {
mdev             1641 net/mpls/af_mpls.c 			mpls_dev_sysctl_unregister(dev, mdev);
mdev             1642 net/mpls/af_mpls.c 			err = mpls_dev_sysctl_register(dev, mdev);
mdev               34 net/mpls/internal.h #define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)		\
mdev               36 net/mpls/internal.h 		__typeof__(*(mdev)->stats) *ptr =			\
mdev               37 net/mpls/internal.h 			raw_cpu_ptr((mdev)->stats);			\
mdev               46 net/mpls/internal.h #define MPLS_INC_STATS(mdev, field)					\
mdev               48 net/mpls/internal.h 		__typeof__(*(mdev)->stats) *ptr =			\
mdev               49 net/mpls/internal.h 			raw_cpu_ptr((mdev)->stats);			\
mdev               59 net/mpls/internal.h #define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)		\
mdev               61 net/mpls/internal.h 		this_cpu_inc((mdev)->stats->stats.pkts_field);		\
mdev               62 net/mpls/internal.h 		this_cpu_add((mdev)->stats->stats.bytes_field, (len));	\
mdev               65 net/mpls/internal.h #define MPLS_INC_STATS(mdev, field)			\
mdev               66 net/mpls/internal.h 	this_cpu_inc((mdev)->stats->stats.field)
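Note: both macro flavours above bump per-cpu MPLS counters; which definition is compiled in depends on the word size. A usage sketch, assuming the code sits next to net/mpls/internal.h (a private header) and using the counter fields that appear in the af_mpls.c lines above; my_mpls_count_rx is hypothetical:

#include <linux/skbuff.h>
#include "internal.h"

static void my_mpls_count_rx(struct mpls_dev *mdev, struct sk_buff *skb,
                             bool dropped)
{
        if (dropped)
                MPLS_INC_STATS(mdev, rx_dropped);
        else
                MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets, rx_bytes);
}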
mdev              167 samples/vfio-mdev/mbochs.c 	struct mdev_device *mdev;
mdev              250 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              308 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              336 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              366 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              438 samples/vfio-mdev/mbochs.c static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
mdev              441 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              442 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev);
mdev              506 samples/vfio-mdev/mbochs.c static int mbochs_reset(struct mdev_device *mdev)
mdev              508 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              519 samples/vfio-mdev/mbochs.c static int mbochs_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              522 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev);
mdev              550 samples/vfio-mdev/mbochs.c 	mdev_state->mdev = mdev;
mdev              551 samples/vfio-mdev/mbochs.c 	mdev_set_drvdata(mdev, mdev_state);
mdev              561 samples/vfio-mdev/mbochs.c 	mbochs_reset(mdev);
mdev              572 samples/vfio-mdev/mbochs.c static int mbochs_remove(struct mdev_device *mdev)
mdev              574 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              577 samples/vfio-mdev/mbochs.c 	mdev_set_drvdata(mdev, NULL);
mdev              584 samples/vfio-mdev/mbochs.c static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
mdev              596 samples/vfio-mdev/mbochs.c 			ret =  mdev_access(mdev, (char *)&val, sizeof(val),
mdev              608 samples/vfio-mdev/mbochs.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              620 samples/vfio-mdev/mbochs.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              643 samples/vfio-mdev/mbochs.c static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
mdev              658 samples/vfio-mdev/mbochs.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              670 samples/vfio-mdev/mbochs.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              682 samples/vfio-mdev/mbochs.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              733 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              768 samples/vfio-mdev/mbochs.c static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
mdev              770 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              806 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
mdev              821 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
mdev              838 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
mdev              867 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
mdev              879 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              992 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev             1020 samples/vfio-mdev/mbochs.c static int mbochs_get_region_info(struct mdev_device *mdev,
mdev             1026 samples/vfio-mdev/mbochs.c 	mdev_state = mdev_get_drvdata(mdev);
mdev             1076 samples/vfio-mdev/mbochs.c static int mbochs_get_irq_info(struct mdev_device *mdev,
mdev             1083 samples/vfio-mdev/mbochs.c static int mbochs_get_device_info(struct mdev_device *mdev,
mdev             1092 samples/vfio-mdev/mbochs.c static int mbochs_query_gfx_plane(struct mdev_device *mdev,
mdev             1095 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev             1096 samples/vfio-mdev/mbochs.c 	struct device *dev = mdev_dev(mdev);
mdev             1158 samples/vfio-mdev/mbochs.c static int mbochs_get_gfx_dmabuf(struct mdev_device *mdev,
mdev             1161 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev             1183 samples/vfio-mdev/mbochs.c static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
mdev             1202 samples/vfio-mdev/mbochs.c 		ret = mbochs_get_device_info(mdev, &info);
mdev             1226 samples/vfio-mdev/mbochs.c 		ret = mbochs_get_region_info(mdev, &info);
mdev             1249 samples/vfio-mdev/mbochs.c 		ret = mbochs_get_irq_info(mdev, &info);
mdev             1272 samples/vfio-mdev/mbochs.c 		ret = mbochs_query_gfx_plane(mdev, &plane);
mdev             1289 samples/vfio-mdev/mbochs.c 		return mbochs_get_gfx_dmabuf(mdev, dmabuf_id);
mdev             1296 samples/vfio-mdev/mbochs.c 		return mbochs_reset(mdev);
mdev             1301 samples/vfio-mdev/mbochs.c static int mbochs_open(struct mdev_device *mdev)
mdev             1309 samples/vfio-mdev/mbochs.c static void mbochs_close(struct mdev_device *mdev)
mdev             1311 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev             1335 samples/vfio-mdev/mbochs.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev             1336 samples/vfio-mdev/mbochs.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev               94 samples/vfio-mdev/mdpy.c 	struct mdev_device *mdev;
mdev              153 samples/vfio-mdev/mdpy.c 	struct device *dev = mdev_dev(mdev_state->mdev);
mdev              175 samples/vfio-mdev/mdpy.c static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
mdev              178 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              179 samples/vfio-mdev/mdpy.c 	struct device *dev = mdev_dev(mdev);
mdev              215 samples/vfio-mdev/mdpy.c static int mdpy_reset(struct mdev_device *mdev)
mdev              217 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              229 samples/vfio-mdev/mdpy.c static int mdpy_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              232 samples/vfio-mdev/mdpy.c 	struct device *dev = mdev_dev(mdev);
mdev              263 samples/vfio-mdev/mdpy.c 	mdev_state->mdev = mdev;
mdev              264 samples/vfio-mdev/mdpy.c 	mdev_set_drvdata(mdev, mdev_state);
mdev              269 samples/vfio-mdev/mdpy.c 	mdpy_reset(mdev);
mdev              275 samples/vfio-mdev/mdpy.c static int mdpy_remove(struct mdev_device *mdev)
mdev              277 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              278 samples/vfio-mdev/mdpy.c 	struct device *dev = mdev_dev(mdev);
mdev              282 samples/vfio-mdev/mdpy.c 	mdev_set_drvdata(mdev, NULL);
mdev              291 samples/vfio-mdev/mdpy.c static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
mdev              303 samples/vfio-mdev/mdpy.c 			ret =  mdev_access(mdev, (char *)&val, sizeof(val),
mdev              315 samples/vfio-mdev/mdpy.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              327 samples/vfio-mdev/mdpy.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              350 samples/vfio-mdev/mdpy.c static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
mdev              365 samples/vfio-mdev/mdpy.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              377 samples/vfio-mdev/mdpy.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              389 samples/vfio-mdev/mdpy.c 			ret = mdev_access(mdev, (char *)&val, sizeof(val),
mdev              407 samples/vfio-mdev/mdpy.c static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
mdev              409 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              425 samples/vfio-mdev/mdpy.c static int mdpy_get_region_info(struct mdev_device *mdev,
mdev              431 samples/vfio-mdev/mdpy.c 	mdev_state = mdev_get_drvdata(mdev);
mdev              463 samples/vfio-mdev/mdpy.c static int mdpy_get_irq_info(struct mdev_device *mdev,
mdev              470 samples/vfio-mdev/mdpy.c static int mdpy_get_device_info(struct mdev_device *mdev,
mdev              479 samples/vfio-mdev/mdpy.c static int mdpy_query_gfx_plane(struct mdev_device *mdev,
mdev              482 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              512 samples/vfio-mdev/mdpy.c static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
mdev              519 samples/vfio-mdev/mdpy.c 	mdev_state = mdev_get_drvdata(mdev);
mdev              534 samples/vfio-mdev/mdpy.c 		ret = mdpy_get_device_info(mdev, &info);
mdev              559 samples/vfio-mdev/mdpy.c 		ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
mdev              583 samples/vfio-mdev/mdpy.c 		ret = mdpy_get_irq_info(mdev, &info);
mdev              606 samples/vfio-mdev/mdpy.c 		ret = mdpy_query_gfx_plane(mdev, &plane);
mdev              620 samples/vfio-mdev/mdpy.c 		return mdpy_reset(mdev);
mdev              625 samples/vfio-mdev/mdpy.c static int mdpy_open(struct mdev_device *mdev)
mdev              633 samples/vfio-mdev/mdpy.c static void mdpy_close(struct mdev_device *mdev)
mdev              642 samples/vfio-mdev/mdpy.c 	struct mdev_device *mdev = mdev_from_dev(dev);
mdev              643 samples/vfio-mdev/mdpy.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              136 samples/vfio-mdev/mtty.c 	struct mdev_device *mdev;
mdev              634 samples/vfio-mdev/mtty.c static ssize_t mdev_access(struct mdev_device *mdev, u8 *buf, size_t count,
mdev              642 samples/vfio-mdev/mtty.c 	if (!mdev || !buf)
mdev              645 samples/vfio-mdev/mtty.c 	mdev_state = mdev_get_drvdata(mdev);
mdev              711 samples/vfio-mdev/mtty.c static int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
mdev              717 samples/vfio-mdev/mtty.c 	if (!mdev)
mdev              722 samples/vfio-mdev/mtty.c 			dev_driver_string(mdev_parent_dev(mdev)), i + 1);
mdev              749 samples/vfio-mdev/mtty.c 	mdev_state->mdev = mdev;
mdev              750 samples/vfio-mdev/mtty.c 	mdev_set_drvdata(mdev, mdev_state);
mdev              761 samples/vfio-mdev/mtty.c static int mtty_remove(struct mdev_device *mdev)
mdev              764 samples/vfio-mdev/mtty.c 	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
mdev              771 samples/vfio-mdev/mtty.c 			mdev_set_drvdata(mdev, NULL);
mdev              783 samples/vfio-mdev/mtty.c static int mtty_reset(struct mdev_device *mdev)
mdev              787 samples/vfio-mdev/mtty.c 	if (!mdev)
mdev              790 samples/vfio-mdev/mtty.c 	mdev_state = mdev_get_drvdata(mdev);
mdev              799 samples/vfio-mdev/mtty.c static ssize_t mtty_read(struct mdev_device *mdev, char __user *buf,
mdev              811 samples/vfio-mdev/mtty.c 			ret =  mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              823 samples/vfio-mdev/mtty.c 			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              835 samples/vfio-mdev/mtty.c 			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              858 samples/vfio-mdev/mtty.c static ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
mdev              873 samples/vfio-mdev/mtty.c 			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              885 samples/vfio-mdev/mtty.c 			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              897 samples/vfio-mdev/mtty.c 			ret = mdev_access(mdev, (u8 *)&val, sizeof(val),
mdev              915 samples/vfio-mdev/mtty.c static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
mdev              922 samples/vfio-mdev/mtty.c 	if (!mdev)
mdev              925 samples/vfio-mdev/mtty.c 	mdev_state = mdev_get_drvdata(mdev);
mdev             1043 samples/vfio-mdev/mtty.c static int mtty_get_region_info(struct mdev_device *mdev,
mdev             1051 samples/vfio-mdev/mtty.c 	if (!mdev)
mdev             1054 samples/vfio-mdev/mtty.c 	mdev_state = mdev_get_drvdata(mdev);
mdev             1092 samples/vfio-mdev/mtty.c static int mtty_get_irq_info(struct mdev_device *mdev,
mdev             1117 samples/vfio-mdev/mtty.c static int mtty_get_device_info(struct mdev_device *mdev,
mdev             1127 samples/vfio-mdev/mtty.c static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
mdev             1134 samples/vfio-mdev/mtty.c 	if (!mdev)
mdev             1137 samples/vfio-mdev/mtty.c 	mdev_state = mdev_get_drvdata(mdev);
mdev             1154 samples/vfio-mdev/mtty.c 		ret = mtty_get_device_info(mdev, &info);
mdev             1179 samples/vfio-mdev/mtty.c 		ret = mtty_get_region_info(mdev, &info, &cap_type_id,
mdev             1203 samples/vfio-mdev/mtty.c 		ret = mtty_get_irq_info(mdev, &info);
mdev             1237 samples/vfio-mdev/mtty.c 		ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
mdev             1244 samples/vfio-mdev/mtty.c 		return mtty_reset(mdev);
mdev             1249 samples/vfio-mdev/mtty.c static int mtty_open(struct mdev_device *mdev)
mdev             1255 samples/vfio-mdev/mtty.c static void mtty_close(struct mdev_device *mdev)
mdev               57 sound/core/seq/oss/seq_oss_midi.c static int send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq_oss_midi *mdev);
mdev               96 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              100 sound/core/seq/oss/seq_oss_midi.c 	mdev = midi_devs[dev];
mdev              101 sound/core/seq/oss/seq_oss_midi.c 	if (mdev)
mdev              102 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_use(&mdev->use_lock);
mdev              104 sound/core/seq/oss/seq_oss_midi.c 	return mdev;
mdev              114 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              119 sound/core/seq/oss/seq_oss_midi.c 		mdev = midi_devs[i];
mdev              120 sound/core/seq/oss/seq_oss_midi.c 		if (mdev && mdev->client == client && mdev->port == port) {
mdev              122 sound/core/seq/oss/seq_oss_midi.c 			snd_use_lock_use(&mdev->use_lock);
mdev              124 sound/core/seq/oss/seq_oss_midi.c 			return mdev;
mdev              141 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              155 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = find_slot(pinfo->addr.client, pinfo->addr.port)) != NULL) {
mdev              157 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              164 sound/core/seq/oss/seq_oss_midi.c 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
mdev              165 sound/core/seq/oss/seq_oss_midi.c 	if (!mdev)
mdev              169 sound/core/seq/oss/seq_oss_midi.c 	mdev->client = pinfo->addr.client;
mdev              170 sound/core/seq/oss/seq_oss_midi.c 	mdev->port = pinfo->addr.port;
mdev              171 sound/core/seq/oss/seq_oss_midi.c 	mdev->flags = pinfo->capability;
mdev              172 sound/core/seq/oss/seq_oss_midi.c 	mdev->opened = 0;
mdev              173 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_init(&mdev->use_lock);
mdev              176 sound/core/seq/oss/seq_oss_midi.c 	strlcpy(mdev->name, pinfo->name, sizeof(mdev->name));
mdev              179 sound/core/seq/oss/seq_oss_midi.c 	if (snd_midi_event_new(MAX_MIDI_EVENT_BUF, &mdev->coder) < 0) {
mdev              181 sound/core/seq/oss/seq_oss_midi.c 		kfree(mdev);
mdev              185 sound/core/seq/oss/seq_oss_midi.c 	snd_midi_event_no_status(mdev->coder, 1);
mdev              198 sound/core/seq/oss/seq_oss_midi.c 			snd_midi_event_free(mdev->coder);
mdev              199 sound/core/seq/oss/seq_oss_midi.c 			kfree(mdev);
mdev              204 sound/core/seq/oss/seq_oss_midi.c 	mdev->seq_device = i;
mdev              205 sound/core/seq/oss/seq_oss_midi.c 	midi_devs[mdev->seq_device] = mdev;
mdev              217 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              221 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = find_slot(client, port)) != NULL) {
mdev              223 sound/core/seq/oss/seq_oss_midi.c 		midi_devs[mdev->seq_device] = NULL;
mdev              225 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              226 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_sync(&mdev->use_lock);
mdev              227 sound/core/seq/oss/seq_oss_midi.c 		snd_midi_event_free(mdev->coder);
mdev              228 sound/core/seq/oss/seq_oss_midi.c 		kfree(mdev);
mdev              248 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              253 sound/core/seq/oss/seq_oss_midi.c 		if ((mdev = midi_devs[i]) != NULL) {
mdev              254 sound/core/seq/oss/seq_oss_midi.c 			snd_midi_event_free(mdev->coder);
mdev              255 sound/core/seq/oss/seq_oss_midi.c 			kfree(mdev);
mdev              318 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              321 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              325 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened && mdev->devinfo != dp) {
mdev              326 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              335 sound/core/seq/oss/seq_oss_midi.c 	perm &= mdev->flags;
mdev              337 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              342 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev->opened & perm) == perm) {
mdev              343 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              347 sound/core/seq/oss/seq_oss_midi.c 	perm &= ~mdev->opened;
mdev              353 sound/core/seq/oss/seq_oss_midi.c 		subs.dest.client = mdev->client;
mdev              354 sound/core/seq/oss/seq_oss_midi.c 		subs.dest.port = mdev->port;
mdev              356 sound/core/seq/oss/seq_oss_midi.c 			mdev->opened |= PERM_WRITE;
mdev              359 sound/core/seq/oss/seq_oss_midi.c 		subs.sender.client = mdev->client;
mdev              360 sound/core/seq/oss/seq_oss_midi.c 		subs.sender.port = mdev->port;
mdev              365 sound/core/seq/oss/seq_oss_midi.c 			mdev->opened |= PERM_READ;
mdev              368 sound/core/seq/oss/seq_oss_midi.c 	if (! mdev->opened) {
mdev              369 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              373 sound/core/seq/oss/seq_oss_midi.c 	mdev->devinfo = dp;
mdev              374 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              384 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              387 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              389 sound/core/seq/oss/seq_oss_midi.c 	if (! mdev->opened || mdev->devinfo != dp) {
mdev              390 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              395 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened & PERM_WRITE) {
mdev              397 sound/core/seq/oss/seq_oss_midi.c 		subs.dest.client = mdev->client;
mdev              398 sound/core/seq/oss/seq_oss_midi.c 		subs.dest.port = mdev->port;
mdev              401 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened & PERM_READ) {
mdev              402 sound/core/seq/oss/seq_oss_midi.c 		subs.sender.client = mdev->client;
mdev              403 sound/core/seq/oss/seq_oss_midi.c 		subs.sender.port = mdev->port;
mdev              408 sound/core/seq/oss/seq_oss_midi.c 	mdev->opened = 0;
mdev              409 sound/core/seq/oss/seq_oss_midi.c 	mdev->devinfo = NULL;
mdev              411 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              421 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              424 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              428 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened & PERM_WRITE)
mdev              430 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened & PERM_READ)
mdev              433 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              444 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              446 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              448 sound/core/seq/oss/seq_oss_midi.c 	if (! mdev->opened) {
mdev              449 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              453 sound/core/seq/oss/seq_oss_midi.c 	if (mdev->opened & PERM_WRITE) {
mdev              458 sound/core/seq/oss/seq_oss_midi.c 		ev.dest.client = mdev->client;
mdev              459 sound/core/seq/oss/seq_oss_midi.c 		ev.dest.port = mdev->port;
mdev              482 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              492 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              494 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              496 sound/core/seq/oss/seq_oss_midi.c 	addr->client = mdev->client;
mdev              497 sound/core/seq/oss/seq_oss_midi.c 	addr->port = mdev->port;
mdev              498 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              509 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              514 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = find_slot(ev->source.client, ev->source.port)) == NULL)
mdev              516 sound/core/seq/oss/seq_oss_midi.c 	if (! (mdev->opened & PERM_READ)) {
mdev              517 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              522 sound/core/seq/oss/seq_oss_midi.c 		rc = send_synth_event(dp, ev, mdev->seq_device);
mdev              524 sound/core/seq/oss/seq_oss_midi.c 		rc = send_midi_event(dp, ev, mdev);
mdev              526 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              595 sound/core/seq/oss/seq_oss_midi.c send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq_oss_midi *mdev)
mdev              604 sound/core/seq/oss/seq_oss_midi.c 		snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
mdev              605 sound/core/seq/oss/seq_oss_midi.c 		snd_midi_event_reset_decode(mdev->coder);
mdev              607 sound/core/seq/oss/seq_oss_midi.c 		len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
mdev              609 sound/core/seq/oss/seq_oss_midi.c 			snd_seq_oss_readq_puts(dp->readq, mdev->seq_device, msg, len);
mdev              624 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              626 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              628 sound/core/seq/oss/seq_oss_midi.c 	if (snd_midi_event_encode_byte(mdev->coder, c, ev)) {
mdev              629 sound/core/seq/oss/seq_oss_midi.c 		snd_seq_oss_fill_addr(dp, ev, mdev->client, mdev->port);
mdev              630 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              633 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              643 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              645 sound/core/seq/oss/seq_oss_midi.c 	if ((mdev = get_mididev(dp, dev)) == NULL)
mdev              650 sound/core/seq/oss/seq_oss_midi.c 	strlcpy(inf->name, mdev->name, sizeof(inf->name));
mdev              651 sound/core/seq/oss/seq_oss_midi.c 	snd_use_lock_free(&mdev->use_lock);
mdev              678 sound/core/seq/oss/seq_oss_midi.c 	struct seq_oss_midi *mdev;
mdev              683 sound/core/seq/oss/seq_oss_midi.c 		mdev = get_mdev(i);
mdev              684 sound/core/seq/oss/seq_oss_midi.c 		if (mdev == NULL) {
mdev              688 sound/core/seq/oss/seq_oss_midi.c 		snd_iprintf(buf, "[%s] ALSA port %d:%d\n", mdev->name,
mdev              689 sound/core/seq/oss/seq_oss_midi.c 			    mdev->client, mdev->port);
mdev              691 sound/core/seq/oss/seq_oss_midi.c 			    capmode_str(mdev->flags),
mdev              692 sound/core/seq/oss/seq_oss_midi.c 			    capmode_str(mdev->opened));
mdev              693 sound/core/seq/oss/seq_oss_midi.c 		snd_use_lock_free(&mdev->use_lock);
mdev              242 sound/soc/intel/atom/sst/sst_acpi.c 	struct platform_device *mdev;
mdev              298 sound/soc/intel/atom/sst/sst_acpi.c 	mdev = platform_device_register_data(dev, mach->drv_name, -1,
mdev              300 sound/soc/intel/atom/sst/sst_acpi.c 	if (IS_ERR(mdev)) {
mdev              303 sound/soc/intel/atom/sst/sst_acpi.c 		return PTR_ERR(mdev);
mdev               36 sound/usb/media.c 	struct media_device *mdev;
mdev               44 sound/usb/media.c 	mdev = subs->stream->chip->media_dev;
mdev               45 sound/usb/media.c 	if (!mdev)
mdev               56 sound/usb/media.c 	mctl->media_dev = mdev;
mdev               75 sound/usb/media.c 	mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0,
mdev               91 sound/usb/media.c 	media_device_for_each_entity(entity, mdev) {
mdev              122 sound/usb/media.c 		struct media_device *mdev;
mdev              124 sound/usb/media.c 		mdev = mctl->media_dev;
mdev              125 sound/usb/media.c 		if (mdev && media_devnode_is_registered(mdev->devnode)) {
mdev              169 sound/usb/media.c 	struct media_device *mdev = chip->media_dev;
mdev              174 sound/usb/media.c 	if (!mdev)
mdev              179 sound/usb/media.c 		ctl_intf = media_devnode_create(mdev, intf_type, 0,
mdev              197 sound/usb/media.c 		mctl->media_dev = mdev;
mdev              230 sound/usb/media.c 	struct media_device *mdev = chip->media_dev;
mdev              232 sound/usb/media.c 	if (!mdev)
mdev              242 sound/usb/media.c 		if (media_devnode_is_registered(mdev->devnode)) {
mdev              249 sound/usb/media.c 	if (media_devnode_is_registered(mdev->devnode))
mdev              257 sound/usb/media.c 	struct media_device *mdev;
mdev              267 sound/usb/media.c 		mdev = chip->media_dev;
mdev              271 sound/usb/media.c 	mdev = media_device_usb_allocate(usbdev, KBUILD_MODNAME, THIS_MODULE);
mdev              272 sound/usb/media.c 	if (IS_ERR(mdev))
mdev              276 sound/usb/media.c 	chip->media_dev = mdev;
mdev              287 sound/usb/media.c 	if (!media_devnode_is_registered(mdev->devnode)) {
mdev              293 sound/usb/media.c 		ret = media_device_register(mdev);
mdev              297 sound/usb/media.c 			media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
mdev              312 sound/usb/media.c 	struct media_device *mdev = chip->media_dev;
mdev              323 sound/usb/media.c 	if (mdev) {
mdev              324 sound/usb/media.c 		media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
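Note: sound/usb/media.c uses the shared media-device allocator so the audio and video functions of one USB device end up on a single media controller node: allocate (or reuse) the device bound to the usb_device, register it only if its devnode is not registered yet, and drop the reference with media_device_delete() on disconnect. A condensed sketch of that pattern; my_media_get is hypothetical:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <media/media-dev-allocator.h>
#include <media/media-device.h>

static struct media_device *my_media_get(struct usb_device *usbdev)
{
        struct media_device *mdev;
        int ret;

        mdev = media_device_usb_allocate(usbdev, KBUILD_MODNAME, THIS_MODULE);
        if (IS_ERR(mdev))
                return mdev;

        if (!media_devnode_is_registered(mdev->devnode)) {
                ret = media_device_register(mdev);
                if (ret) {
                        media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
                        return ERR_PTR(ret);
                }
        }
        return mdev;
}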