dm_device  567 drivers/hv/hv_balloon.c static struct hv_dynmem_device dm_device;
dm_device  603 drivers/hv/hv_balloon.c list_for_each_entry(has, &dm_device.ha_region_list, list) {
dm_device  635 drivers/hv/hv_balloon.c if (dm_device.ha_waiting) {
dm_device  636 drivers/hv/hv_balloon.c dm_device.ha_waiting = false;
dm_device  637 drivers/hv/hv_balloon.c complete(&dm_device.ol_waitevent);
dm_device  642 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  645 drivers/hv/hv_balloon.c if (pfn_count <= dm_device.num_pages_onlined) {
dm_device  646 drivers/hv/hv_balloon.c dm_device.num_pages_onlined -= pfn_count;
dm_device  654 drivers/hv/hv_balloon.c dm_device.num_pages_onlined = 0;
dm_device  656 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  687 drivers/hv/hv_balloon.c lockdep_assert_held(&dm_device.ha_lock);
dm_device  688 drivers/hv/hv_balloon.c dm_device.num_pages_onlined++;
dm_device  715 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  727 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  729 drivers/hv/hv_balloon.c init_completion(&dm_device.ol_waitevent);
dm_device  730 drivers/hv/hv_balloon.c dm_device.ha_waiting = !memhp_auto_online;
dm_device  748 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  751 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  762 drivers/hv/hv_balloon.c if (dm_device.ha_waiting)
dm_device  763 drivers/hv/hv_balloon.c wait_for_completion_timeout(&dm_device.ol_waitevent,
dm_device  765 drivers/hv/hv_balloon.c post_status(&dm_device);
dm_device  775 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  776 drivers/hv/hv_balloon.c list_for_each_entry(has, &dm_device.ha_region_list, list) {
dm_device  785 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  796 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  797 drivers/hv/hv_balloon.c list_for_each_entry(has, &dm_device.ha_region_list, list) {
dm_device  843 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  862 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  863 drivers/hv/hv_balloon.c list_for_each_entry(has, &dm_device.ha_region_list, list) {
dm_device  915 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  917 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  926 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  943 drivers/hv/hv_balloon.c if (!dm_device.host_specified_ha_region) {
dm_device  970 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device  971 drivers/hv/hv_balloon.c list_add_tail(&ha_region->list, &dm_device.ha_region_list);
dm_device  972 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device  992 drivers/hv/hv_balloon.c struct hv_dynmem_device *dm = &dm_device;
dm_device 1259 drivers/hv/hv_balloon.c unsigned int num_pages = dm_device.balloon_wrk.num_pages;
dm_device 1295 drivers/hv/hv_balloon.c num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
dm_device 1305 drivers/hv/hv_balloon.c num_pages, dm_device.balloon_wrk.num_pages);
dm_device 1309 drivers/hv/hv_balloon.c dm_device.state = DM_INITIALIZED;
dm_device 1320 drivers/hv/hv_balloon.c ret = vmbus_sendpacket(dm_device.dev->channel,
dm_device 1328 drivers/hv/hv_balloon.c post_status(&dm_device);
dm_device 1338 drivers/hv/hv_balloon.c free_balloon_pages(&dm_device,
dm_device 1358 drivers/hv/hv_balloon.c complete(&dm_device.config_event);
dm_device 1372 drivers/hv/hv_balloon.c vmbus_sendpacket(dm_device.dev->channel, &resp,
dm_device 1388 drivers/hv/hv_balloon.c &dm_device.config_event, 1*HZ);
dm_device 1393 drivers/hv/hv_balloon.c reinit_completion(&dm_device.config_event);
dm_device 1509 drivers/hv/hv_balloon.c dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
dm_device 1510 drivers/hv/hv_balloon.c schedule_work(&dm_device.balloon_wrk.wrk);
dm_device 1545 drivers/hv/hv_balloon.c schedule_work(&dm_device.ha_wrk.wrk);
dm_device 1584 drivers/hv/hv_balloon.c dm_device.version = version_req.version.version;
dm_device 1592 drivers/hv/hv_balloon.c t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
dm_device 1602 drivers/hv/hv_balloon.c if (dm_device.state == DM_INIT_ERROR) {
dm_device 1608 drivers/hv/hv_balloon.c DYNMEM_MAJOR_VERSION(dm_device.version),
dm_device 1609 drivers/hv/hv_balloon.c DYNMEM_MINOR_VERSION(dm_device.version));
dm_device 1642 drivers/hv/hv_balloon.c t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
dm_device 1652 drivers/hv/hv_balloon.c if (dm_device.state == DM_INIT_ERROR) {
dm_device 1673 drivers/hv/hv_balloon.c dm_device.dev = dev;
dm_device 1674 drivers/hv/hv_balloon.c dm_device.state = DM_INITIALIZING;
dm_device 1675 drivers/hv/hv_balloon.c dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
dm_device 1676 drivers/hv/hv_balloon.c init_completion(&dm_device.host_event);
dm_device 1677 drivers/hv/hv_balloon.c init_completion(&dm_device.config_event);
dm_device 1678 drivers/hv/hv_balloon.c INIT_LIST_HEAD(&dm_device.ha_region_list);
dm_device 1679 drivers/hv/hv_balloon.c spin_lock_init(&dm_device.ha_lock);
dm_device 1680 drivers/hv/hv_balloon.c INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
dm_device 1681 drivers/hv/hv_balloon.c INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
dm_device 1682 drivers/hv/hv_balloon.c dm_device.host_specified_ha_region = false;
dm_device 1689 drivers/hv/hv_balloon.c hv_set_drvdata(dev, &dm_device);
dm_device 1695 drivers/hv/hv_balloon.c dm_device.state = DM_INITIALIZED;
dm_device 1697 drivers/hv/hv_balloon.c dm_device.thread =
dm_device 1698 drivers/hv/hv_balloon.c kthread_run(dm_thread_func, &dm_device, "hv_balloon");
dm_device 1699 drivers/hv/hv_balloon.c if (IS_ERR(dm_device.thread)) {
dm_device 1700 drivers/hv/hv_balloon.c ret = PTR_ERR(dm_device.thread);
dm_device 1734 drivers/hv/hv_balloon.c spin_lock_irqsave(&dm_device.ha_lock, flags);
dm_device 1743 drivers/hv/hv_balloon.c spin_unlock_irqrestore(&dm_device.ha_lock, flags);
dm_device   61 drivers/md/dm-init.c struct dm_device *dev, *tmp;
dm_device  106 drivers/md/dm-init.c static char __init *dm_parse_table_entry(struct dm_device *dev, char *str)
dm_device  157 drivers/md/dm-init.c static int __init dm_parse_table(struct dm_device *dev, char *str)
dm_device  187 drivers/md/dm-init.c static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
dm_device  235 drivers/md/dm-init.c struct dm_device *dev;
dm_device  266 drivers/md/dm-init.c struct dm_device *dev;
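Note that the two files reference different identifiers: in drivers/hv/hv_balloon.c dm_device is the single static struct hv_dynmem_device instance, while in drivers/md/dm-init.c struct dm_device is a type used by the boot-time table parsing helpers.

The hv_balloon.c hits repeat one pattern: every walk of dm_device.ha_region_list (lines 603, 776, 797, 863) happens under dm_device.ha_lock taken with spin_lock_irqsave(), and additions to the list (line 971) hold the same lock. The fragment below is a minimal sketch of that pattern only; the struct layouts, the entry type name and the helper name are assumptions for illustration, not the driver's actual definitions.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Assumed, simplified layouts -- only the members this sketch touches. */
struct hv_hotadd_state {                 /* hypothetical name for one hot-add region */
	struct list_head list;
};

static struct hv_dynmem_device {
	spinlock_t ha_lock;              /* protects ha_region_list; set up with
	                                  * spin_lock_init() at probe time (cf. line 1679) */
	struct list_head ha_region_list; /* hot-add regions, appended at line 971 */
} dm_device;

/* Hypothetical helper showing the traversal pattern used throughout hv_balloon.c. */
static void walk_ha_regions_sketch(void)
{
	struct hv_hotadd_state *has;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/* inspect or update one hot-add region here */
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}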