Lines Matching refs:hv_context
37 struct hv_context hv_context = {   (variable definition)
100 void *hypercall_page = hv_context.hypercall_page; in do_hypercall()
121 void *hypercall_page = hv_context.hypercall_page; in do_hypercall()
144 memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); in hv_init()
145 memset(hv_context.synic_message_page, 0, in hv_init()
147 memset(hv_context.post_msg_page, 0, in hv_init()
149 memset(hv_context.vp_index, 0, in hv_init()
151 memset(hv_context.event_dpc, 0, in hv_init()
153 memset(hv_context.clk_evt, 0, in hv_init()
161 hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0); in hv_init()
162 wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid); in hv_init()
184 hv_context.hypercall_page = virtaddr; in hv_init()
213 if (hv_context.hypercall_page) { in hv_cleanup()
216 vfree(hv_context.hypercall_page); in hv_cleanup()
217 hv_context.hypercall_page = NULL; in hv_cleanup()
238 hv_context.post_msg_page[get_cpu()]; in hv_post_message()
333 hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC); in hv_synic_alloc()
334 if (hv_context.event_dpc[cpu] == NULL) { in hv_synic_alloc()
338 tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu); in hv_synic_alloc()
340 hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC); in hv_synic_alloc()
341 if (hv_context.clk_evt[cpu] == NULL) { in hv_synic_alloc()
345 hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu); in hv_synic_alloc()
347 hv_context.synic_message_page[cpu] = in hv_synic_alloc()
350 if (hv_context.synic_message_page[cpu] == NULL) { in hv_synic_alloc()
355 hv_context.synic_event_page[cpu] = in hv_synic_alloc()
358 if (hv_context.synic_event_page[cpu] == NULL) { in hv_synic_alloc()
363 hv_context.post_msg_page[cpu] = in hv_synic_alloc()
366 if (hv_context.post_msg_page[cpu] == NULL) { in hv_synic_alloc()
379 kfree(hv_context.event_dpc[cpu]); in hv_synic_free_cpu()
380 kfree(hv_context.clk_evt[cpu]); in hv_synic_free_cpu()
381 if (hv_context.synic_event_page[cpu]) in hv_synic_free_cpu()
382 free_page((unsigned long)hv_context.synic_event_page[cpu]); in hv_synic_free_cpu()
383 if (hv_context.synic_message_page[cpu]) in hv_synic_free_cpu()
384 free_page((unsigned long)hv_context.synic_message_page[cpu]); in hv_synic_free_cpu()
385 if (hv_context.post_msg_page[cpu]) in hv_synic_free_cpu()
386 free_page((unsigned long)hv_context.post_msg_page[cpu]); in hv_synic_free_cpu()
415 if (!hv_context.hypercall_page) in hv_synic_init()
424 simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu]) in hv_synic_init()
432 siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu]) in hv_synic_init()
453 hv_context.synic_initialized = true; in hv_synic_init()
461 hv_context.vp_index[cpu] = (u32)vp_index; in hv_synic_init()
463 INIT_LIST_HEAD(&hv_context.percpu_list[cpu]); in hv_synic_init()
469 clockevents_config_and_register(hv_context.clk_evt[cpu], in hv_synic_init()
487 clockevents_unbind_device(hv_context.clk_evt[cpu], cpu); in hv_synic_clockevents_cleanup()
501 if (!hv_context.synic_initialized) in hv_synic_cleanup()
507 hv_context.clk_evt[cpu]); in hv_synic_cleanup()
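
Taken together, the references above touch most of hv_context's fields. The layout below is a sketch reconstructed purely from the fields referenced in this listing; field order and exact types are assumptions, and the authoritative definition lives in the Hyper-V driver headers (drivers/hv/hyperv_vmbus.h).

/*
 * Sketch of struct hv_context, inferred only from the references in
 * this listing; not the authoritative definition.
 */
struct hv_context {
	u64 guestid;			/* written in hv_init() via generate_guest_id() */
	void *hypercall_page;		/* mapped in hv_init(), vfree'd in hv_cleanup() */
	bool synic_initialized;		/* set true near the end of hv_synic_init() */

	/* one entry per possible CPU, cleared with memset() in hv_init() */
	void *synic_message_page[NR_CPUS];
	void *synic_event_page[NR_CPUS];
	void *post_msg_page[NR_CPUS];
	u32 vp_index[NR_CPUS];
	struct tasklet_struct *event_dpc[NR_CPUS];
	struct clock_event_device *clk_evt[NR_CPUS];
	struct list_head percpu_list[NR_CPUS];
};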
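The hv_synic_alloc() and hv_synic_free_cpu() hits trace a per-CPU allocate/free pattern. Below is a condensed sketch of what those references imply; the _one_cpu function names are hypothetical, and the page allocations are truncated in the listing, so get_zeroed_page() and the exact error handling are assumptions.

/* Hypothetical helper names; condensed from the hv_synic_alloc() references. */
static int hv_synic_alloc_one_cpu(int cpu)
{
	/* per-CPU tasklet for channel event processing (kmalloc'd, then tasklet_init) */
	hv_context.event_dpc[cpu] = kmalloc(sizeof(struct tasklet_struct),
					    GFP_ATOMIC);
	if (!hv_context.event_dpc[cpu])
		return -ENOMEM;
	tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);

	/* per-CPU clockevent device, registered later in hv_synic_init() */
	hv_context.clk_evt[cpu] = kzalloc(sizeof(struct clock_event_device),
					  GFP_ATOMIC);
	if (!hv_context.clk_evt[cpu])
		return -ENOMEM;
	hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

	/* page-sized buffers whose physical addresses are handed to the hypervisor;
	 * get_zeroed_page() is an assumption where the listing cuts the line off */
	hv_context.synic_message_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
	hv_context.synic_event_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
	hv_context.post_msg_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!hv_context.synic_message_page[cpu] ||
	    !hv_context.synic_event_page[cpu] ||
	    !hv_context.post_msg_page[cpu])
		return -ENOMEM;

	return 0;
}

/* Mirror of the hv_synic_free_cpu() references above. */
static void hv_synic_free_one_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}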