dtl 797 arch/powerpc/include/asm/kvm_host.h struct kvmppc_vpa dtl;
dtl 193 arch/powerpc/kernel/time.c struct dtl_entry *dtl = local_paca->dtl_curr;
dtl 200 arch/powerpc/kernel/time.c if (!dtl)
dtl 206 arch/powerpc/kernel/time.c dtb = be64_to_cpu(dtl->timebase);
dtl 207 arch/powerpc/kernel/time.c tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
dtl 208 arch/powerpc/kernel/time.c be32_to_cpu(dtl->ready_to_enqueue_time);
dtl 213 arch/powerpc/kernel/time.c dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
dtl 219 arch/powerpc/kernel/time.c dtl_consumer(dtl, i);
dtl 222 arch/powerpc/kernel/time.c ++dtl;
dtl 223 arch/powerpc/kernel/time.c if (dtl == dtl_end)
dtl 224 arch/powerpc/kernel/time.c dtl = local_paca->dispatch_log;
dtl 227 arch/powerpc/kernel/time.c local_paca->dtl_curr = dtl;
dtl 560 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.dtl;
dtl 577 arch/powerpc/kvm/book3s_hv.c if (vpa_is_registered(&tvcpu->arch.dtl) ||
dtl 586 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.dtl;
dtl 661 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.update_pending))
dtl 670 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.dtl.update_pending) {
dtl 671 arch/powerpc/kvm/book3s_hv.c kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
dtl 672 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
dtl 728 arch/powerpc/kvm/book3s_hv.c if (dt == vcpu->arch.dtl.pinned_end)
dtl 729 arch/powerpc/kvm/book3s_hv.c dt = vcpu->arch.dtl.pinned_addr;
dtl 734 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.dirty = true;
dtl 1742 arch/powerpc/kvm/book3s_hv.c val->vpaval.addr = vcpu->arch.dtl.next_gpa;
dtl 1743 arch/powerpc/kvm/book3s_hv.c val->vpaval.length = vcpu->arch.dtl.len;
dtl 1950 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.next_gpa))
dtl 1970 arch/powerpc/kvm/book3s_hv.c r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
dtl 2429 arch/powerpc/kvm/book3s_hv.c unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
dtl 2856 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.update_pending)
dtl 4444 arch/powerpc/kvm/book3s_hv.c kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
dtl 28 arch/powerpc/platforms/pseries/dtl.c static DEFINE_PER_CPU(struct dtl, cpu_dtl);
dtl 81 arch/powerpc/platforms/pseries/dtl.c static int dtl_start(struct dtl *dtl)
dtl 83 arch/powerpc/platforms/pseries/dtl.c struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
dtl 85 arch/powerpc/platforms/pseries/dtl.c dtlr->buf = dtl->buf;
dtl 86 arch/powerpc/platforms/pseries/dtl.c dtlr->buf_end = dtl->buf + dtl->buf_entries;
dtl 91 arch/powerpc/platforms/pseries/dtl.c dtlr->write_ptr = dtl->buf;
dtl 94 arch/powerpc/platforms/pseries/dtl.c lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
dtl 101 arch/powerpc/platforms/pseries/dtl.c static void dtl_stop(struct dtl *dtl)
dtl 103 arch/powerpc/platforms/pseries/dtl.c struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
dtl 111 arch/powerpc/platforms/pseries/dtl.c lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
dtl 117 arch/powerpc/platforms/pseries/dtl.c static u64 dtl_current_index(struct dtl *dtl)
dtl 119 arch/powerpc/platforms/pseries/dtl.c return per_cpu(dtl_rings, dtl->cpu).write_index;
dtl 124 arch/powerpc/platforms/pseries/dtl.c static int dtl_start(struct dtl *dtl)
dtl 131 arch/powerpc/platforms/pseries/dtl.c ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
dtl 133 arch/powerpc/platforms/pseries/dtl.c hwcpu = get_hard_smp_processor_id(dtl->cpu);
dtl 134 arch/powerpc/platforms/pseries/dtl.c addr = __pa(dtl->buf);
dtl 138 arch/powerpc/platforms/pseries/dtl.c "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
dtl 143 arch/powerpc/platforms/pseries/dtl.c lppaca_of(dtl->cpu).dtl_idx = 0;
dtl 150 arch/powerpc/platforms/pseries/dtl.c lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
dtl 155 arch/powerpc/platforms/pseries/dtl.c static void dtl_stop(struct dtl *dtl)
dtl 157 arch/powerpc/platforms/pseries/dtl.c int hwcpu = get_hard_smp_processor_id(dtl->cpu);
dtl 159 arch/powerpc/platforms/pseries/dtl.c lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
dtl 164 arch/powerpc/platforms/pseries/dtl.c static u64 dtl_current_index(struct dtl *dtl)
dtl 166 arch/powerpc/platforms/pseries/dtl.c return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
dtl 170 arch/powerpc/platforms/pseries/dtl.c static int dtl_enable(struct dtl *dtl)
dtl 180 arch/powerpc/platforms/pseries/dtl.c if (dtl->buf)
dtl 188 arch/powerpc/platforms/pseries/dtl.c buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
dtl 191 arch/powerpc/platforms/pseries/dtl.c __func__, dtl->cpu);
dtl 196 arch/powerpc/platforms/pseries/dtl.c spin_lock(&dtl->lock);
dtl 198 arch/powerpc/platforms/pseries/dtl.c if (!dtl->buf) {
dtl 200 arch/powerpc/platforms/pseries/dtl.c dtl->buf_entries = n_entries;
dtl 201 arch/powerpc/platforms/pseries/dtl.c dtl->buf = buf;
dtl 202 arch/powerpc/platforms/pseries/dtl.c dtl->last_idx = 0;
dtl 203 arch/powerpc/platforms/pseries/dtl.c rc = dtl_start(dtl);
dtl 205 arch/powerpc/platforms/pseries/dtl.c dtl->buf = NULL;
dtl 207 arch/powerpc/platforms/pseries/dtl.c spin_unlock(&dtl->lock);
dtl 217 arch/powerpc/platforms/pseries/dtl.c static void dtl_disable(struct dtl *dtl)
dtl 219 arch/powerpc/platforms/pseries/dtl.c spin_lock(&dtl->lock);
dtl 220 arch/powerpc/platforms/pseries/dtl.c dtl_stop(dtl);
dtl 221 arch/powerpc/platforms/pseries/dtl.c kmem_cache_free(dtl_cache, dtl->buf);
dtl 222 arch/powerpc/platforms/pseries/dtl.c dtl->buf = NULL;
dtl 223 arch/powerpc/platforms/pseries/dtl.c dtl->buf_entries = 0;
dtl 224 arch/powerpc/platforms/pseries/dtl.c spin_unlock(&dtl->lock);
dtl 232 arch/powerpc/platforms/pseries/dtl.c struct dtl *dtl = inode->i_private;
dtl 235 arch/powerpc/platforms/pseries/dtl.c rc = dtl_enable(dtl);
dtl 239 arch/powerpc/platforms/pseries/dtl.c filp->private_data = dtl;
dtl 245 arch/powerpc/platforms/pseries/dtl.c struct dtl *dtl = inode->i_private;
dtl 246 arch/powerpc/platforms/pseries/dtl.c dtl_disable(dtl);
dtl 254 arch/powerpc/platforms/pseries/dtl.c struct dtl *dtl;
dtl 260 arch/powerpc/platforms/pseries/dtl.c dtl = filp->private_data;
dtl 268 arch/powerpc/platforms/pseries/dtl.c spin_lock(&dtl->lock);
dtl 270 arch/powerpc/platforms/pseries/dtl.c cur_idx = dtl_current_index(dtl);
dtl 271 arch/powerpc/platforms/pseries/dtl.c last_idx = dtl->last_idx;
dtl 273 arch/powerpc/platforms/pseries/dtl.c if (last_idx + dtl->buf_entries <= cur_idx)
dtl 274 arch/powerpc/platforms/pseries/dtl.c last_idx = cur_idx - dtl->buf_entries + 1;
dtl 280 arch/powerpc/platforms/pseries/dtl.c dtl->last_idx = last_idx + n_req;
dtl 282 arch/powerpc/platforms/pseries/dtl.c spin_unlock(&dtl->lock);
dtl 287 arch/powerpc/platforms/pseries/dtl.c i = last_idx % dtl->buf_entries;
dtl 290 arch/powerpc/platforms/pseries/dtl.c if (i + n_req > dtl->buf_entries) {
dtl 291 arch/powerpc/platforms/pseries/dtl.c read_size = dtl->buf_entries - i;
dtl 293 arch/powerpc/platforms/pseries/dtl.c rc = copy_to_user(buf, &dtl->buf[i],
dtl 305 arch/powerpc/platforms/pseries/dtl.c rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
dtl 323 arch/powerpc/platforms/pseries/dtl.c static int dtl_setup_file(struct dtl *dtl)
dtl 327 arch/powerpc/platforms/pseries/dtl.c sprintf(name, "cpu-%d", dtl->cpu);
dtl 329 arch/powerpc/platforms/pseries/dtl.c dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
dtl 330 arch/powerpc/platforms/pseries/dtl.c if (!dtl->file)
dtl 366 arch/powerpc/platforms/pseries/dtl.c struct dtl *dtl = &per_cpu(cpu_dtl, i);
dtl 367 arch/powerpc/platforms/pseries/dtl.c spin_lock_init(&dtl->lock);
dtl 368 arch/powerpc/platforms/pseries/dtl.c dtl->cpu = i;
dtl 370 arch/powerpc/platforms/pseries/dtl.c rc = dtl_setup_file(dtl);
dtl 85 arch/powerpc/platforms/pseries/lpar.c struct dtl_entry *dtl;
dtl 91 arch/powerpc/platforms/pseries/lpar.c dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
dtl 92 arch/powerpc/platforms/pseries/lpar.c if (!dtl) {
dtl 102 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log = dtl;
dtl 103 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
dtl 104 arch/powerpc/platforms/pseries/lpar.c pp->dtl_curr = dtl;
dtl 117 arch/powerpc/platforms/pseries/lpar.c struct dtl_entry *dtl;
dtl 121 arch/powerpc/platforms/pseries/lpar.c dtl = pp->dispatch_log;
dtl 122 arch/powerpc/platforms/pseries/lpar.c if (dtl && dtl_mask) {
dtl 124 arch/powerpc/platforms/pseries/lpar.c pp->dtl_curr = dtl;
dtl 128 arch/powerpc/platforms/pseries/lpar.c dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
dtl 129 arch/powerpc/platforms/pseries/lpar.c ret = register_dtl(hwcpu, __pa(dtl));
dtl 360 arch/powerpc/platforms/pseries/lpar.c struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
dtl 379 arch/powerpc/platforms/pseries/lpar.c dtle = *dtl;
dtl 387 arch/powerpc/platforms/pseries/lpar.c dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
dtl 392 arch/powerpc/platforms/pseries/lpar.c ++dtl;
dtl 393 arch/powerpc/platforms/pseries/lpar.c if (dtl == dtl_end)
dtl 394 arch/powerpc/platforms/pseries/lpar.c dtl = local_paca->dispatch_log;
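
The same circular-buffer walk recurs throughout this listing: scan_dispatch_log() in arch/powerpc/kernel/time.c (lines 213-227), the stolen-time scan in arch/powerpc/platforms/pseries/lpar.c (lines 360-394), and the debugfs reader in arch/powerpc/platforms/pseries/dtl.c (lines 270-305) all advance a cursor through a fixed-size dispatch log and wrap back to the start when the cursor reaches the end, resyncing if the hypervisor's write index has lapped them. The sketch below is a minimal userspace illustration of that pattern only, not kernel code: struct fake_dtl_entry, consume(), and the tiny N_DISPATCH_LOG value are assumed stand-ins, and the kernel's be32_to_cpu/be64_to_cpu conversions and locking are omitted.

/*
 * Hypothetical userspace sketch of the dispatch-log ring walk.
 * Mirrors the ++dtl / dtl == dtl_end / reset-to-start sequence and the
 * last_idx resync in dtl_read(); entry layout is simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define N_DISPATCH_LOG 4	/* the kernel's buffer is much larger */

struct fake_dtl_entry {
	uint64_t timebase;
	uint32_t enqueue_to_dispatch_time;
	uint32_t ready_to_enqueue_time;
};

static struct fake_dtl_entry dispatch_log[N_DISPATCH_LOG];

/* Stand-in for the kernel's dtl_consumer() callback: just print. */
static void consume(const struct fake_dtl_entry *e, uint64_t idx)
{
	printf("idx %llu: tb=%llu stolen=%u\n",
	       (unsigned long long)idx,
	       (unsigned long long)e->timebase,
	       (unsigned)(e->enqueue_to_dispatch_time +
			  e->ready_to_enqueue_time));
}

int main(void)
{
	/* Pretend the producer has written 6 entries; only the last
	 * N_DISPATCH_LOG survive in the ring. */
	uint64_t write_idx = 6, read_idx = 0;
	const struct fake_dtl_entry *end = dispatch_log + N_DISPATCH_LOG;
	const struct fake_dtl_entry *cur;

	for (unsigned n = 0; n < 6; n++)
		dispatch_log[n % N_DISPATCH_LOG] = (struct fake_dtl_entry){
			.timebase = 1000 + n,
			.enqueue_to_dispatch_time = n,
		};

	/* Producer lapped the reader: skip to the oldest safe entry,
	 * as dtl_read() does with last_idx (dtl.c lines 273-274). */
	if (read_idx + N_DISPATCH_LOG <= write_idx)
		read_idx = write_idx - N_DISPATCH_LOG + 1;

	/* Walk from read_idx to write_idx, wrapping at the buffer end. */
	cur = dispatch_log + (read_idx % N_DISPATCH_LOG);
	for (; read_idx < write_idx; read_idx++) {
		consume(cur, read_idx);
		if (++cur == end)
			cur = dispatch_log;
	}
	return 0;
}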