/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>
#include <core/option.h>
#include <subdev/timer.h>

#define NV41_GART_SIZE (512 * 1024 * 1024)	/* 512MiB GART aperture */
#define NV41_GART_PAGE (  4 * 1024)		/* 4KiB GART pages */

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/

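/*
 * Write PTEs for a scatter/gather list of DMA addresses.  The host's
 * PAGE_SIZE may be larger than the 4KiB GART page size, so each system
 * page is split across PAGE_SIZE / NV41_GART_PAGE consecutive entries.
 */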
static void
nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = pte * 4; /* PTEs are 32-bit, convert index to byte offset */
	nvkm_kmap(pgt);
	while (cnt) {
		u32 page = PAGE_SIZE / NV41_GART_PAGE;
		u64 phys = (u64)*list++;
		while (cnt && page--) {
			/* PTE holds address bits 38:12 in 31:5, valid in 0 */
			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
			phys += NV41_GART_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}

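/* Invalidate "cnt" PTEs starting at index "pte". */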
static void
nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	pte = pte * 4;
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}

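/*
 * Flush the GART TLB.  These registers aren't publicly documented; the
 * write to 0x100810 appears to trigger the flush, with bit 5 signalling
 * completion within the two-second poll window.
 */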
static void
nv41_vm_flush(struct nvkm_vm *vm)
{
	struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
	struct nvkm_device *device = mmu->base.subdev.device;

	mutex_lock(&mmu->base.subdev.mutex);
	nvkm_wr32(device, 0x100810, 0x00000022);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x100810) & 0x00000020)
			break;
	);
	nvkm_wr32(device, 0x100810, 0x00000000);
	mutex_unlock(&mmu->base.subdev.mutex);
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/

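/*
 * One-time setup: create a VM spanning the 512MiB GART aperture, backed
 * by a single flat page table in instance memory, sized at one 32-bit
 * PTE per 4KiB page: (NV41_GART_SIZE / NV41_GART_PAGE) * 4 = 512KiB.
 */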
static int
nv41_mmu_oneinit(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	int ret;

	ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
			      &mmu->vm->pgt[0].mem[0]);
	mmu->vm->pgt[0].refcount[0] = 1;
	return ret;
}

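/*
 * Point the hardware at the page table.  0x100800 presumably takes the
 * table's instance address together with an enable bit; the 0x10008c
 * and 0x100820 writes aren't publicly documented.
 */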
static void
nv41_mmu_init(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
	nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
	nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
	nvkm_wr32(device, 0x100820, 0x00000000);
}

static const struct nvkm_mmu_func
nv41_mmu = {
	.dtor = nv04_mmu_dtor,
	.oneinit = nv41_mmu_oneinit,
	.init = nv41_mmu_init,
	.limit = NV41_GART_SIZE,
	.dma_bits = 39,		/* matches the 27 address bits in each PTE */
	.pgt_bits = 32 - 12,
	.spg_shift = 12,	/* small and large pages are both 4KiB */
	.lpg_shift = 12,
	.map_sg = nv41_vm_map_sg,
	.unmap = nv41_vm_unmap,
	.flush = nv41_vm_flush,
};

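/*
 * The NV41-style GART requires PCIE; fall back to the NV04 MMU on AGP
 * boards, or when the "NvPCIE" config option has been used to disable
 * PCIE GART support.
 */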
int
nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	if (device->type == NVKM_DEVICE_AGP ||
	    !nvkm_boolopt(device->cfgopt, "NvPCIE", true))
		return nv04_mmu_new(device, index, pmmu);

	return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
}