This source file includes the following definitions:
- hwsq_cmd
- nvkm_hwsq_init
- nvkm_hwsq_fini
- nvkm_hwsq_wr32
- nvkm_hwsq_setf
- nvkm_hwsq_wait
- nvkm_hwsq_wait_vblank
- nvkm_hwsq_nsec
#include "priv.h"

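/*
 * Builder state for a HWSQ ("hardware sequencer") script.  Opcode bytes
 * accumulate in c.data; addr and data cache the register address and
 * value most recently emitted, which lets nvkm_hwsq_wr32() use the
 * short command forms when the upper halves are unchanged.
 */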
struct nvkm_hwsq {
	struct nvkm_subdev *subdev;
	u32 addr;
	u32 data;
	struct {
		u8 data[512];
		u16 size;
	} c;
};

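/* Append 'size' raw opcode bytes to the script.  There is no bounds
 * check here; callers must keep the total within sizeof(hwsq->c.data).
 */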
static void
hwsq_cmd(struct nvkm_hwsq *hwsq, int size, u8 data[])
{
	memcpy(&hwsq->c.data[hwsq->c.size], data, size * sizeof(data[0]));
	hwsq->c.size += size;
}

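/*
 * Allocate and initialise a script builder.  The command buffer is
 * pre-filled with 0x7f bytes, so the padding included by
 * nvkm_hwsq_fini() terminates the script (0x7f appears to be the
 * sequencer's exit opcode).
 */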
int
nvkm_hwsq_init(struct nvkm_subdev *subdev, struct nvkm_hwsq **phwsq)
{
	struct nvkm_hwsq *hwsq;

	hwsq = *phwsq = kmalloc(sizeof(*hwsq), GFP_KERNEL);
	if (hwsq) {
		hwsq->subdev = subdev;
		hwsq->addr = ~0;
		hwsq->data = ~0;
		memset(hwsq->c.data, 0x7f, sizeof(hwsq->c.data));
		hwsq->c.size = 0;
	}

	return hwsq ? 0 : -ENOMEM;
}

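/*
 * Finalise the script and release the builder.  The byte count is
 * converted to 32-bit words; using +4 rather than +3 guarantees that at
 * least one 0x7f pad byte is included after the last command.  If the
 * ucode fits the backend's limit it is executed (when 'exec' is set),
 * and on any failure the words are dumped for debugging.
 */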
int
nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec)
{
	struct nvkm_hwsq *hwsq = *phwsq;
	int ret = 0, i;
	if (hwsq) {
		struct nvkm_subdev *subdev = hwsq->subdev;
		struct nvkm_bus *bus = subdev->device->bus;
		hwsq->c.size = (hwsq->c.size + 4) / 4;
		if (hwsq->c.size <= bus->func->hwsq_size) {
			if (exec)
				ret = bus->func->hwsq_exec(bus,
							   (u32 *)hwsq->c.data,
							   hwsq->c.size);
			if (ret)
				nvkm_error(subdev, "hwsq exec failed: %d\n", ret);
		} else {
			nvkm_error(subdev, "hwsq ucode too large\n");
			ret = -ENOSPC;
		}

		for (i = 0; ret && i < hwsq->c.size; i++)
			nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]);

		*phwsq = NULL;
		kfree(hwsq);
	}
	return ret;
}

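/*
 * Emit a register write.  Opcodes 0xe0/0xe2 load a full 32-bit
 * address/value, while the short forms 0x40/0x42 replace only the low
 * 16 bits, so runs of writes to nearby registers with similar values
 * stay compact.  The data opcode is emitted first, and skipped entirely
 * when the value is unchanged from the previous write.
 */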
void
nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);

	if (hwsq->data != data) {
		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
			hwsq_cmd(hwsq, 5, (u8[]){ 0xe2, data, data >> 8,
						  data >> 16, data >> 24 });
		} else {
			hwsq_cmd(hwsq, 3, (u8[]){ 0x42, data, data >> 8 });
		}
	}

	if ((addr & 0xffff0000) != (hwsq->addr & 0xffff0000)) {
		hwsq_cmd(hwsq, 5, (u8[]){ 0xe0, addr, addr >> 8,
					  addr >> 16, addr >> 24 });
	} else {
		hwsq_cmd(hwsq, 3, (u8[]){ 0x40, addr, addr >> 8 });
	}

	hwsq->addr = addr;
	hwsq->data = data;
}

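/*
 * Emit a flag update as a single opcode byte: 0x80 + flag selects the
 * flag, with 0x20 added once for data >= 0 and again for data >= 1,
 * giving three encodings for negative, zero and non-zero values.
 */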
void
nvkm_hwsq_setf(struct nvkm_hwsq *hwsq, u8 flag, int data)
{
	nvkm_debug(hwsq->subdev, " FLAG[%02x] = %d\n", flag, data);
	flag += 0x80;
	if (data >= 0)
		flag += 0x20;
	if (data >= 1)
		flag += 0x20;
	hwsq_cmd(hwsq, 1, (u8[]){ flag });
}

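/* Emit a wait (opcode 0x5f) that stalls the sequencer until 'flag'
 * reads back as 'data'.
 */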
void
nvkm_hwsq_wait(struct nvkm_hwsq *hwsq, u8 flag, u8 data)
{
	nvkm_debug(hwsq->subdev, " WAIT[%02x] = %d\n", flag, data);
	hwsq_cmd(hwsq, 3, (u8[]){ 0x5f, flag, data });
}

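/*
 * Queue a wait for the next vblank.  The sequencer can only watch one
 * signal, so pick an active display head and wait for its vblank flag
 * to go low and then high, i.e. the next rising edge.
 */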
void
nvkm_hwsq_wait_vblank(struct nvkm_hwsq *hwsq)
{
	struct nvkm_subdev *subdev = hwsq->subdev;
	struct nvkm_device *device = subdev->device;
	u32 heads, x, y, px = 0;
	int i, head_sync;

	heads = nvkm_rd32(device, 0x610050);
	for (i = 0; i < 2; i++) {
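		/* Heuristic: sync to the enabled head with the largest resolution. */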
		if (heads & (2 << (i << 3))) {
			x = nvkm_rd32(device, 0x610b40 + (0x540 * i));
			y = (x & 0xffff0000) >> 16;
			x &= 0x0000ffff;
			if ((x * y) > px) {
				px = (x * y);
				head_sync = i;
			}
		}
	}

	if (px == 0) {
		nvkm_debug(subdev, "WAIT VBLANK !NO ACTIVE HEAD\n");
		return;
	}

	nvkm_debug(subdev, "WAIT VBLANK HEAD%d\n", head_sync);
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x0);
	nvkm_hwsq_wait(hwsq, head_sync ? 0x3 : 0x1, 0x1);
}

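/*
 * Emit a delay opcode (0x00-0x3f).  The requested time is converted to
 * microseconds and packed as a 2-bit value plus a power-of-four shift,
 * so the encoded delay is roughly usec << (shift * 2) microseconds,
 * rounded down by the encoding (note the microsecond count passes
 * through a u8, which limits the encodable range).
 */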
void
nvkm_hwsq_nsec(struct nvkm_hwsq *hwsq, u32 nsec)
{
	u8 shift = 0, usec = nsec / 1000;
	while (usec & ~3) {
		usec >>= 2;
		shift++;
	}

	nvkm_debug(hwsq->subdev, " DELAY = %d ns\n", nsec);
	hwsq_cmd(hwsq, 1, (u8[]){ 0x00 | (shift << 2) | usec });
}