/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <drm/drmP.h>
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)

static bool udl_parse_vendor_descriptor(struct drm_device *dev,
					struct usb_device *usbdev)
{
	struct udl_device *udl = dev->dev_private;
	char *desc;
	char *buf;
	char *desc_end;

	/* usb_get_descriptor() returns a byte count or a negative errno */
	int total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||      /* vendor descriptor type */
		    (desc[2] != 0x01) ||      /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}
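/*
 * Bulk-write completion handler. Requeue the URB on the free list, bump
 * the available count and release one count of limit_sem so a waiter in
 * udl_get_urb() (or udl_free_urb_list() at teardown) can take it. Unlink
 * faults (-ENOENT, -ECONNRESET, -ESHUTDOWN) are expected and not treated
 * as errors; any other failure sets lost_pixels.
 */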
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}

static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at shutdown */
		ret = down_interruptible(&udl->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&udl->urbs.lock, flags);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&udl->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = dev->dev_private;
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&udl->urbs.lock);

	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		i++;
	}

	sema_init(&udl->urbs.limit_sem, i);
	udl->urbs.count = i;
	udl->urbs.available = i;

	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
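/*
 * Take a URB off the free list, sleeping up to GET_URB_TIMEOUT for one to
 * be recycled by udl_urb_completion(). On timeout the lost_pixels flag is
 * set and NULL is returned.
 */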
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&udl->urbs.lock, flags);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = dev->dev_private;
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct usb_device *udev = (void *)flags;
	struct udl_device *udl;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");
	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
	if (!udl)
		return -ENOMEM;

	udl->udev = udev;
	udl->ddev = dev;
	dev->dev_private = udl;

	if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	ret = udl_fbdev_init(dev);
	if (ret)
		goto err;

	ret = drm_vblank_init(dev, 1);
	if (ret)
		goto err_fb;

	return 0;
err_fb:
	udl_fbdev_cleanup(dev);
err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	kfree(udl);
	DRM_ERROR("%d\n", ret);
	return ret;
}

int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}

int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	drm_vblank_cleanup(dev);

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
	return 0;
}