/*
 * udlfb.c -- Framebuffer driver for DisplayLink USB controller
 *
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 *
 * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
 * usb-skeleton by GregKH.
 *
 * Device-specific portions based on information from Displaylink, with work
 * from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <video/udlfb.h>
#include "edid.h"

static struct fb_fix_screeninfo dlfb_fix = {
	.id =		"udlfb",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.xpanstep =	0,
	.ypanstep =	0,
	.ywrapstep =	0,
	.accel =	FB_ACCEL_NONE,
};

static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
		FBINFO_VIRTFB |
		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;

/*
 * There are many DisplayLink-based graphics products, all with unique PIDs.
 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
 * We also require a match on SubClass (0x00) and Protocol (0x00),
 * which is compatible with all known USB 2.0 era graphics chips and firmware,
 * but allows DisplayLink to increment those for any future incompatible chips
 */
static struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9,
	 .bInterfaceClass = 0xff,
	 .bInterfaceSubClass = 0x00,
	 .bInterfaceProtocol = 0x00,
	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
		USB_DEVICE_ID_MATCH_INT_CLASS |
		USB_DEVICE_ID_MATCH_INT_SUBCLASS |
		USB_DEVICE_ID_MATCH_INT_PROTOCOL,
	},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);

/* module options */
static bool console = 1; /* Allow fbcon to open framebuffer */
static bool fb_defio = 1; /* Detect mmap writes using page faults */
static bool shadow = 1; /* Optionally disable shadow framebuffer */
static int pixel_limit; /* Optionally force a pixel resolution limit */

/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dev);
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dev);

/*
 * All DisplayLink bulk operations start with 0xAF, followed by specific code
 * All operations are written to buffers which then later get sent to device
 */
static char *dlfb_set_register(char *buf, u8 reg, u8 val)
{
	*buf++ = 0xAF;
	*buf++ = 0x20;
	*buf++ = reg;
	*buf++ = val;
	return buf;
}

static char *dlfb_vidreg_lock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0x00);
}

static char *dlfb_vidreg_unlock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0xFF);
}
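
/*
 * Illustrative example of the resulting byte stream: dlfb_vidreg_lock()
 * appends AF 20 FF 00, dlfb_vidreg_unlock() appends AF 20 FF FF, and
 * dlfb_set_register(buf, 0x1F, 0x00) appends AF 20 1F 00 (the blanking
 * register used below).
 */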

/*
 * Map FB_BLANK_* to DisplayLink register
 * DLReg FB_BLANK_*
 * ----- -----------------------------
 * 0x00 FB_BLANK_UNBLANK (0)
 * 0x01 FB_BLANK_NORMAL (1)
 * 0x03 FB_BLANK_VSYNC_SUSPEND (2)
 * 0x05 FB_BLANK_HSYNC_SUSPEND (3)
 * 0x07 FB_BLANK_POWERDOWN (4) Note: requires modeset to come back
 */
static char *dlfb_blanking(char *buf, int fb_blank)
{
	u8 reg;

	switch (fb_blank) {
	case FB_BLANK_POWERDOWN:
		reg = 0x07;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		reg = 0x05;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		reg = 0x03;
		break;
	case FB_BLANK_NORMAL:
		reg = 0x01;
		break;
	default:
		reg = 0x00;
	}

	buf = dlfb_set_register(buf, 0x1F, reg);

	return buf;
}

static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}

static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	/* the base pointer is 16 bits wide, 0x20 is hi byte. */
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}

/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
 */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}

static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}

/*
 * This is kind of weird because the controller takes some
 * register values in a different byte order than other registers.
 */
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value);
	return dlfb_set_register(wrptr, reg+1, value >> 8);
}

/*
 * LFSR is linear feedback shift register. The reason we have this is
 * because the display controller needs to minimize the clock depth of
 * various counters used in the display path. So this code reverses the
 * provided value into the lfsr16 value by counting backwards to get
 * the value that needs to be set in the hardware comparator to get the
 * same actual count. This makes sense once you read above a couple of
 * times and think about it from a hardware perspective.
 */
static u16 dlfb_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */

	while (actual_count--) {
		lv = ((lv << 1) |
			(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
			& 0xFFFF;
	}

	return (u16) lv;
}

/*
 * This does LFSR conversion on the value that is to be written.
 * See LFSR explanation above for more detail.
 */
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}
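
/*
 * Worked example (illustrative, derived from the loop above): starting
 * from 0xFFFF, dlfb_lfsr16(0) == 0xFFFF, dlfb_lfsr16(1) == 0xFFFE and
 * dlfb_lfsr16(2) == 0xFFFC. Registers written through
 * dlfb_set_register_lfsr16() therefore receive the LFSR state rather
 * than the plain count the caller passed in.
 */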

/*
 * This takes a standard fbdev screeninfo struct and all of its monitor mode
 * details and converts them into the DisplayLink equivalent register commands.
 */
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
	u16 xds, yds;
	u16 xde, yde;
	u16 yec;

	/* x display start */
	xds = var->left_margin + var->hsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
	/* x display end */
	xde = xds + var->xres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);

	/* y display start */
	yds = var->upper_margin + var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
	/* y display end */
	yde = yds + var->yres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);

	/* x end count is active + blanking - 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
			xde + var->right_margin - 1);

	/* libdlo hardcodes hsync start to 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);

	/* hsync end is width of sync pulse + 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);

	/* hpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);

	/* yendcount is vertical active + vertical blanking */
	yec = var->yres + var->upper_margin + var->lower_margin +
			var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);

	/* libdlo hardcodes vsync start to 0 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);

	/* vsync end is width of vsync pulse */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);

	/* vpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);

	/* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */
	wrptr = dlfb_set_register_16be(wrptr, 0x1B,
			200*1000*1000/var->pixclock);

	return wrptr;
}
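
/*
 * Worked example for the pixel clock conversion above: var->pixclock is a
 * period in picoseconds and 1E12 / 5000 = 200,000,000, so the quotient is
 * the clock expressed in 5 kHz units. For a 1024x768@60 style mode with
 * pixclock = 15384 ps, 200000000 / 15384 = 13000, i.e. 13000 * 5 kHz = 65 MHz.
 */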

/*
 * This takes a standard fbdev screeninfo struct that was fetched or prepared
 * and then generates the appropriate command sequence that then drives the
 * display controller.
 */
static int dlfb_set_video_mode(struct dlfb_data *dev,
				struct fb_var_screeninfo *var)
{
	char *buf;
	char *wrptr;
	int retval;
	int writesize;
	struct urb *urb;

	if (!atomic_read(&dev->usb_active))
		return -EPERM;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return -ENOMEM;

	buf = (char *) urb->transfer_buffer;

	/*
	 * This first section has to do with setting the base address on the
	 * controller associated with the display. There are 2 base pointers;
	 * currently, we only use the 16 bpp segment.
	 */
	wrptr = dlfb_vidreg_lock(buf);
	wrptr = dlfb_set_color_depth(wrptr, 0x00);
	/* set base for 16bpp segment to 0 */
	wrptr = dlfb_set_base16bpp(wrptr, 0);
	/* set base for 8bpp segment to end of fb */
	wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);

	wrptr = dlfb_set_vid_cmds(wrptr, var);
	wrptr = dlfb_blanking(wrptr, FB_BLANK_UNBLANK);
	wrptr = dlfb_vidreg_unlock(wrptr);

	writesize = wrptr - buf;

	retval = dlfb_submit_urb(dev, urb, writesize);

	dev->blank_mode = FB_BLANK_UNBLANK;

	return retval;
}

static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	if (size > info->fix.smem_len)
		return -EINVAL;
	if (offset > info->fix.smem_len - size)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	return 0;
}

/*
 * Trims identical data from front and back of line
 * Sets new front buffer address and width
 * And returns byte count of identical pixels
 * Assumes CPU natural alignment (unsigned long)
 * for back and front buffer ptrs and width
 */
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
	int j, k;
	const unsigned long *back = (const unsigned long *) bback;
	const unsigned long *front = (const unsigned long *) *bfront;
	const int width = *width_bytes / sizeof(unsigned long);
	int identical = width;
	int start = width;
	int end = width;

	prefetch((void *) front);
	prefetch((void *) back);

	for (j = 0; j < width; j++) {
		if (back[j] != front[j]) {
			start = j;
			break;
		}
	}

	for (k = width - 1; k > j; k--) {
		if (back[k] != front[k]) {
			end = k+1;
			break;
		}
	}

	identical = start + (width - end);
	*bfront = (u8 *) &front[start];
	*width_bytes = (end - start) * sizeof(unsigned long);

	return identical * sizeof(unsigned long);
}

/*
 * Render a command stream for an encoded horizontal line segment of pixels.
 *
 * A command buffer holds several commands.
 * It always begins with a fresh command header
 * (the protocol doesn't require this, but we enforce it to allow
 * multiple buffers to be potentially encoded and sent in parallel).
 * A single command encodes one contiguous horizontal line of pixels
 *
 * The function relies on the client to do all allocation, so that
 * rendering can be done directly to output buffers (e.g. USB URBs).
 * The function fills the supplied command buffer, providing information
 * on where it left off, so the client may call in again with additional
 * buffers if the line will take several buffers to complete.
 *
 * A single command can transmit a maximum of 256 pixels,
 * regardless of the compression ratio (protocol design limit).
 * To the hardware, 0 for a size byte means 256
 *
 * Rather than 256 pixel commands which are either rl or raw encoded,
 * the rlx command simply assumes alternating raw and rl spans within one cmd.
 * This has a slightly larger header overhead, but produces more even results.
 * It also processes all data (read and write) in a single pass.
 * Performance benchmarks of common cases show it having just slightly better
 * compression than 256 pixel raw or rle commands, with similar CPU consumption.
 * But for very rl friendly data, it will compress not quite as well.
 */
static void dlfb_compress_hline(
	const uint16_t **pixel_start_ptr,
	const uint16_t *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end)
{
	const uint16_t *pixel = *pixel_start_ptr;
	uint32_t dev_addr = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;
	const int bpp = 2;

	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = NULL;
		uint8_t *cmd_pixels_count_byte = NULL;
		const uint16_t *raw_pixel_start = NULL;
		const uint16_t *cmd_pixel_start, *cmd_pixel_end = NULL;

		prefetchw((void *) cmd); /* pull in one cache line at least */

		*cmd++ = 0xAF;
		*cmd++ = 0x6B;
		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);

		cmd_pixels_count_byte = cmd++; /* we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /* we'll know this later */
		raw_pixel_start = pixel;

		cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
			min((int)(pixel_end - pixel),
			    (int)(cmd_buffer_end - cmd) / bpp));

		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);

		while (pixel < cmd_pixel_end) {
			const uint16_t * const repeating_pixel = pixel;

			*(uint16_t *)cmd = cpu_to_be16p(pixel);
			cmd += 2;
			pixel++;

			if (unlikely((pixel < cmd_pixel_end) &&
				     (*pixel == *repeating_pixel))) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = ((repeating_pixel -
						raw_pixel_start) + 1) & 0xFF;

				while ((pixel < cmd_pixel_end)
				       && (*pixel == *repeating_pixel)) {
					pixel++;
				}

				/* immediately after raw data is repeat byte */
				*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
		}

		*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
		dev_addr += (pixel - cmd_pixel_start) * bpp;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */
		if (cmd_buffer_end > cmd)
			memset(cmd, 0xAF, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;

	return;
}
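
/*
 * Illustrative layout of one rlx command as assembled above (not part of
 * the original comments): AF 6B <addr23:16> <addr15:8> <addr7:0>
 * <total pixel count> then alternating spans of
 * <raw count> <raw 16bpp pixels, big endian> <repeat count> ...
 * Each count byte covers up to 256 pixels, with 0 meaning 256.
 */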

/*
 * There are 3 copies of every pixel: The front buffer that the fbdev
 * client renders to, the actual framebuffer across the USB bus in hardware
 * (that we can only write to, slowly, and can never read), and (optionally)
 * our shadow copy that tracks what's been sent to that hardware buffer.
 */
static int dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
			     const char *front, char **urb_buf_ptr,
			     u32 byte_offset, u32 byte_width,
			     int *ident_ptr, int *sent_ptr)
{
	const u8 *line_start, *line_end, *next_pixel;
	u32 dev_addr = dev->base16 + byte_offset;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	if (dev->backing_buffer) {
		int offset;
		const u8 *back_start = (u8 *) (dev->backing_buffer
						+ byte_offset);

		*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
			&byte_width);

		offset = next_pixel - line_start;
		line_end = next_pixel + byte_width;
		dev_addr += offset;
		back_start += offset;
		line_start += offset;

		memcpy((char *)back_start, (char *) line_start,
		       byte_width);
	}

	while (next_pixel < line_end) {

		dlfb_compress_hline((const uint16_t **) &next_pixel,
			(const uint16_t *) line_end, &dev_addr,
			(u8 **) &cmd, (u8 *) cmd_end);

		if (cmd >= cmd_end) {
			int len = cmd - (u8 *) urb->transfer_buffer;
			if (dlfb_submit_urb(dev, urb, len))
				return 1; /* lost pixels is set */
			*sent_ptr += len;
			urb = dlfb_get_urb(dev);
			if (!urb)
				return 1; /* lost_pixels is set */
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	*urb_buf_ptr = cmd;

	return 0;
}

static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
	       int width, int height, char *data)
{
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > dev->info->var.xres) ||
	    (y + height > dev->info->var.yres))
		return -EINVAL;

	if (!atomic_read(&dev->usb_active))
		return 0;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height ; i++) {
		const int line_offset = dev->info->fix.line_length * i;
		const int byte_offset = line_offset + (x * BPP);

		if (dlfb_render_hline(dev, &urb,
				      (char *) dev->info->fix.smem_start,
				      &cmd, byte_offset, width * BPP,
				      &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

error:
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(width*height*2, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);

	return 0;
}
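
/*
 * Worked example for the alignment above (assuming an 8-byte unsigned
 * long): a damage rectangle starting at x = 13 with width = 10 becomes
 * aligned_x = 8 and width = DL_ALIGN_UP(10 + 5, 8) = 16, so whole
 * unsigned-long-sized spans are handed to dlfb_trim_hline().
 */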

/*
 * Path triggered by usermode clients who write to filesystem
 * e.g. cat filename > /dev/fb1
 * Not used by X Windows or text-mode console. But useful for testing.
 * Slow because of extra copy and we must assume all pixels dirty.
 */
static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	ssize_t result;
	struct dlfb_data *dev = info->par;
	u32 offset = (u32) *ppos;

	result = fb_sys_write(info, buf, count, ppos);

	if (result > 0) {
		int start = max((int)(offset / info->fix.line_length), 0);
		int lines = min((u32)((result / info->fix.line_length) + 1),
				(u32)info->var.yres);

		dlfb_handle_damage(dev, 0, start, info->var.xres,
			lines, info->screen_base);
	}

	return result;
}

/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
static void dlfb_ops_copyarea(struct fb_info *info,
				const struct fb_copyarea *area)
{

	struct dlfb_data *dev = info->par;

	sys_copyarea(info, area);

	dlfb_handle_damage(dev, area->dx, area->dy,
			area->width, area->height, info->screen_base);
}

static void dlfb_ops_imageblit(struct fb_info *info,
				const struct fb_image *image)
{
	struct dlfb_data *dev = info->par;

	sys_imageblit(info, image);

	dlfb_handle_damage(dev, image->dx, image->dy,
			image->width, image->height, info->screen_base);
}

static void dlfb_ops_fillrect(struct fb_info *info,
			  const struct fb_fillrect *rect)
{
	struct dlfb_data *dev = info->par;

	sys_fillrect(info, rect);

	dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width,
			      rect->height, info->screen_base);
}

/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 */
static void dlfb_dpy_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct dlfb_data *dev = info->par;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	if (!fb_defio)
		return;

	if (!atomic_read(&dev->usb_active))
		return;

	start_cycles = get_cycles();

	urb = dlfb_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		if (dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
				  &cmd, cur->index << PAGE_SHIFT,
				  PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

error:
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(bytes_rendered, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);
}

static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
{
	int i;
	int ret;
	char *rbuf;

	rbuf = kmalloc(2, GFP_KERNEL);
	if (!rbuf)
		return 0;

	for (i = 0; i < len; i++) {
		ret = usb_control_msg(dev->udev,
				    usb_rcvctrlpipe(dev->udev, 0), (0x02),
				    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
				    HZ);
		if (ret < 1) {
			pr_err("Read EDID byte %d failed err %x\n", i, ret);
			i--;
			break;
		}
		edid[i] = rbuf[1];
	}

	kfree(rbuf);

	return i;
}
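
/*
 * Note on the control transfer above (descriptive, derived from the
 * constants used): 0x80 | (0x02 << 5) is USB_DIR_IN | USB_TYPE_VENDOR,
 * request 0x02, with the EDID byte index in the high byte of wValue and
 * wIndex 0xA1. Each transfer returns two bytes and only rbuf[1] carries
 * the EDID byte, so one full 128-byte block takes 128 round trips.
 */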

static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
				unsigned long arg)
{

	struct dlfb_data *dev = info->par;

	if (!atomic_read(&dev->usb_active))
		return 0;

	/* TODO: Update X server to get this from sysfs instead */
	if (cmd == DLFB_IOCTL_RETURN_EDID) {
		void __user *edid = (void __user *)arg;
		if (copy_to_user(edid, dev->edid, dev->edid_size))
			return -EFAULT;
		return 0;
	}

	/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
	if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
		struct dloarea area;

		if (copy_from_user(&area, (void __user *)arg,
				  sizeof(struct dloarea)))
			return -EFAULT;

		/*
		 * If we have a damage-aware client, turn fb_defio "off"
		 * To avoid perf impact of unnecessary page fault handling.
		 * Done by resetting the delay for this fb_info to a very
		 * long period. Pages will become writable and stay that way.
		 * Reset to normal value when all clients have closed this fb.
		 */
		if (info->fbdefio)
			info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;

		if (area.x < 0)
			area.x = 0;

		if (area.x > info->var.xres)
			area.x = info->var.xres;

		if (area.y < 0)
			area.y = 0;

		if (area.y > info->var.yres)
			area.y = info->var.yres;

		dlfb_handle_damage(dev, area.x, area.y, area.w, area.h,
			   info->screen_base);
	}

	return 0;
}

/* taken from vesafb */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
	       unsigned blue, unsigned transp, struct fb_info *info)
{
	int err = 0;

	if (regno >= info->cmap.len)
		return 1;

	if (regno < 16) {
		if (info->var.red.offset == 10) {
			/* 1:5:5:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800) >> 1) |
			    ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
		} else {
			/* 0:5:6:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800)) |
			    ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		}
	}

	return err;
}
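
/*
 * Worked example for the 0:5:6:5 packing above: fbdev passes 16-bit
 * color components, so pure red (red = 0xffff) becomes
 * (0xffff & 0xf800) = 0xf800, and pure green (green = 0xffff) becomes
 * (0xffff & 0xfc00) >> 5 = 0x07e0, matching the RGB565 layout set in
 * dlfb_var_color_format() below.
 */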

/*
 * It's common for several clients to have framebuffer open simultaneously.
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least)
 */
static int dlfb_ops_open(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	/*
	 * fbcon aggressively connects to first framebuffer it finds,
	 * preventing other clients (X) from working properly. Usually
	 * not what the user wants. Fail by default with option to enable.
	 */
	if ((user == 0) && (!console))
		return -EBUSY;

	/* If the USB device is gone, we don't accept new opens */
	if (dev->virtualized)
		return -ENODEV;

	dev->fb_count++;

	kref_get(&dev->kref);

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		if (fbdefio) {
			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = dlfb_dpy_deferred_io;
		}

		info->fbdefio = fbdefio;
		fb_deferred_io_init(info);
	}

	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
		  info->node, user, info, dev->fb_count);

	return 0;
}

/*
 * Called when all client interfaces to start transactions have been disabled,
 * and all references to our device instance (dlfb_data) are released.
 * Every transaction must have a reference, so we know we are fully spun down
 */
static void dlfb_free(struct kref *kref)
{
	struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);

	vfree(dev->backing_buffer);

	kfree(dev->edid);

	pr_warn("freeing dlfb_data %p\n", dev);

	kfree(dev);
}

static void dlfb_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

static void dlfb_free_framebuffer(struct dlfb_data *dev)
{
	struct fb_info *info = dev->info;

	if (info) {
		int node = info->node;

		unregister_framebuffer(info);

		if (info->cmap.len != 0)
			fb_dealloc_cmap(&info->cmap);
		if (info->monspecs.modedb)
			fb_destroy_modedb(info->monspecs.modedb);
		vfree(info->screen_base);

		fb_destroy_modelist(&info->modelist);

		dev->info = NULL;

		/* Assume info structure is freed after this point */
		framebuffer_release(info);

		pr_warn("fb_info for /dev/fb%d has been freed\n", node);
	}

	/* ref taken in probe() as part of registering framebuffer */
	kref_put(&dev->kref, dlfb_free);
}

static void dlfb_free_framebuffer_work(struct work_struct *work)
{
	struct dlfb_data *dev = container_of(work, struct dlfb_data,
					     free_framebuffer_work.work);
	dlfb_free_framebuffer(dev);
}
/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	dev->fb_count--;

	/* We can't free fb_info here - fbmem will touch it when we return */
	if (dev->virtualized && (dev->fb_count == 0))
		schedule_delayed_work(&dev->free_framebuffer_work, HZ);

	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, dev->fb_count);

	kref_put(&dev->kref, dlfb_free);

	return 0;
}

/*
 * Check whether a video mode is supported by the DisplayLink chip
 * We start from monitor's modes, so don't need to filter that here
 */
static int dlfb_is_valid_mode(struct fb_videomode *mode,
		struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	if (mode->xres * mode->yres > dev->sku_pixel_limit) {
		pr_warn("%dx%d beyond chip capabilities\n",
			mode->xres, mode->yres);
		return 0;
	}

	pr_info("%dx%d @ %d Hz valid mode\n", mode->xres, mode->yres,
		mode->refresh);

	return 1;
}

static void dlfb_var_color_format(struct fb_var_screeninfo *var)
{
	const struct fb_bitfield red = { 11, 5, 0 };
	const struct fb_bitfield green = { 5, 6, 0 };
	const struct fb_bitfield blue = { 0, 5, 0 };

	var->bits_per_pixel = 16;
	var->red = red;
	var->green = green;
	var->blue = blue;
}

static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	struct fb_videomode mode;

	/* TODO: support dynamically changing framebuffer size */
	if ((var->xres * var->yres * 2) > info->fix.smem_len)
		return -EINVAL;

	/* set device-specific elements of var unrelated to mode */
	dlfb_var_color_format(var);

	fb_var_to_videomode(&mode, var);

	if (!dlfb_is_valid_mode(&mode, info))
		return -EINVAL;

	return 0;
}

static int dlfb_ops_set_par(struct fb_info *info)
{
	struct dlfb_data *dev = info->par;
	int result;
	u16 *pix_framebuffer;
	int i;

	pr_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);

	result = dlfb_set_video_mode(dev, &info->var);

	if ((result == 0) && (dev->fb_count == 0)) {

		/* paint greenscreen */

		pix_framebuffer = (u16 *) info->screen_base;
		for (i = 0; i < info->fix.smem_len / 2; i++)
			pix_framebuffer[i] = 0x37e6;

		dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
				   info->screen_base);
	}

	return result;
}
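
/*
 * Aside (illustrative): 0x37e6 in the RGB565 layout above decodes to
 * red = 0x06, green = 0x3f, blue = 0x06, i.e. a mostly-green test fill,
 * which is why a freshly set mode shows a green screen until a client
 * draws over it.
 */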

/* To fonzi the jukebox (e.g. make blanking changes take effect) */
static char *dlfb_dummy_render(char *buf)
{
	*buf++ = 0xAF;
	*buf++ = 0x6A; /* copy */
	*buf++ = 0x00; /* from address*/
	*buf++ = 0x00;
	*buf++ = 0x00;
	*buf++ = 0x01; /* one pixel */
	*buf++ = 0x00; /* to address */
	*buf++ = 0x00;
	*buf++ = 0x00;
	return buf;
}

/*
 * In order to come back from full DPMS off, we need to set the mode again
 */
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
	struct dlfb_data *dev = info->par;
	char *bufptr;
	struct urb *urb;

	pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n",
		info->node, dev->blank_mode, blank_mode);

	if ((dev->blank_mode == FB_BLANK_POWERDOWN) &&
	    (blank_mode != FB_BLANK_POWERDOWN)) {

		/* returning from powerdown requires a fresh modeset */
		dlfb_set_video_mode(dev, &info->var);
	}

	urb = dlfb_get_urb(dev);
	if (!urb)
		return 0;

	bufptr = (char *) urb->transfer_buffer;
	bufptr = dlfb_vidreg_lock(bufptr);
	bufptr = dlfb_blanking(bufptr, blank_mode);
	bufptr = dlfb_vidreg_unlock(bufptr);

	/* seems like a render op is needed to have blank change take effect */
	bufptr = dlfb_dummy_render(bufptr);

	dlfb_submit_urb(dev, urb, bufptr -
			(char *) urb->transfer_buffer);

	dev->blank_mode = blank_mode;

	return 0;
}

static struct fb_ops dlfb_ops = {
	.owner = THIS_MODULE,
	.fb_read = fb_sys_read,
	.fb_write = dlfb_ops_write,
	.fb_setcolreg = dlfb_ops_setcolreg,
	.fb_fillrect = dlfb_ops_fillrect,
	.fb_copyarea = dlfb_ops_copyarea,
	.fb_imageblit = dlfb_ops_imageblit,
	.fb_mmap = dlfb_ops_mmap,
	.fb_ioctl = dlfb_ops_ioctl,
	.fb_open = dlfb_ops_open,
	.fb_release = dlfb_ops_release,
	.fb_blank = dlfb_ops_blank,
	.fb_check_var = dlfb_ops_check_var,
	.fb_set_par = dlfb_ops_set_par,
};


/*
 * Assumes &info->lock held by caller
 * Assumes no active clients have framebuffer open
 */
static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info)
{
	int retval = -ENOMEM;
	int old_len = info->fix.smem_len;
	int new_len;
	unsigned char *old_fb = info->screen_base;
	unsigned char *new_fb;
	unsigned char *new_back = NULL;

	pr_warn("Reallocating framebuffer. Addresses will change!\n");

	new_len = info->fix.line_length * info->var.yres;

	if (PAGE_ALIGN(new_len) > old_len) {
		/*
		 * Alloc system memory for virtual framebuffer
		 */
		new_fb = vmalloc(new_len);
		if (!new_fb) {
			pr_err("Virtual framebuffer alloc failed\n");
			goto error;
		}

		if (info->screen_base) {
			memcpy(new_fb, old_fb, old_len);
			vfree(info->screen_base);
		}

		info->screen_base = new_fb;
		info->fix.smem_len = PAGE_ALIGN(new_len);
		info->fix.smem_start = (unsigned long) new_fb;
		info->flags = udlfb_info_flags;

		/*
		 * Second framebuffer copy to mirror the framebuffer state
		 * on the physical USB device. We can function without this.
		 * But with imperfect damage info we may send pixels over USB
		 * that were, in fact, unchanged - wasting limited USB bandwidth
		 */
		if (shadow)
			new_back = vzalloc(new_len);
		if (!new_back)
			pr_info("No shadow/backing buffer allocated\n");
		else {
			vfree(dev->backing_buffer);
			dev->backing_buffer = new_back;
		}
	}

	retval = 0;

error:
	return retval;
}
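
/*
 * Sizing example (illustrative): a 1920x1080 mode at 16bpp gives
 * line_length = 3840 and new_len = 3840 * 1080 = 4147200 bytes
 * (about 4050 KiB), and roughly double that total when the optional
 * shadow buffer above is also allocated.
 */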

/*
 * 1) Get EDID from hw, or use sw default
 * 2) Parse into various fb_info structs
 * 3) Allocate virtual framebuffer memory to back highest res mode
 *
 * Parses EDID into three places used by various parts of fbdev:
 * fb_var_screeninfo contains the timing of the monitor's preferred mode
 * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
 * fb_info.modelist is a linked list of all monitor & VESA modes which work
 *
 * If EDID is not readable/valid, then modelist is all VESA modes,
 * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
 * Returns 0 if successful
 */
static int dlfb_setup_modes(struct dlfb_data *dev,
			   struct fb_info *info,
			   char *default_edid, size_t default_edid_size)
{
	int i;
	const struct fb_videomode *default_vmode = NULL;
	int result = 0;
	char *edid;
	int tries = 3;

	if (info->dev) /* only use mutex if info has been registered */
		mutex_lock(&info->lock);

	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!edid) {
		result = -ENOMEM;
		goto error;
	}

	fb_destroy_modelist(&info->modelist);
	memset(&info->monspecs, 0, sizeof(info->monspecs));

	/*
	 * Try to (re)read EDID from hardware first
	 * EDID data may return, but not parse as valid
	 * Try again a few times, in case of e.g. analog cable noise
	 */
	while (tries--) {

		i = dlfb_get_edid(dev, edid, EDID_LENGTH);

		if (i >= EDID_LENGTH)
			fb_edid_to_monspecs(edid, &info->monspecs);

		if (info->monspecs.modedb_len > 0) {
			dev->edid = edid;
			dev->edid_size = i;
			break;
		}
	}

	/* If that fails, use a previously returned EDID if available */
	if (info->monspecs.modedb_len == 0) {

		pr_err("Unable to get valid EDID from device/display\n");

		if (dev->edid) {
			fb_edid_to_monspecs(dev->edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0)
				pr_err("Using previously queried EDID\n");
		}
	}

	/* If that fails, use the default EDID we were handed */
	if (info->monspecs.modedb_len == 0) {
		if (default_edid_size >= EDID_LENGTH) {
			fb_edid_to_monspecs(default_edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0) {
				memcpy(edid, default_edid, default_edid_size);
				dev->edid = edid;
				dev->edid_size = default_edid_size;
				pr_err("Using default/backup EDID\n");
			}
		}
	}

	/* If we've got modes, let's pick a best default mode */
	if (info->monspecs.modedb_len > 0) {

		for (i = 0; i < info->monspecs.modedb_len; i++) {
			if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
				fb_add_videomode(&info->monspecs.modedb[i],
					&info->modelist);
			else {
				if (i == 0)
					/* if we've removed top/best mode */
					info->monspecs.misc
						&= ~FB_MISC_1ST_DETAIL;
			}
		}

		default_vmode = fb_find_best_display(&info->monspecs,
						     &info->modelist);
	}

	/* If everything else has failed, fall back to safe default mode */
	if (default_vmode == NULL) {

		struct fb_videomode fb_vmode = {0};

		/*
		 * Add the standard VESA modes to our modelist
		 * Since we don't have EDID, there may be modes that
		 * overspec monitor and/or are incorrect aspect ratio, etc.
		 * But at least the user has a chance to choose
		 */
		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
			if (dlfb_is_valid_mode((struct fb_videomode *)
						&vesa_modes[i], info))
				fb_add_videomode(&vesa_modes[i],
						 &info->modelist);
		}

		/*
		 * default to resolution safe for projectors
		 * (since they are most common case without EDID)
		 */
		fb_vmode.xres = 800;
		fb_vmode.yres = 600;
		fb_vmode.refresh = 60;
		default_vmode = fb_find_nearest_mode(&fb_vmode,
						     &info->modelist);
	}

	/* If we have good mode and no active clients*/
	if ((default_vmode != NULL) && (dev->fb_count == 0)) {

		fb_videomode_to_var(&info->var, default_vmode);
		dlfb_var_color_format(&info->var);

		/*
		 * with mode size info, we can now alloc our framebuffer.
		 */
		memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix));
		info->fix.line_length = info->var.xres *
			(info->var.bits_per_pixel / 8);

		result = dlfb_realloc_framebuffer(dev, info);

	} else
		result = -EINVAL;

error:
	if (edid && (dev->edid != edid))
		kfree(edid);

	if (info->dev)
		mutex_unlock(&info->lock);

	return result;
}

static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_rendered));
}

static ssize_t metrics_bytes_identical_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_identical));
}

static ssize_t metrics_bytes_sent_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_sent));
}

static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->cpu_kcycles_used));
}

static ssize_t edid_show(
			struct file *filp,
			struct kobject *kobj, struct bin_attribute *a,
			char *buf, loff_t off, size_t count) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;

	if (dev->edid == NULL)
		return 0;

	if ((off >= dev->edid_size) || (count > dev->edid_size))
		return 0;

	if (off + count > dev->edid_size)
		count = dev->edid_size - off;

	pr_info("sysfs edid copy %p to %p, %d bytes\n",
		dev->edid, buf, (int) count);

	memcpy(buf, dev->edid, count);

	return count;
}

static ssize_t edid_store(
			struct file *filp,
			struct kobject *kobj, struct bin_attribute *a,
			char *src, loff_t src_off, size_t src_size) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int ret;

	/* We only support write of entire EDID at once, no offset*/
	if ((src_size != EDID_LENGTH) || (src_off != 0))
		return -EINVAL;

	ret = dlfb_setup_modes(dev, fb_info, src, src_size);
	if (ret)
		return ret;

	if (!dev->edid || memcmp(src, dev->edid, src_size))
		return -EINVAL;

	pr_info("sysfs written EDID is new default\n");
	dlfb_ops_set_par(fb_info);
	return src_size;
}
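
/*
 * Usage note (illustrative, path assumed from the fb_info device these
 * attributes are created on in dlfb_init_framebuffer_work()): a full
 * 128-byte EDID written to /sys/class/graphics/fb<N>/edid replaces the
 * default via dlfb_setup_modes() and is applied with dlfb_ops_set_par(),
 * while reads return whatever EDID the driver currently holds.
 */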

static ssize_t metrics_reset_store(struct device *fbdev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;

	atomic_set(&dev->bytes_rendered, 0);
	atomic_set(&dev->bytes_identical, 0);
	atomic_set(&dev->bytes_sent, 0);
	atomic_set(&dev->cpu_kcycles_used, 0);

	return count;
}

static struct bin_attribute edid_attr = {
	.attr.name = "edid",
	.attr.mode = 0666,
	.size = EDID_LENGTH,
	.read = edid_show,
	.write = edid_store
};

static struct device_attribute fb_device_attrs[] = {
	__ATTR_RO(metrics_bytes_rendered),
	__ATTR_RO(metrics_bytes_identical),
	__ATTR_RO(metrics_bytes_sent),
	__ATTR_RO(metrics_cpu_kcycles_used),
	__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
};

/*
 * This is necessary before we can communicate with the display controller.
 */
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
	int ret;
	u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
			     0x1C, 0x88, 0x5E, 0x15,
			     0x60, 0xFE, 0xC6, 0x97,
			     0x16, 0x3D, 0x47, 0xF2 };

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			NR_USB_REQUEST_CHANNEL,
			(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
	return ret;
}

static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev,
					struct usb_interface *interface)
{
	char *desc;
	char *buf;
	char *desc_end;
	int total_len;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(interface_to_usbdev(interface),
					0x5f, /* vendor specific */
					0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);

	/* if not found, look in configuration descriptor */
	if (total_len < 0) {
		if (0 == usb_get_extra_descriptor(interface->cur_altsetting,
			0x5f, &desc))
			total_len = (int) desc[0];
	}

	if (total_len > 5) {
		pr_info("vendor descriptor length:%x data:%11ph\n", total_len,
			desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				pr_warn("DL chip limited to %d pixel modes\n",
					max_area);
				dev->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	} else {
		pr_info("vendor descriptor not available (%d)\n", total_len);
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	pr_err("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
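
/*
 * Descriptor layout handled above (descriptive summary of the checks):
 * byte 0 total length, byte 1 type 0x5f, bytes 2-3 version, byte 4
 * remaining length, then a list of records of the form
 * <16-bit LE key> <8-bit length> <value>; key 0x0200 carries the
 * little-endian max_area pixel limit for this SKU.
 */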

static void dlfb_init_framebuffer_work(struct work_struct *work);

static int dlfb_usb_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_device *usbdev;
	struct dlfb_data *dev;
	int retval = -ENOMEM;

	/* usb initialization */

	usbdev = interface_to_usbdev(interface);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&interface->dev, "dlfb_usb_probe: failed alloc of dev struct\n");
		goto error;
	}

	kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */

	dev->udev = usbdev;
	dev->gdev = &usbdev->dev; /* our generic struct device * */
	usb_set_intfdata(interface, dev);

	pr_info("%s %s - serial #%s\n",
		usbdev->manufacturer, usbdev->product, usbdev->serial);
	pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n",
		usbdev->descriptor.idVendor, usbdev->descriptor.idProduct,
		usbdev->descriptor.bcdDevice, dev);
	pr_info("console enable=%d\n", console);
	pr_info("fb_defio enable=%d\n", fb_defio);
	pr_info("shadow enable=%d\n", shadow);

	dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */

	if (!dlfb_parse_vendor_descriptor(dev, interface)) {
		pr_err("firmware not recognized. Assume incompatible device\n");
		goto error;
	}

	if (pixel_limit) {
		pr_warn("DL chip limit of %d overridden"
			" by module param to %d\n",
			dev->sku_pixel_limit, pixel_limit);
		dev->sku_pixel_limit = pixel_limit;
	}


	if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		retval = -ENOMEM;
		pr_err("dlfb_alloc_urb_list failed\n");
		goto error;
	}

	kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */

	/* We don't register a new USB class. Our client interface is fbdev */

	/* Workitem keeps things fast & simple during USB enumeration */
	INIT_DELAYED_WORK(&dev->init_framebuffer_work,
			  dlfb_init_framebuffer_work);
	schedule_delayed_work(&dev->init_framebuffer_work, 0);

	return 0;

error:
	if (dev) {

		kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */
		kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */

		/* dev has been deallocated. Do not dereference */
	}

	return retval;
}

static void dlfb_init_framebuffer_work(struct work_struct *work)
{
	struct dlfb_data *dev = container_of(work, struct dlfb_data,
					     init_framebuffer_work.work);
	struct fb_info *info;
	int retval;
	int i;

	/* allocates framebuffer driver structure, not framebuffer memory */
	info = framebuffer_alloc(0, dev->gdev);
	if (!info) {
		pr_err("framebuffer_alloc failed\n");
		goto error;
	}

	dev->info = info;
	info->par = dev;
	info->pseudo_palette = dev->pseudo_palette;
	info->fbops = &dlfb_ops;

	retval = fb_alloc_cmap(&info->cmap, 256, 0);
	if (retval < 0) {
		pr_err("fb_alloc_cmap failed %x\n", retval);
		goto error;
	}

	INIT_DELAYED_WORK(&dev->free_framebuffer_work,
			  dlfb_free_framebuffer_work);

	INIT_LIST_HEAD(&info->modelist);

	retval = dlfb_setup_modes(dev, info, NULL, 0);
	if (retval != 0) {
		pr_err("unable to find common mode for display and adapter\n");
		goto error;
	}

	/* ready to begin using device */

	atomic_set(&dev->usb_active, 1);
	dlfb_select_std_channel(dev);

	dlfb_ops_check_var(&info->var, info);
	dlfb_ops_set_par(info);

	retval = register_framebuffer(info);
	if (retval < 0) {
		pr_err("register_framebuffer failed %d\n", retval);
		goto error;
	}

	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
		retval = device_create_file(info->dev, &fb_device_attrs[i]);
		if (retval) {
			pr_warn("device_create_file failed %d\n", retval);
		}
	}

	retval = device_create_bin_file(info->dev, &edid_attr);
	if (retval) {
		pr_warn("device_create_bin_file failed %d\n", retval);
	}

	pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
			" Using %dK framebuffer memory\n", info->node,
			info->var.xres, info->var.yres,
			((dev->backing_buffer) ?
			info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
	return;

error:
	dlfb_free_framebuffer(dev);
}

static void dlfb_usb_disconnect(struct usb_interface *interface)
{
	struct dlfb_data *dev;
	struct fb_info *info;
	int i;

	dev = usb_get_intfdata(interface);
	info = dev->info;

	pr_info("USB disconnect starting\n");

	/* we virtualize until all fb clients release. Then we free */
	dev->virtualized = true;

	/* When non-active we'll update virtual framebuffer, but no new urbs */
	atomic_set(&dev->usb_active, 0);

	/* this function will wait for all in-flight urbs to complete */
	dlfb_free_urb_list(dev);

	if (info) {
		/* remove udlfb's sysfs interfaces */
		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
			device_remove_file(info->dev, &fb_device_attrs[i]);
		device_remove_bin_file(info->dev, &edid_attr);
		unlink_framebuffer(info);
	}

	usb_set_intfdata(interface, NULL);
	dev->udev = NULL;
	dev->gdev = NULL;

	/* if clients still have us open, will be freed on last close */
	if (dev->fb_count == 0)
		schedule_delayed_work(&dev->free_framebuffer_work, 0);

	/* release reference taken by kref_init in probe() */
	kref_put(&dev->kref, dlfb_free);

	/* consider dlfb_data freed */

	return;
}

static struct usb_driver dlfb_driver = {
	.name = "udlfb",
	.probe = dlfb_usb_probe,
	.disconnect = dlfb_usb_disconnect,
	.id_table = id_table,
};

module_usb_driver(dlfb_driver);

static void dlfb_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct dlfb_data *dev = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			pr_err("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
			atomic_set(&dev->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */

	spin_lock_irqsave(&dev->urbs.lock, flags);
	list_add_tail(&unode->entry, &dev->urbs.list);
	dev->urbs.available++;
	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
		up(&dev->urbs.limit_sem);
}

static void dlfb_free_urb_list(struct dlfb_data *dev)
{
	int count = dev->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	pr_notice("Freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at disconnect */
		ret = down_interruptible(&dev->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&dev->urbs.lock, flags);

		node = dev->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&dev->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, dev->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}

	dev->urbs.count = 0;
}

static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
{
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&dev->urbs.lock);

	dev->urbs.size = size;
	INIT_LIST_HEAD(&dev->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = dev;

		INIT_DELAYED_WORK(&unode->release_urb_work,
			  dlfb_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
			buf, size, dlfb_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &dev->urbs.list);

		i++;
	}

	sema_init(&dev->urbs.limit_sem, i);
	dev->urbs.count = i;
	dev->urbs.available = i;

	pr_notice("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
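
/*
 * Pool mechanics (descriptive summary): limit_sem counts free URBs, so
 * dlfb_get_urb() below can block in down_timeout() until a completion
 * returns one to the list, and dlfb_urb_completion() above requeues the
 * buffer and releases the semaphore (via a workitem when fb_defio is on).
 */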

static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
	int ret;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&dev->lost_pixels, 1);
		pr_warn("wait for urb interrupted: %x available: %d\n",
			ret, dev->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&dev->urbs.lock, flags);

	BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
	entry = dev->urbs.list.next;
	list_del_init(entry);
	dev->urbs.available--;

	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
{
	int ret;

	BUG_ON(len > dev->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		dlfb_urb_completion(urb); /* because no one else will */
		atomic_set(&dev->lost_pixels, 1);
		pr_err("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(console, "Allow fbcon to open framebuffer");

module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(fb_defio, "Page fault detection of mmap writes");

module_param(shadow, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(shadow, "Shadow vid mem. Disable to save mem but lose perf");

module_param(pixel_limit, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(pixel_limit, "Force limit on max mode (in x*y pixels)");

MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
	      "Jaya Kumar <jayakumar.lkml@gmail.com>, "
	      "Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");