/*
 * Copyright (C) STMicroelectronics SA 2014
 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/delay.h>

#include "bdisp.h"
#include "bdisp-filter.h"
#include "bdisp-reg.h"

/* Max width of the source frame in a single node */
#define MAX_SRC_WIDTH		2048

/* Reset & boot poll config */
#define POLL_RST_MAX		50
#define POLL_RST_DELAY_MS	20

/* Which plane a node writes: single RGB plane, luma plane, or chroma plane */
enum bdisp_target_plan {
	BDISP_RGB,
	BDISP_Y,
	BDISP_CBCR
};

/* Summary of the blitter operations needed to serve one request */
struct bdisp_op_cfg {
	bool cconv;             /* RGB - YUV conversion */
	bool hflip;             /* Horizontal flip */
	bool vflip;             /* Vertical flip */
	bool wide;              /* Wide (>MAX_SRC_WIDTH) */
	bool scale;             /* Scale */
	u16 h_inc;              /* Horizontal increment in 6.10 format */
	u16 v_inc;              /* Vertical increment in 6.10 format */
	bool src_interlaced;    /* is the src an interlaced buffer */
	u8 src_nbp;             /* nb of planes of the src */
	bool src_yuv;           /* is the src a YUV color format */
	bool src_420;           /* is the src 4:2:0 chroma subsampled */
	u8 dst_nbp;             /* nb of planes of the dst */
	bool dst_yuv;           /* is the dst a YUV color format */
	bool dst_420;           /* is the dst 4:2:0 chroma subsampled */
};

/* One entry of the resize filter tables handed to the HW */
struct bdisp_filter_addr {
	u16 min;                /* Filter min scale factor (6.10 fixed point) */
	u16 max;                /* Filter max scale factor (6.10 fixed point) */
	void *virt;             /* Virtual address for filter table */
	dma_addr_t paddr;       /* Physical address for filter table */
};

/*
 * Filter tables are shared by all contexts: allocated once in
 * bdisp_hw_alloc_filters() and freed in bdisp_hw_free_filters().
 */
static struct bdisp_filter_addr bdisp_h_filter[NB_H_FILTER];
static struct bdisp_filter_addr bdisp_v_filter[NB_V_FILTER];

/**
 * bdisp_hw_reset
 * @bdisp: bdisp entity
 *
 * Resets HW
 *
 * RETURNS:
 * 0 on success.
61 */ 62int bdisp_hw_reset(struct bdisp_dev *bdisp) 63{ 64 unsigned int i; 65 66 dev_dbg(bdisp->dev, "%s\n", __func__); 67 68 /* Mask Interrupt */ 69 writel(0, bdisp->regs + BLT_ITM0); 70 71 /* Reset */ 72 writel(readl(bdisp->regs + BLT_CTL) | BLT_CTL_RESET, 73 bdisp->regs + BLT_CTL); 74 writel(0, bdisp->regs + BLT_CTL); 75 76 /* Wait for reset done */ 77 for (i = 0; i < POLL_RST_MAX; i++) { 78 if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE) 79 break; 80 msleep(POLL_RST_DELAY_MS); 81 } 82 if (i == POLL_RST_MAX) 83 dev_err(bdisp->dev, "Reset timeout\n"); 84 85 return (i == POLL_RST_MAX) ? -EAGAIN : 0; 86} 87 88/** 89 * bdisp_hw_get_and_clear_irq 90 * @bdisp: bdisp entity 91 * 92 * Read then reset interrupt status 93 * 94 * RETURNS: 95 * 0 if expected interrupt was raised. 96 */ 97int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp) 98{ 99 u32 its; 100 101 its = readl(bdisp->regs + BLT_ITS); 102 103 /* Check for the only expected IT: LastNode of AQ1 */ 104 if (!(its & BLT_ITS_AQ1_LNA)) { 105 dev_dbg(bdisp->dev, "Unexpected IT status: 0x%08X\n", its); 106 writel(its, bdisp->regs + BLT_ITS); 107 return -1; 108 } 109 110 /* Clear and mask */ 111 writel(its, bdisp->regs + BLT_ITS); 112 writel(0, bdisp->regs + BLT_ITM0); 113 114 return 0; 115} 116 117/** 118 * bdisp_hw_free_nodes 119 * @ctx: bdisp context 120 * 121 * Free node memory 122 * 123 * RETURNS: 124 * None 125 */ 126void bdisp_hw_free_nodes(struct bdisp_ctx *ctx) 127{ 128 if (ctx && ctx->node[0]) { 129 DEFINE_DMA_ATTRS(attrs); 130 131 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 132 dma_free_attrs(ctx->bdisp_dev->dev, 133 sizeof(struct bdisp_node) * MAX_NB_NODE, 134 ctx->node[0], ctx->node_paddr[0], &attrs); 135 } 136} 137 138/** 139 * bdisp_hw_alloc_nodes 140 * @ctx: bdisp context 141 * 142 * Allocate dma memory for nodes 143 * 144 * RETURNS: 145 * 0 on success 146 */ 147int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx) 148{ 149 struct device *dev = ctx->bdisp_dev->dev; 150 unsigned int i, node_size = 
sizeof(struct bdisp_node);
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the nodes within a single memory page */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
			       GFP_KERNEL | GFP_DMA, &attrs);
	if (!base) {
		dev_err(dev, "%s no mem\n", __func__);
		return -ENOMEM;
	}

	memset(base, 0, node_size * MAX_NB_NODE);

	/*
	 * Record virtual/physical address of each node; contiguity lets
	 * nodes be chained through their 'nip' (next node) physical addr.
	 */
	for (i = 0; i < MAX_NB_NODE; i++) {
		ctx->node[i] = base;
		ctx->node_paddr[i] = paddr;
		dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
			&paddr);
		base += node_size;
		paddr += node_size;
	}

	return 0;
}

/**
 * bdisp_hw_free_filters
 * @dev: device
 *
 * Free filters memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_filters(struct device *dev)
{
	int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);

	/* H and V tables share one allocation based at bdisp_h_filter[0] */
	if (bdisp_h_filter[0].virt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
			       bdisp_h_filter[0].paddr, &attrs);
	}
}

/**
 * bdisp_hw_alloc_filters
 * @dev: device
 *
 * Allocate dma memory for filters
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_filters(struct device *dev)
{
	unsigned int i, size;
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the filters within a single memory page */
	size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
	if (!base)
		return -ENOMEM;

	/* Setup filter addresses: copy coefs and record their DMA address */
	for (i = 0; i < NB_H_FILTER; i++) {
		bdisp_h_filter[i].min = bdisp_h_spec[i].min;
		bdisp_h_filter[i].max = bdisp_h_spec[i].max;
		memcpy(base, bdisp_h_spec[i].coef,
BDISP_HF_NB);
		bdisp_h_filter[i].virt = base;
		bdisp_h_filter[i].paddr = paddr;
		base += BDISP_HF_NB;
		paddr += BDISP_HF_NB;
	}

	for (i = 0; i < NB_V_FILTER; i++) {
		bdisp_v_filter[i].min = bdisp_v_spec[i].min;
		bdisp_v_filter[i].max = bdisp_v_spec[i].max;
		memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
		bdisp_v_filter[i].virt = base;
		bdisp_v_filter[i].paddr = paddr;
		base += BDISP_VF_NB;
		paddr += BDISP_VF_NB;
	}

	return 0;
}

/**
 * bdisp_hw_get_hf_addr
 * @inc: resize increment
 *
 * Find the horizontal filter table that fits the resize increment
 *
 * RETURNS:
 * table physical address
 */
static dma_addr_t bdisp_hw_get_hf_addr(u16 inc)
{
	unsigned int i;

	/* Search from the last table down; falls back to table 0 if no
	 * (min, max] range matches the increment */
	for (i = NB_H_FILTER - 1; i > 0; i--)
		if ((bdisp_h_filter[i].min < inc) &&
		    (inc <= bdisp_h_filter[i].max))
			break;

	return bdisp_h_filter[i].paddr;
}

/**
 * bdisp_hw_get_vf_addr
 * @inc: resize increment
 *
 * Find the vertical filter table that fits the resize increment
 *
 * RETURNS:
 * table physical address
 */
static dma_addr_t bdisp_hw_get_vf_addr(u16 inc)
{
	unsigned int i;

	/* Same fallback-to-0 search as bdisp_hw_get_hf_addr */
	for (i = NB_V_FILTER - 1; i > 0; i--)
		if ((bdisp_v_filter[i].min < inc) &&
		    (inc <= bdisp_v_filter[i].max))
			break;

	return bdisp_v_filter[i].paddr;
}

/**
 * bdisp_hw_get_inc
 * @from: input size
 * @to: output size
 * @inc: resize increment in 6.10 format
 *
 * Computes the increment (inverse of scale) in 6.10 format
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc)
{
	u32 tmp;

	if (!to)
		return -EINVAL;

	if (to == from) {
		/* 1:1 scale -> increment is exactly 1.0 in 6.10 format */
		*inc = 1 << 10;
		return 0;
	}

	tmp = (from << 10) / to;
	if ((tmp > 0xFFFF) || (!tmp))
		/* overflow (downscale x 63) or too small (upscale x 1024) */
		return -EINVAL;

	*inc = (u16)tmp;

	return 0;
}

/**
 * bdisp_hw_get_hv_inc
 * @ctx: device context
 * @h_inc: horizontal increment
 * @v_inc: vertical increment
 *
 * Computes the horizontal & vertical increments (inverse of scale)
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_hv_inc(struct bdisp_ctx *ctx, u16 *h_inc, u16 *v_inc)
{
	u32 src_w, src_h, dst_w, dst_h;

	src_w = ctx->src.crop.width;
	src_h = ctx->src.crop.height;
	dst_w = ctx->dst.crop.width;
	dst_h = ctx->dst.crop.height;

	if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
	    bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
		dev_err(ctx->bdisp_dev->dev,
			"scale factors failed (%dx%d)->(%dx%d)\n",
			src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	return 0;
}

/**
 * bdisp_hw_get_op_cfg
 * @ctx: device context
 * @c: operation configuration
 *
 * Check which blitter operations are expected and sets the scaling increments
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_get_op_cfg(struct bdisp_ctx *ctx, struct bdisp_op_cfg *c)
{
	struct device *dev = ctx->bdisp_dev->dev;
	struct bdisp_frame *src = &ctx->src;
	struct bdisp_frame *dst = &ctx->dst;

	/* Absolute HW limit: one node per vertical stride */
	if (src->width > MAX_SRC_WIDTH * MAX_VERTICAL_STRIDES) {
		dev_err(dev, "Image width out of HW caps\n");
		return -EINVAL;
	}

	/* Wide image: needs several nodes (one per vertical stride) */
	c->wide = src->width > MAX_SRC_WIDTH;

	c->hflip = ctx->hflip;
	c->vflip = ctx->vflip;

	c->src_interlaced = (src->field == V4L2_FIELD_INTERLACED);

	c->src_nbp = src->fmt->nb_planes;
	/* Only NV12 and YUV420 are YUV here, and both are 4:2:0 subsampled */
	c->src_yuv = (src->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
			(src->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
	c->src_420 = c->src_yuv;

	c->dst_nbp = dst->fmt->nb_planes;
	c->dst_yuv = (dst->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
			(dst->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
	c->dst_420 = c->dst_yuv;

	/* Color conversion needed whenever src/dst color spaces differ */
	c->cconv = (c->src_yuv != c->dst_yuv);

	if
(bdisp_hw_get_hv_inc(ctx, &c->h_inc, &c->v_inc)) {
		dev_err(dev, "Scale factor out of HW caps\n");
		return -EINVAL;
	}

	/* Deinterlacing adjustment : stretch a field to a frame */
	if (c->src_interlaced)
		c->v_inc /= 2;

	/* Scaling needed whenever either increment is not exactly 1.0 */
	if ((c->h_inc != (1 << 10)) || (c->v_inc != (1 << 10)))
		c->scale = true;
	else
		c->scale = false;

	return 0;
}

/**
 * bdisp_hw_color_format
 * @pixelformat: v4l2 pixel format
 *
 * v4l2 to bdisp pixel format convert
 *
 * RETURNS:
 * bdisp pixel format
 */
static u32 bdisp_hw_color_format(u32 pixelformat)
{
	u32 ret;

	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
		ret = (BDISP_YUV_3B << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_NV12:
		ret = (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
		break;
	case V4L2_PIX_FMT_RGB565:
		ret = (BDISP_RGB565 << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_XBGR32: /* This V4L format actually refers to xRGB */
		ret = (BDISP_XRGB8888 << BLT_TTY_COL_SHIFT);
		break;
	case V4L2_PIX_FMT_RGB24: /* RGB888 format */
		ret = (BDISP_RGB888 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
		break;
	case V4L2_PIX_FMT_ABGR32: /* This V4L format actually refers to ARGB */
		/* fall through: ARGB is also the catch-all default */
	default:
		ret = (BDISP_ARGB8888 << BLT_TTY_COL_SHIFT) | BLT_TTY_ALPHA_R;
		break;
	}

	return ret;
}

/**
 * bdisp_hw_build_node
 * @ctx: device context
 * @cfg: operation configuration
 * @node: node to be set
 * @t_plan: whether the node refers to a RGB/Y or a CbCr plane
 * @src_x_offset: x offset in the source image
 *
 * Build a node
 *
 * RETURNS:
 * None
 */
static void bdisp_hw_build_node(struct bdisp_ctx *ctx,
				struct bdisp_op_cfg *cfg,
				struct bdisp_node *node,
				enum bdisp_target_plan t_plan, int src_x_offset)
{
	struct bdisp_frame *src = &ctx->src;
	struct bdisp_frame *dst = &ctx->dst;
	u16 h_inc, v_inc, yh_inc,
yv_inc;
	struct v4l2_rect src_rect = src->crop;
	struct v4l2_rect dst_rect = dst->crop;
	int dst_x_offset;
	s32 dst_width = dst->crop.width;
	u32 src_fmt, dst_fmt;
	const u32 *ivmx;

	dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);

	memset(node, 0, sizeof(*node));

	/* Adjust src and dst areas wrt src_x_offset (vertical stride split) */
	src_rect.left += src_x_offset;
	src_rect.width -= src_x_offset;
	src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);

	/* Map the stride's horizontal window onto the destination */
	dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
	dst_rect.left += dst_x_offset;
	dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;

	/* General */
	src_fmt = src->fmt->pixelformat;
	dst_fmt = dst->fmt->pixelformat;

	node->nip = 0;
	node->cic = BLT_CIC_ALL_GRP;
	node->ack = BLT_ACK_BYPASS_S2S3;

	/* Select the memory sources (S1/S2/S3) from the src plane count */
	switch (cfg->src_nbp) {
	case 1:
		/* Src2 = RGB / Src1 = Src3 = off */
		node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF;
		break;
	case 2:
		/* Src3 = Y
		 * Src2 = CbCr or ColorFill if writing the Y plane
		 * Src1 = off */
		node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM;
		if (t_plan == BDISP_Y)
			node->ins |= BLT_INS_S2_CF;
		else
			node->ins |= BLT_INS_S2_MEM;
		break;
	case 3:
	default:
		/* Src3 = Y
		 * Src2 = Cb or ColorFill if writing the Y plane
		 * Src1 = Cr or ColorFill if writing the Y plane */
		node->ins = BLT_INS_S3_MEM;
		if (t_plan == BDISP_Y)
			node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF;
		else
			node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM;
		break;
	}

	/* Color convert */
	node->ins |= cfg->cconv ? BLT_INS_IVMX : 0;
	/* Scale needed if scaling OR 4:2:0 up/downsampling */
	node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ?
			BLT_INS_SCALE : 0;

	/* Target */
	node->tba = (t_plan == BDISP_CBCR) ?
dst->paddr[1] : dst->paddr[0];

	node->tty = dst->bytesperline;
	node->tty |= bdisp_hw_color_format(dst_fmt);
	node->tty |= BLT_TTY_DITHER;
	node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0;
	node->tty |= cfg->hflip ? BLT_TTY_HSO : 0;
	node->tty |= cfg->vflip ? BLT_TTY_VSO : 0;

	if (cfg->dst_420 && (t_plan == BDISP_CBCR)) {
		/* 420 chroma downsampling: chroma plane is half-sized */
		dst_rect.height /= 2;
		dst_rect.width /= 2;
		dst_rect.left /= 2;
		dst_rect.top /= 2;
		dst_x_offset /= 2;
		dst_width /= 2;
	}

	/* Flipped output starts from the opposite edge (HSO/VSO set above) */
	node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top;
	node->txy <<= 16;
	node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) :
			dst_rect.left;

	node->tsz = dst_rect.height << 16 | dst_rect.width;

	if (cfg->src_interlaced) {
		/* handle only the top field which is half height of a frame */
		src_rect.top /= 2;
		src_rect.height /= 2;
	}

	if (cfg->src_nbp == 1) {
		/* Src 2 : RGB */
		node->s2ba = src->paddr[0];

		/* Doubling the pitch skips the bottom field lines */
		node->s2ty = src->bytesperline;
		if (cfg->src_interlaced)
			node->s2ty *= 2;

		node->s2ty |= bdisp_hw_color_format(src_fmt);

		node->s2xy = src_rect.top << 16 | src_rect.left;
		node->s2sz = src_rect.height << 16 | src_rect.width;
	} else {
		/* Src 2 : Cb or CbCr */
		if (cfg->src_420) {
			/* 420 chroma upsampling */
			src_rect.top /= 2;
			src_rect.left /= 2;
			src_rect.width /= 2;
			src_rect.height /= 2;
		}

		node->s2ba = src->paddr[1];

		/* 3-plane format: Cb/Cr pitch is half the luma pitch */
		node->s2ty = src->bytesperline;
		if (cfg->src_nbp == 3)
			node->s2ty /= 2;
		if (cfg->src_interlaced)
			node->s2ty *= 2;

		node->s2ty |= bdisp_hw_color_format(src_fmt);

		node->s2xy = src_rect.top << 16 | src_rect.left;
		node->s2sz = src_rect.height << 16 | src_rect.width;

		if (cfg->src_nbp == 3) {
			/* Src 1 : Cr (same geometry as Cb) */
			node->s1ba = src->paddr[2];

			node->s1ty = node->s2ty;
			node->s1xy = node->s2xy;
		}

		/* Src 3 : Y
 */
		node->s3ba = src->paddr[0];

		node->s3ty = src->bytesperline;
		if (cfg->src_interlaced)
			node->s3ty *= 2;
		node->s3ty |= bdisp_hw_color_format(src_fmt);

		if ((t_plan != BDISP_CBCR) && cfg->src_420) {
			/* No chroma upsampling for output RGB / Y plane.
			 * Note: *2 doubles both packed halfwords (top<<16|left
			 * resp. height<<16|width) as long as the low halfword
			 * does not carry into the high one. */
			node->s3xy = node->s2xy * 2;
			node->s3sz = node->s2sz * 2;
		} else {
			/* No need to read Y (Src3) when writing Chroma */
			node->s3ty |= BLT_S3TY_BLANK_ACC;
			node->s3xy = node->s2xy;
			node->s3sz = node->s2sz;
		}
	}

	/* Resize (scale OR 4:2:0: chroma up/downsampling) */
	if (node->ins & BLT_INS_SCALE) {
		/* no need to compute Y when writing CbCr from RGB input */
		bool skip_y = (t_plan == BDISP_CBCR) && !cfg->src_yuv;

		/* FCTL */
		if (cfg->scale) {
			node->fctl = BLT_FCTL_HV_SCALE;
			if (!skip_y)
				node->fctl |= BLT_FCTL_Y_HV_SCALE;
		} else {
			node->fctl = BLT_FCTL_HV_SAMPLE;
			if (!skip_y)
				node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
		}

		/* RSF - Chroma may need to be up/downsampled */
		h_inc = cfg->h_inc;
		v_inc = cfg->v_inc;
		if (!cfg->src_420 && cfg->dst_420 && (t_plan == BDISP_CBCR)) {
			/* RGB to 4:2:0 for Chroma: downsample */
			h_inc *= 2;
			v_inc *= 2;
		} else if (cfg->src_420 && !cfg->dst_420) {
			/* 4:2:0: to RGB: upsample */
			h_inc /= 2;
			v_inc /= 2;
		}
		node->rsf = v_inc << 16 | h_inc;

		/* RZI */
		node->rzi = BLT_RZI_DEFAULT;

		/* Filter table physical addr */
		node->hfp = bdisp_hw_get_hf_addr(h_inc);
		node->vfp = bdisp_hw_get_vf_addr(v_inc);

		/* Y version: luma always uses the nominal increments */
		if (!skip_y) {
			yh_inc = cfg->h_inc;
			yv_inc = cfg->v_inc;

			node->y_rsf = yv_inc << 16 | yh_inc;
			node->y_rzi = BLT_RZI_DEFAULT;
			node->y_hfp = bdisp_hw_get_hf_addr(yh_inc);
			node->y_vfp = bdisp_hw_get_vf_addr(yv_inc);
		}
	}

	/* Versatile matrix for RGB / YUV conversion */
	if (cfg->cconv) {
		ivmx = cfg->src_yuv ?
bdisp_yuv_to_rgb : bdisp_rgb_to_yuv;

		node->ivmx0 = ivmx[0];
		node->ivmx1 = ivmx[1];
		node->ivmx2 = ivmx[2];
		node->ivmx3 = ivmx[3];
	}
}

/**
 * bdisp_hw_build_all_nodes
 * @ctx: device context
 *
 * Build all the nodes for the blitter operation
 *
 * RETURNS:
 * 0 on success
 */
static int bdisp_hw_build_all_nodes(struct bdisp_ctx *ctx)
{
	struct bdisp_op_cfg cfg;
	unsigned int i, nid = 0;
	int src_x_offset = 0;

	/* Sanity check: all nodes must have been allocated beforehand */
	for (i = 0; i < MAX_NB_NODE; i++)
		if (!ctx->node[i]) {
			dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i);
			return -EINVAL;
		}

	/* Get configuration (scale, flip, ...) */
	if (bdisp_hw_get_op_cfg(ctx, &cfg))
		return -EINVAL;

	/* Split source in vertical strides (HW constraint) */
	for (i = 0; i < MAX_VERTICAL_STRIDES; i++) {
		/* Build RGB/Y node and link it to the previous node */
		bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
				cfg.dst_nbp == 1 ? BDISP_RGB : BDISP_Y,
				src_x_offset);
		if (nid)
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
		nid++;

		/* Build additional Cb(Cr) node, link it to the previous one */
		if (cfg.dst_nbp > 1) {
			bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
					BDISP_CBCR, src_x_offset);
			ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
			nid++;
		}

		/* Next stride until full width covered */
		src_x_offset += MAX_SRC_WIDTH;
		if (src_x_offset >= ctx->src.crop.width)
			break;
	}

	/* Mark last node as the last (nip == 0 ends the HW chain) */
	ctx->node[nid - 1]->nip = 0;

	return 0;
}

/**
 * bdisp_hw_save_request
 * @ctx: device context
 *
 * Save a copy of the request and of the built nodes
 * (debug-only snapshot, stored in bdisp_dev->dbg)
 *
 * RETURNS:
 * None
 */
static void bdisp_hw_save_request(struct bdisp_ctx *ctx)
{
	struct bdisp_node **copy_node = ctx->bdisp_dev->dbg.copy_node;
	struct bdisp_request *request = &ctx->bdisp_dev->dbg.copy_request;
	struct bdisp_node
**node = ctx->node;
	int i;

	/* Request copy */
	request->src = ctx->src;
	request->dst = ctx->dst;
	request->hflip = ctx->hflip;
	request->vflip = ctx->vflip;
	request->nb_req++;

	/* Nodes copy */
	for (i = 0; i < MAX_NB_NODE; i++) {
		/* Allocate memory if not done yet (lazy, device-managed,
		 * so it is only done once and freed with the device) */
		if (!copy_node[i]) {
			copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
						    sizeof(*copy_node[i]),
						    GFP_KERNEL);
			if (!copy_node[i])
				return;
		}
		*copy_node[i] = *node[i];
	}
}

/**
 * bdisp_hw_update
 * @ctx: device context
 *
 * Send the request to the HW
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_update(struct bdisp_ctx *ctx)
{
	int ret;
	struct bdisp_dev *bdisp = ctx->bdisp_dev;
	struct device *dev = bdisp->dev;
	unsigned int node_id;

	dev_dbg(dev, "%s\n", __func__);

	/* build nodes */
	ret = bdisp_hw_build_all_nodes(ctx);
	if (ret) {
		dev_err(dev, "cannot build nodes (%d)\n", ret);
		return ret;
	}

	/* Save a copy of the request (for debug) */
	bdisp_hw_save_request(ctx);

	/* Configure interrupt to 'Last Node Reached for AQ1' */
	writel(BLT_AQ1_CTL_CFG, bdisp->regs + BLT_AQ1_CTL);
	writel(BLT_ITS_AQ1_LNA, bdisp->regs + BLT_ITM0);

	/* Write first node addr */
	writel(ctx->node_paddr[0], bdisp->regs + BLT_AQ1_IP);

	/* Find and write last node addr : this starts the HW processing.
	 * The last node is the first one with a zero 'nip' link. */
	for (node_id = 0; node_id < MAX_NB_NODE - 1; node_id++) {
		if (!ctx->node[node_id]->nip)
			break;
	}
	writel(ctx->node_paddr[node_id], bdisp->regs + BLT_AQ1_LNA);

	return 0;
}