/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "fjes_hw.h"
#include "fjes.h"

static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);

/* supported MTU list */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};

u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
	u8 *base = hw->base;
	u32 value = 0;

	value = readl(&base[reg]);

	return value;
}

static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);

	return base;
}

static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}
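
/* Assert the reset bit in the DCTL register and poll roughly once per
 * second until the hardware clears it; returns 0 on success or -EIO if
 * FJES_DEVICE_RESET_TIMEOUT expires first.
 */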
int fjes_hw_reset(struct fjes_hw *hw)
{
	union REG_DCTL dctl;
	int timeout;

	dctl.reg = 0;
	dctl.bits.reset = 1;
	wr32(XSCT_DCTL, dctl.reg);

	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
	dctl.reg = rd32(XSCT_DCTL);
	while ((dctl.bits.reset == 1) && (timeout > 0)) {
		msleep(1000);
		dctl.reg = rd32(XSCT_DCTL);
		timeout -= 1000;
	}

	return timeout > 0 ? 0 : -EIO;
}

static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
	union REG_MAX_EP info;

	info.reg = rd32(XSCT_MAX_EP);

	return info.bits.maxep;
}

static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
	union REG_OWNER_EPID info;

	info.reg = rd32(XSCT_OWNER_EPID);

	return info.bits.epid;
}

static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
	size_t size;

	size = sizeof(struct fjes_device_shared_info) +
	       (sizeof(u8) * hw->max_epid);
	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
	if (!hw->hw_info.share)
		return -ENOMEM;

	hw->hw_info.share->epnum = hw->max_epid;

	return 0;
}

static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}

static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
	void *mem;

	mem = vzalloc(EP_BUFFER_SIZE);
	if (!mem)
		return -ENOMEM;

	epbh->buffer = mem;
	epbh->size = EP_BUFFER_SIZE;

	epbh->info = (union ep_buffer_info *)mem;
	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

	return 0;
}

static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
	vfree(epbh->buffer);
	epbh->buffer = NULL;
	epbh->size = 0;

	epbh->info = NULL;
	epbh->ring = NULL;
}

void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0;  /* version 0 */

	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];

	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
		EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];
}

void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
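
/* Allocate the per-EP shared buffer pairs, the command request/response
 * buffers and the shared status region, initialize each buffer pair with a
 * zero MAC address and the smallest supported MTU, then program the
 * physical addresses of the buffers into the command registers.
 */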
static int fjes_hw_setup(struct fjes_hw *hw)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct fjes_device_command_param param;
	struct ep_share_mem_info *buf_pair;
	size_t mem_size;
	int result;
	int epidx;
	void *buf;

	hw->hw_info.max_epid = &hw->max_epid;
	hw->hw_info.my_epid = &hw->my_epid;

	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	hw->ep_shm_info = (struct ep_share_mem_info *)buf;

	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.req_buf))
		return -ENOMEM;

	hw->hw_info.req_buf_size = mem_size;

	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!(hw->hw_info.res_buf))
		return -ENOMEM;

	hw->hw_info.res_buf_size = mem_size;

	result = fjes_hw_alloc_shared_status_region(hw);
	if (result)
		return result;

	hw->hw_info.buffer_share_bit = 0;
	hw->hw_info.buffer_unshare_reserve_bit = 0;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx != hw->my_epid) {
			buf_pair = &hw->ep_shm_info[epidx];

			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
			if (result)
				return result;

			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
			if (result)
				return result;

			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
					    fjes_support_mtu[0]);
			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
					    fjes_support_mtu[0]);
		}
	}

	memset(&param, 0, sizeof(param));

	param.req_len = hw->hw_info.req_buf_size;
	param.req_start = __pa(hw->hw_info.req_buf);
	param.res_len = hw->hw_info.res_buf_size;
	param.res_start = __pa(hw->hw_info.res_buf);

	param.share_start = __pa(hw->hw_info.share->ep_status);

	fjes_hw_init_command_registers(hw, &param);

	return 0;
}

static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}

int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	return ret;
}

void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {
		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	cancel_work_sync(&hw->update_zone_task);
	cancel_work_sync(&hw->epstop_task);
}
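
/* Kick a device command by writing the request type to the CR register
 * and, if no error is flagged there, poll the CS register roughly once per
 * second until the command completes or FJES_COMMAND_REQ_TIMEOUT expires.
 * Errors reported in CR are mapped to the matching status codes.
 */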
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;

	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	return ret;
}

int fjes_hw_request_info(struct fjes_hw *hw)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int result;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

	res_buf->info.length = 0;
	res_buf->info.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);

	result = 0;

	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
		res_buf->info.length) {
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->info.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_ERROR_STATUS:
			result = -EPERM;
			break;
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
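
/* Share this EP's TX/RX buffer pair with dest_epid: the request carries
 * the physical address of every EP_BUFFER_INFO_SIZE chunk of both buffers.
 * While the response reports BUSY the command is reissued, with a per-EP
 * staggered delay, until FJES_COMMAND_REQ_BUFF_TIMEOUT expires.  On success
 * the EP's bit is set in buffer_share_bit.
 */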
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->share_buffer.length = 0;
		res_buf->share_buffer.code = 0;

		ret = fjes_hw_issue_request_command(
				hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	if (res_buf->share_buffer.length !=
			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
		result = -ENOMSG;
	else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
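
/* Reverse of fjes_hw_register_buff_addr(): issue FJES_CMD_REQ_UNSHARE_BUFFER
 * for dest_epid, retrying while the response reports BUSY, and clear the
 * EP's bit in buffer_share_bit on success.
 */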
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!hw->base)
		return -EPERM;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	if (res_buf->unshare_buffer.length !=
			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}

int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
			    enum REG_ICTL_MASK mask)
{
	u32 ig = mask | dest_epid;

	wr32(XSCT_IG, cpu_to_le32(ig));

	return 0;
}

u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
	u32 cur_is;

	cur_is = rd32(XSCT_IS);

	return cur_is;
}

void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}

bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
	if (epid >= hw->max_epid)
		return false;

	if ((hw->ep_shm_info[epid].es_status !=
			FJES_ZONING_STATUS_ENABLE) ||
	    (hw->ep_shm_info[hw->my_epid].zone ==
			FJES_ZONING_ZONE_TYPE_NONE))
		return false;
	else
		return (hw->ep_shm_info[epid].zone ==
				hw->ep_shm_info[hw->my_epid].zone);
}

int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
			   int dest_epid)
{
	int value = false;

	if (dest_epid < share->epnum)
		value = share->ep_status[dest_epid];

	return value;
}

static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}

static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}

enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
	enum ep_partner_status status;

	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
			status = EP_PARTNER_WAITING;
		} else {
			if (fjes_hw_epid_is_stop_process_done(hw, epid))
				status = EP_PARTNER_COMPLETE;
			else
				status = EP_PARTNER_SHARED;
		}
	} else {
		status = EP_PARTNER_UNSHARE;
	}

	return status;
}

void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
			FJES_RX_STOP_REQ_REQUEST;
	}
}
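
/* Poll every 100 ms until each EP flagged in buffer_unshare_reserve_bit has
 * either become unshared or acknowledged the stop request with
 * FJES_RX_STOP_REQ_DONE, clearing any leftover bits at the end.  Returns 0
 * on success or -EBUSY if FJES_COMMAND_EPSTOP_WAIT_TIMEOUT expired.
 */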
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	union ep_buffer_info *info;
	int wait_time = 0;
	int epidx;

	while (hw->hw_info.buffer_unshare_reserve_bit &&
	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			status = fjes_hw_epid_is_shared(hw->hw_info.share,
							epidx);
			info = hw->ep_shm_info[epidx].rx.info;
			if ((!status ||
			     (info->v1i.rx_status &
			      FJES_RX_STOP_REQ_DONE)) &&
			    test_bit(epidx,
				     &hw->hw_info.buffer_unshare_reserve_bit)) {
				clear_bit(epidx,
					  &hw->hw_info.buffer_unshare_reserve_bit);
			}
		}

		msleep(100);
		wait_time += 100;
	}

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
			clear_bit(epidx,
				  &hw->hw_info.buffer_unshare_reserve_bit);
	}

	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
			? 0 : -EBUSY;
}

bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
	union ep_buffer_info *info = epbh->info;

	return (info->common.version == version);
}

bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;

	return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
}

bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	bool ret = false;
	int i;

	if (vlan_id == 0) {
		ret = true;
	} else {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i]) {
				ret = true;
				break;
			}
		}
	}
	return ret;
}

bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
		if (info->v1i.vlan_id[i] == 0) {
			info->v1i.vlan_id[i] = vlan_id;
			return true;
		}
	}
	return false;
}

void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
	union ep_buffer_info *info = epbh->info;
	int i;

	if (0 != vlan_id) {
		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
			if (vlan_id == info->v1i.vlan_id[i])
				info->v1i.vlan_id[i] = 0;
		}
	}
}

bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (info->v1i.count_max == 0)
		return true;

	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
			     info->v1i.count_max);
}

void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.head,
							 info->v1i.count_max) *
							info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}

void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
	union ep_buffer_info *info = epbh->info;

	if (fjes_hw_epbuf_rx_is_empty(epbh))
		return;

	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}

int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
							(info->v1i.tail - 1,
							 info->v1i.count_max) *
							info->v1i.frame_max]);

	ring_frame->frame_size = size;
	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}
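
/* Work item that refreshes the zoning information via fjes_hw_request_info()
 * and then, per partner EP, registers newly shareable buffers, unregisters
 * buffers for EPs that left the zone, or raises a TX/RX stop request,
 * scheduling the force-close or unshare-watch work as required.
 */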
static void fjes_hw_update_zone_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work,
			struct fjes_hw, update_zone_task);

	struct my_s {u8 es_status; u8 zone; } *info;
	union fjes_device_command_res *res_buf;
	enum ep_partner_status pstatus;

	struct fjes_adapter *adapter;
	struct net_device *netdev;

	ulong unshare_bit = 0;
	ulong share_bit = 0;
	ulong irq_bit = 0;

	int epidx;
	int ret;

	adapter = (struct fjes_adapter *)hw->back;
	netdev = adapter->netdev;
	res_buf = hw->hw_info.res_buf;
	info = (struct my_s *)&res_buf->info.info;

	mutex_lock(&hw->hw_info.lock);

	ret = fjes_hw_request_info(hw);
	switch (ret) {
	case -ENOMSG:
	case -EBUSY:
	default:
		if (!work_pending(&adapter->force_close_task)) {
			adapter->force_reset = true;
			schedule_work(&adapter->force_close_task);
		}
		break;

	case 0:

		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid) {
				hw->ep_shm_info[epidx].es_status =
					info[epidx].es_status;
				hw->ep_shm_info[epidx].zone =
					info[epidx].zone;
				continue;
			}

			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
			switch (pstatus) {
			case EP_PARTNER_UNSHARE:
			default:
				if ((info[epidx].zone !=
					FJES_ZONING_ZONE_TYPE_NONE) &&
				    (info[epidx].es_status ==
					FJES_ZONING_STATUS_ENABLE) &&
				    (info[epidx].zone ==
					info[hw->my_epid].zone))
					set_bit(epidx, &share_bit);
				else
					set_bit(epidx, &unshare_bit);
				break;

			case EP_PARTNER_COMPLETE:
			case EP_PARTNER_WAITING:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone)) {
					set_bit(epidx,
						&adapter->unshare_watch_bitmask);
					set_bit(epidx,
						&hw->hw_info.buffer_unshare_reserve_bit);
				}
				break;

			case EP_PARTNER_SHARED:
				if ((info[epidx].zone ==
					FJES_ZONING_ZONE_TYPE_NONE) ||
				    (info[epidx].es_status !=
					FJES_ZONING_STATUS_ENABLE) ||
				    (info[epidx].zone !=
					info[hw->my_epid].zone))
					set_bit(epidx, &irq_bit);
				break;
			}

			hw->ep_shm_info[epidx].es_status =
				info[epidx].es_status;
			hw->ep_shm_info[epidx].zone = info[epidx].zone;
		}
		break;
	}

	mutex_unlock(&hw->hw_info.lock);

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (test_bit(epidx, &share_bit)) {
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr, netdev->mtu);

			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_register_buff_addr(
				hw, epidx, &hw->ep_shm_info[epidx]);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}
			mutex_unlock(&hw->hw_info.lock);
		}

		if (test_bit(epidx, &unshare_bit)) {
			mutex_lock(&hw->hw_info.lock);

			ret = fjes_hw_unregister_buff_addr(hw, epidx);

			switch (ret) {
			case 0:
				break;
			case -ENOMSG:
			case -EBUSY:
			default:
				if (!work_pending(&adapter->force_close_task)) {
					adapter->force_reset = true;
					schedule_work(
						&adapter->force_close_task);
				}
				break;
			}

			mutex_unlock(&hw->hw_info.lock);

			if (ret == 0)
				fjes_hw_setup_epbuf(
					&hw->ep_shm_info[epidx].tx,
					netdev->dev_addr, netdev->mtu);
		}

		if (test_bit(epidx, &irq_bit)) {
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);

			set_bit(epidx, &hw->txrx_stop_req_bit);
			hw->ep_shm_info[epidx].tx.
				info->v1i.rx_status |=
					FJES_RX_STOP_REQ_REQUEST;
			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		}
	}

	if (irq_bit || adapter->unshare_watch_bitmask) {
		if (!work_pending(&adapter->unshare_watch_task))
			queue_work(adapter->control_wq,
				   &adapter->unshare_watch_task);
	}
}
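
/* Work item handling EP stop requests: acknowledge each requesting EP by
 * setting FJES_RX_STOP_REQ_DONE in its TX buffer status, move the EP to the
 * adapter's unshare watch bitmask and kick the unshare watch task.
 */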
static void fjes_hw_epstop_task(struct work_struct *work)
{
	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;

	ulong remain_bit;
	int epid_bit;

	while ((remain_bit = hw->epstop_req_bit)) {
		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
			if (remain_bit & 1) {
				hw->ep_shm_info[epid_bit].
					tx.info->v1i.rx_status |=
						FJES_RX_STOP_REQ_DONE;

				clear_bit(epid_bit, &hw->epstop_req_bit);
				set_bit(epid_bit,
					&adapter->unshare_watch_bitmask);

				if (!work_pending(&adapter->unshare_watch_task))
					queue_work(
						adapter->control_wq,
						&adapter->unshare_watch_task);
			}
		}
	}
}