root/drivers/hid/intel-ish-hid/ishtp/client.c

DEFINITIONS

This source file includes the following definitions.
  1. ishtp_cl_get_tx_free_buffer_size
  2. ishtp_cl_get_tx_free_rings
  3. ishtp_read_list_flush
  4. ishtp_cl_flush_queues
  5. ishtp_cl_init
  6. ishtp_cl_allocate
  7. ishtp_cl_free
  8. ishtp_cl_link
  9. ishtp_cl_unlink
  10. ishtp_cl_disconnect
  11. ishtp_cl_is_other_connecting
  12. ishtp_cl_connect
  13. ishtp_cl_read_start
  14. ishtp_cl_send
  15. ishtp_cl_read_complete
  16. ipc_tx_callback
  17. ishtp_cl_send_msg_ipc
  18. ishtp_cl_send_msg_dma
  19. ishtp_cl_send_msg
  20. recv_ishtp_cl_msg
  21. recv_ishtp_cl_msg_dma
  22. ishtp_get_client_data
  23. ishtp_set_client_data
  24. ishtp_get_ishtp_device
  25. ishtp_set_tx_ring_size
  26. ishtp_set_rx_ring_size
  27. ishtp_set_connection_state
  28. ishtp_cl_set_fw_client_id

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * ISHTP client logic
   4  *
   5  * Copyright (c) 2003-2016, Intel Corporation.
   6  */
   7 
   8 #include <linux/slab.h>
   9 #include <linux/sched.h>
  10 #include <linux/wait.h>
  11 #include <linux/delay.h>
  12 #include <linux/dma-mapping.h>
  13 #include "hbm.h"
  14 #include "client.h"
  15 
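      /**
       * ishtp_cl_get_tx_free_buffer_size() - Get free TX buffer total size
       * @cl: client device instance
       *
       * Return: total size in bytes currently available in the client's free
       * TX ring buffers
       */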
  16 int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
  17 {
  18         unsigned long tx_free_flags;
  19         int size;
  20 
  21         spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
  22         size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
  23         spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
  24 
  25         return size;
  26 }
  27 EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
  28 
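      /**
       * ishtp_cl_get_tx_free_rings() - Get number of free TX ring buffers
       * @cl: client device instance
       *
       * Return: number of currently free buffers in the client's TX ring
       */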
  29 int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
  30 {
  31         return cl->tx_ring_free_size;
  32 }
  33 EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
  34 
  35 /**
  36  * ishtp_read_list_flush() - Flush read queue
  37  * @cl: ishtp client instance
  38  *
  39  * Used to remove all entries from read queue for a client
  40  */
  41 static void ishtp_read_list_flush(struct ishtp_cl *cl)
  42 {
  43         struct ishtp_cl_rb *rb;
  44         struct ishtp_cl_rb *next;
  45         unsigned long   flags;
  46 
  47         spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
  48         list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
  49                 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
  50                         list_del(&rb->list);
  51                         ishtp_io_rb_free(rb);
  52                 }
  53         spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
  54 }
  55 
  56 /**
  57  * ishtp_cl_flush_queues() - Flush all queues for a client
  58  * @cl: ishtp client instance
  59  *
  60  * Used to remove all queues for a client. This is called when a client device
   61  * needs to be reset due to an error, on S3 resume or during module removal.
  62  *
  63  * Return: 0 on success else -EINVAL if device is NULL
  64  */
  65 int ishtp_cl_flush_queues(struct ishtp_cl *cl)
  66 {
  67         if (WARN_ON(!cl || !cl->dev))
  68                 return -EINVAL;
  69 
  70         ishtp_read_list_flush(cl);
  71 
  72         return 0;
  73 }
  74 EXPORT_SYMBOL(ishtp_cl_flush_queues);
  75 
  76 /**
  77  * ishtp_cl_init() - Initialize all fields of a client device
  78  * @cl: ishtp client instance
  79  * @dev: ishtp device
  80  *
   81  * Initializes the client device fields: init spinlocks, init queues etc.
  82  * This function is called during new client creation
  83  */
  84 static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
  85 {
  86         memset(cl, 0, sizeof(struct ishtp_cl));
  87         init_waitqueue_head(&cl->wait_ctrl_res);
  88         spin_lock_init(&cl->free_list_spinlock);
  89         spin_lock_init(&cl->in_process_spinlock);
  90         spin_lock_init(&cl->tx_list_spinlock);
  91         spin_lock_init(&cl->tx_free_list_spinlock);
  92         spin_lock_init(&cl->fc_spinlock);
  93         INIT_LIST_HEAD(&cl->link);
  94         cl->dev = dev;
  95 
  96         INIT_LIST_HEAD(&cl->free_rb_list.list);
  97         INIT_LIST_HEAD(&cl->tx_list.list);
  98         INIT_LIST_HEAD(&cl->tx_free_list.list);
  99         INIT_LIST_HEAD(&cl->in_process_list.list);
 100 
 101         cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
 102         cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
 103         cl->tx_ring_free_size = cl->tx_ring_size;
 104 
 105         /* dma */
 106         cl->last_tx_path = CL_TX_PATH_IPC;
 107         cl->last_dma_acked = 1;
 108         cl->last_dma_addr = NULL;
 109         cl->last_ipc_acked = 1;
 110 }
 111 
 112 /**
 113  * ishtp_cl_allocate() - allocates client structure and sets it up.
 114  * @dev: ishtp device
 115  *
 116  * Allocate memory for new client device and call to initialize each field.
 117  *
 118  * Return: The allocated client instance or NULL on failure
 119  */
 120 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
 121 {
 122         struct ishtp_cl *cl;
 123 
 124         cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
 125         if (!cl)
 126                 return NULL;
 127 
 128         ishtp_cl_init(cl, cl_device->ishtp_dev);
 129         return cl;
 130 }
 131 EXPORT_SYMBOL(ishtp_cl_allocate);
 132 
 133 /**
 134  * ishtp_cl_free() - Frees a client device
 135  * @cl: client device instance
 136  *
 137  * Frees a client device
 138  */
 139 void    ishtp_cl_free(struct ishtp_cl *cl)
 140 {
 141         struct ishtp_device *dev;
 142         unsigned long flags;
 143 
 144         if (!cl)
 145                 return;
 146 
 147         dev = cl->dev;
 148         if (!dev)
 149                 return;
 150 
 151         spin_lock_irqsave(&dev->cl_list_lock, flags);
 152         ishtp_cl_free_rx_ring(cl);
 153         ishtp_cl_free_tx_ring(cl);
 154         kfree(cl);
 155         spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 156 }
 157 EXPORT_SYMBOL(ishtp_cl_free);
 158 
 159 /**
 160  * ishtp_cl_link() - Reserve a host id and link the client instance
 161  * @cl: client device instance
 162  *
  163  * This allocates a single bit in the hostmap. This function makes sure
  164  * that the number of simultaneously open client sessions stays bounded.
  165  * Once a host id is allocated, the client device instance is added to the
  166  * ishtp device's current client list.
 167  *
 168  * Return: 0 or error code on failure
 169  */
 170 int ishtp_cl_link(struct ishtp_cl *cl)
 171 {
 172         struct ishtp_device *dev;
 173         unsigned long flags, flags_cl;
 174         int id, ret = 0;
 175 
 176         if (WARN_ON(!cl || !cl->dev))
 177                 return -EINVAL;
 178 
 179         dev = cl->dev;
 180 
 181         spin_lock_irqsave(&dev->device_lock, flags);
 182 
 183         if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
 184                 ret = -EMFILE;
 185                 goto unlock_dev;
 186         }
 187 
 188         id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
 189 
 190         if (id >= ISHTP_CLIENTS_MAX) {
 191                 spin_unlock_irqrestore(&dev->device_lock, flags);
 192                 dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
 193                 return -ENOENT;
 194         }
 195 
 196         dev->open_handle_count++;
 197         cl->host_client_id = id;
 198         spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
 199         if (dev->dev_state != ISHTP_DEV_ENABLED) {
 200                 ret = -ENODEV;
 201                 goto unlock_cl;
 202         }
 203         list_add_tail(&cl->link, &dev->cl_list);
 204         set_bit(id, dev->host_clients_map);
 205         cl->state = ISHTP_CL_INITIALIZING;
 206 
 207 unlock_cl:
 208         spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
 209 unlock_dev:
 210         spin_unlock_irqrestore(&dev->device_lock, flags);
 211         return ret;
 212 }
 213 EXPORT_SYMBOL(ishtp_cl_link);
 214 
 215 /**
 216  * ishtp_cl_unlink() - remove fw_cl from the client device list
 217  * @cl: client device instance
 218  *
  219  * Remove a previously linked client from its ishtp device.
 220  */
 221 void ishtp_cl_unlink(struct ishtp_cl *cl)
 222 {
 223         struct ishtp_device *dev;
 224         struct ishtp_cl *pos;
 225         unsigned long   flags;
 226 
 227         /* don't shout on error exit path */
 228         if (!cl || !cl->dev)
 229                 return;
 230 
 231         dev = cl->dev;
 232 
 233         spin_lock_irqsave(&dev->device_lock, flags);
 234         if (dev->open_handle_count > 0) {
 235                 clear_bit(cl->host_client_id, dev->host_clients_map);
 236                 dev->open_handle_count--;
 237         }
 238         spin_unlock_irqrestore(&dev->device_lock, flags);
 239 
 240         /*
 241          * This checks that 'cl' is actually linked into device's structure,
 242          * before attempting 'list_del'
 243          */
 244         spin_lock_irqsave(&dev->cl_list_lock, flags);
 245         list_for_each_entry(pos, &dev->cl_list, link)
 246                 if (cl->host_client_id == pos->host_client_id) {
 247                         list_del_init(&pos->link);
 248                         break;
 249                 }
 250         spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 251 }
 252 EXPORT_SYMBOL(ishtp_cl_unlink);
 253 
 254 /**
 255  * ishtp_cl_disconnect() - Send disconnect request to firmware
 256  * @cl: client device instance
 257  *
 258  * Send a disconnect request for a client to firmware.
 259  *
 260  * Return: 0 if successful disconnect response from the firmware or error
 261  * code on failure
 262  */
 263 int ishtp_cl_disconnect(struct ishtp_cl *cl)
 264 {
 265         struct ishtp_device *dev;
 266         int err;
 267 
 268         if (WARN_ON(!cl || !cl->dev))
 269                 return -ENODEV;
 270 
 271         dev = cl->dev;
 272 
 273         dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
 274 
 275         if (cl->state != ISHTP_CL_DISCONNECTING) {
 276                 dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
 277                 return 0;
 278         }
 279 
 280         if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
 281                 dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
 282                 dev_err(&cl->device->dev, "failed to disconnect.\n");
 283                 return -ENODEV;
 284         }
 285 
 286         err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
 287                         (dev->dev_state != ISHTP_DEV_ENABLED ||
 288                         cl->state == ISHTP_CL_DISCONNECTED),
 289                         ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
 290 
 291         /*
  292          * If FW reset arrived, this will happen. Don't dereference 'cl',
  293          * as it may be freed already.
 294          */
 295         if (dev->dev_state != ISHTP_DEV_ENABLED) {
 296                 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
 297                                __func__);
 298                 return -ENODEV;
 299         }
 300 
 301         if (cl->state == ISHTP_CL_DISCONNECTED) {
 302                 dev->print_log(dev, "%s() successful\n", __func__);
 303                 return 0;
 304         }
 305 
 306         return -ENODEV;
 307 }
 308 EXPORT_SYMBOL(ishtp_cl_disconnect);
 309 
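The disconnect path above is normally paired with unlinking, flushing queues and freeing the client. Below is a minimal teardown sketch for a client driver's remove path, built only from functions defined in this file; the wrapper name is illustrative, not part of this source:

      static void example_client_teardown(struct ishtp_cl *cl)
      {
              /* Request disconnect from firmware and wait for the response */
              ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
              ishtp_cl_disconnect(cl);

              /* Release the host client id and drop from the device list */
              ishtp_cl_unlink(cl);

              /* Discard pending read buffers and free the client instance */
              ishtp_cl_flush_queues(cl);
              ishtp_cl_free(cl);
      }
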
 310 /**
  311  * ishtp_cl_is_other_connecting() - Check if another client is connecting
 312  * @cl: client device instance
 313  *
 314  * Checks if other client with the same fw client id is connecting
 315  *
  316  * Return: true if another client with the same fw client id is connecting, else false
 317  */
 318 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
 319 {
 320         struct ishtp_device *dev;
 321         struct ishtp_cl *pos;
 322         unsigned long   flags;
 323 
 324         if (WARN_ON(!cl || !cl->dev))
 325                 return false;
 326 
 327         dev = cl->dev;
 328         spin_lock_irqsave(&dev->cl_list_lock, flags);
 329         list_for_each_entry(pos, &dev->cl_list, link) {
 330                 if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
 331                                 cl->fw_client_id == pos->fw_client_id) {
 332                         spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 333                         return true;
 334                 }
 335         }
 336         spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 337 
 338         return false;
 339 }
 340 
 341 /**
 342  * ishtp_cl_connect() - Send connect request to firmware
 343  * @cl: client device instance
 344  *
 345  * Send a connect request for a client to firmware. If successful it will
  346  * allocate RX and TX ring buffers.
 347  *
 348  * Return: 0 if successful connect response from the firmware and able
 349  * to bind and allocate ring buffers or error code on failure
 350  */
 351 int ishtp_cl_connect(struct ishtp_cl *cl)
 352 {
 353         struct ishtp_device *dev;
 354         int rets;
 355 
 356         if (WARN_ON(!cl || !cl->dev))
 357                 return -ENODEV;
 358 
 359         dev = cl->dev;
 360 
 361         dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
 362 
 363         if (ishtp_cl_is_other_connecting(cl)) {
 364                 dev->print_log(dev, "%s() Busy\n", __func__);
 365                 return  -EBUSY;
 366         }
 367 
 368         if (ishtp_hbm_cl_connect_req(dev, cl)) {
 369                 dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
 370                 return -ENODEV;
 371         }
 372 
 373         rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
 374                                 (dev->dev_state == ISHTP_DEV_ENABLED &&
 375                                 (cl->state == ISHTP_CL_CONNECTED ||
 376                                  cl->state == ISHTP_CL_DISCONNECTED)),
 377                                 ishtp_secs_to_jiffies(
 378                                         ISHTP_CL_CONNECT_TIMEOUT));
 379         /*
  380          * If FW reset arrived, this will happen. Don't dereference 'cl',
  381          * as it may be freed already.
 382          */
 383         if (dev->dev_state != ISHTP_DEV_ENABLED) {
 384                 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
 385                                __func__);
 386                 return -EFAULT;
 387         }
 388 
 389         if (cl->state != ISHTP_CL_CONNECTED) {
 390                 dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
 391                                __func__);
 392                 return -EFAULT;
 393         }
 394 
 395         rets = cl->status;
 396         if (rets) {
 397                 dev->print_log(dev, "%s() Invalid status\n", __func__);
 398                 return rets;
 399         }
 400 
 401         rets = ishtp_cl_device_bind(cl);
 402         if (rets) {
 403                 dev->print_log(dev, "%s() Bind error\n", __func__);
 404                 ishtp_cl_disconnect(cl);
 405                 return rets;
 406         }
 407 
 408         rets = ishtp_cl_alloc_rx_ring(cl);
 409         if (rets) {
 410                 dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
 411                 /* if failed allocation, disconnect */
 412                 ishtp_cl_disconnect(cl);
 413                 return rets;
 414         }
 415 
 416         rets = ishtp_cl_alloc_tx_ring(cl);
 417         if (rets) {
 418                 dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
 419                 /* if failed allocation, disconnect */
 420                 ishtp_cl_free_rx_ring(cl);
 421                 ishtp_cl_disconnect(cl);
 422                 return rets;
 423         }
 424 
 425         /* Upon successful connection and allocation, emit flow-control */
 426         rets = ishtp_cl_read_start(cl);
 427 
 428         dev->print_log(dev, "%s() successful\n", __func__);
 429 
 430         return rets;
 431 }
 432 EXPORT_SYMBOL(ishtp_cl_connect);
 433 
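For reference, a client driver typically brings a connection up by allocating and linking a client, binding it to a firmware client id and then connecting. The sketch below uses only functions defined in this file; the wrapper name is illustrative, resolving fw_client_id from the client UUID (normally done through the bus helpers) is assumed to have happened already, and error handling is abbreviated:

      static int example_client_bringup(struct ishtp_cl_device *cl_device,
                                        int fw_client_id)
      {
              struct ishtp_cl *cl;
              int rv;

              cl = ishtp_cl_allocate(cl_device);
              if (!cl)
                      return -ENOMEM;

              rv = ishtp_cl_link(cl);
              if (rv)
                      goto out_free;

              /* Optionally override the default ring sizes before connecting */
              ishtp_set_tx_ring_size(cl, CL_DEF_TX_RING_SIZE);
              ishtp_set_rx_ring_size(cl, CL_DEF_RX_RING_SIZE);

              ishtp_cl_set_fw_client_id(cl, fw_client_id);
              ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

              rv = ishtp_cl_connect(cl);
              if (rv)
                      goto out_unlink;

              return 0;

      out_unlink:
              ishtp_cl_unlink(cl);
      out_free:
              ishtp_cl_free(cl);
              return rv;
      }
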
 434 /**
 435  * ishtp_cl_read_start() - Prepare to read client message
 436  * @cl: client device instance
 437  *
  438  * Get a free buffer from the pool of free read buffers and add it to the
  439  * device read list so it can receive contents. Send a flow control request
  440  * to firmware so that it is able to send the next message.
 441  *
 442  * Return: 0 if successful or error code on failure
 443  */
 444 int ishtp_cl_read_start(struct ishtp_cl *cl)
 445 {
 446         struct ishtp_device *dev;
 447         struct ishtp_cl_rb *rb;
 448         int rets;
 449         int i;
 450         unsigned long   flags;
 451         unsigned long   dev_flags;
 452 
 453         if (WARN_ON(!cl || !cl->dev))
 454                 return -ENODEV;
 455 
 456         dev = cl->dev;
 457 
 458         if (cl->state != ISHTP_CL_CONNECTED)
 459                 return -ENODEV;
 460 
 461         if (dev->dev_state != ISHTP_DEV_ENABLED)
 462                 return -ENODEV;
 463 
 464         i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
 465         if (i < 0) {
 466                 dev_err(&cl->device->dev, "no such fw client %d\n",
 467                         cl->fw_client_id);
 468                 return -ENODEV;
 469         }
 470 
 471         /* The current rb is the head of the free rb list */
 472         spin_lock_irqsave(&cl->free_list_spinlock, flags);
 473         if (list_empty(&cl->free_rb_list.list)) {
 474                 dev_warn(&cl->device->dev,
 475                          "[ishtp-ish] Rx buffers pool is empty\n");
 476                 rets = -ENOMEM;
 477                 rb = NULL;
 478                 spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
 479                 goto out;
 480         }
 481         rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
 482         list_del_init(&rb->list);
 483         spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
 484 
 485         rb->cl = cl;
 486         rb->buf_idx = 0;
 487 
 488         INIT_LIST_HEAD(&rb->list);
 489         rets = 0;
 490 
 491         /*
 492          * This must be BEFORE sending flow control -
 493          * response in ISR may come too fast...
 494          */
 495         spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
 496         list_add_tail(&rb->list, &dev->read_list.list);
 497         spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
 498         if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
 499                 rets = -ENODEV;
 500                 goto out;
 501         }
 502 out:
 503         /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
 504         if (rets && rb) {
 505                 spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
 506                 list_del(&rb->list);
 507                 spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
 508 
 509                 spin_lock_irqsave(&cl->free_list_spinlock, flags);
 510                 list_add_tail(&rb->list, &cl->free_rb_list.list);
 511                 spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
 512         }
 513         return rets;
 514 }
 515 
 516 /**
 517  * ishtp_cl_send() - Send a message to firmware
 518  * @cl: client device instance
 519  * @buf: message buffer
 520  * @length: length of message
 521  *
  522  * If the client is in the correct state to send a message, this function
  523  * gets a buffer from the tx ring buffers, copies the message data and
  524  * sends it using ishtp_cl_send_msg().
 525  *
 526  * Return: 0 if successful or error code on failure
 527  */
 528 int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
 529 {
 530         struct ishtp_device     *dev;
 531         int     id;
 532         struct ishtp_cl_tx_ring *cl_msg;
 533         int     have_msg_to_send = 0;
 534         unsigned long   tx_flags, tx_free_flags;
 535 
 536         if (WARN_ON(!cl || !cl->dev))
 537                 return -ENODEV;
 538 
 539         dev = cl->dev;
 540 
 541         if (cl->state != ISHTP_CL_CONNECTED) {
 542                 ++cl->err_send_msg;
 543                 return -EPIPE;
 544         }
 545 
 546         if (dev->dev_state != ISHTP_DEV_ENABLED) {
 547                 ++cl->err_send_msg;
 548                 return -ENODEV;
 549         }
 550 
 551         /* Check if we have fw client device */
 552         id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
 553         if (id < 0) {
 554                 ++cl->err_send_msg;
 555                 return -ENOENT;
 556         }
 557 
 558         if (length > dev->fw_clients[id].props.max_msg_length) {
 559                 ++cl->err_send_msg;
 560                 return -EMSGSIZE;
 561         }
 562 
 563         /* No free bufs */
 564         spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
 565         if (list_empty(&cl->tx_free_list.list)) {
 566                 spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
 567                         tx_free_flags);
 568                 ++cl->err_send_msg;
 569                 return  -ENOMEM;
 570         }
 571 
 572         cl_msg = list_first_entry(&cl->tx_free_list.list,
 573                 struct ishtp_cl_tx_ring, list);
 574         if (!cl_msg->send_buf.data) {
 575                 spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
 576                         tx_free_flags);
 577                 return  -EIO;
 578                 /* Should not happen, as free list is pre-allocated */
 579         }
 580         /*
 581          * This is safe, as 'length' is already checked for not exceeding
 582          * max ISHTP message size per client
 583          */
 584         list_del_init(&cl_msg->list);
 585         --cl->tx_ring_free_size;
 586 
 587         spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
 588         memcpy(cl_msg->send_buf.data, buf, length);
 589         cl_msg->send_buf.size = length;
 590         spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
 591         have_msg_to_send = !list_empty(&cl->tx_list.list);
 592         list_add_tail(&cl_msg->list, &cl->tx_list.list);
 593         spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 594 
 595         if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
 596                 ishtp_cl_send_msg(dev, cl);
 597 
 598         return  0;
 599 }
 600 EXPORT_SYMBOL(ishtp_cl_send);
 601 
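As a usage note, a caller can pair ishtp_cl_send() with the free-buffer helpers near the top of this file to avoid queueing when the TX ring is exhausted. A minimal sketch; the wrapper name and payload are placeholders:

      static int example_client_send(struct ishtp_cl *cl, uint8_t *payload,
                                     size_t len)
      {
              /* Refuse early rather than let ishtp_cl_send() fail with -ENOMEM */
              if (ishtp_cl_get_tx_free_buffer_size(cl) < len)
                      return -ENOSPC;

              /* Copies the payload into a TX ring buffer and queues it */
              return ishtp_cl_send(cl, payload, len);
      }
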
 602 /**
 603  * ishtp_cl_read_complete() - read complete
 604  * @rb: Pointer to client request block
 605  *
  606  * If the message is completely received, call ishtp_cl_bus_rx_event()
  607  * to process the message.
 608  */
 609 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
 610 {
 611         unsigned long   flags;
 612         int     schedule_work_flag = 0;
 613         struct ishtp_cl *cl = rb->cl;
 614 
 615         spin_lock_irqsave(&cl->in_process_spinlock, flags);
 616         /*
 617          * if in-process list is empty, then need to schedule
 618          * the processing thread
 619          */
 620         schedule_work_flag = list_empty(&cl->in_process_list.list);
 621         list_add_tail(&rb->list, &cl->in_process_list.list);
 622         spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
 623 
 624         if (schedule_work_flag)
 625                 ishtp_cl_bus_rx_event(cl->device);
 626 }
 627 
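ishtp_cl_bus_rx_event() ends up invoking the bound client driver's event callback, which is expected to drain the in-process buffers and recycle each one. A sketch of such a callback follows; it assumes the ishtp_cl_rx_get_rb() and ishtp_get_drvdata() helpers from the buffer/bus code, that the driver stored its ishtp_cl pointer as drvdata, and a hypothetical process_recv() handler:

      /* Hypothetical client driver event callback */
      static void example_client_event_cb(struct ishtp_cl_device *cl_device)
      {
              struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);
              struct ishtp_cl_rb *rb;

              while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                      /* buf_idx holds the number of bytes received in this rb */
                      process_recv(cl, rb->buffer.data, rb->buf_idx);

                      /* Return the rb to the free pool and re-arm flow control */
                      ishtp_cl_io_rb_recycle(rb);
              }
      }
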
 628 /**
 629  * ipc_tx_callback() - IPC tx callback function
 630  * @prm: Pointer to client device instance
 631  *
  632  * Send a message over IPC, either for the first time or from the completion
  633  * callback of the previous message.
 634  */
 635 static void ipc_tx_callback(void *prm)
 636 {
 637         struct ishtp_cl *cl = prm;
 638         struct ishtp_cl_tx_ring *cl_msg;
 639         size_t  rem;
 640         struct ishtp_device     *dev = (cl ? cl->dev : NULL);
 641         struct ishtp_msg_hdr    ishtp_hdr;
 642         unsigned long   tx_flags, tx_free_flags;
 643         unsigned char   *pmsg;
 644 
 645         if (!dev)
 646                 return;
 647 
 648         /*
  649          * Bail out if some critical error has
  650          * occurred before this callback is called
 651          */
 652         if (dev->dev_state != ISHTP_DEV_ENABLED)
 653                 return;
 654 
 655         if (cl->state != ISHTP_CL_CONNECTED)
 656                 return;
 657 
 658         spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
 659         if (list_empty(&cl->tx_list.list)) {
 660                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 661                 return;
 662         }
 663 
 664         if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
 665                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 666                 return;
 667         }
 668 
 669         if (!cl->sending) {
 670                 --cl->ishtp_flow_ctrl_creds;
 671                 cl->last_ipc_acked = 0;
 672                 cl->last_tx_path = CL_TX_PATH_IPC;
 673                 cl->sending = 1;
 674         }
 675 
 676         cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
 677                             list);
 678         rem = cl_msg->send_buf.size - cl->tx_offs;
 679 
 680         ishtp_hdr.host_addr = cl->host_client_id;
 681         ishtp_hdr.fw_addr = cl->fw_client_id;
 682         ishtp_hdr.reserved = 0;
 683         pmsg = cl_msg->send_buf.data + cl->tx_offs;
 684 
 685         if (rem <= dev->mtu) {
 686                 ishtp_hdr.length = rem;
 687                 ishtp_hdr.msg_complete = 1;
 688                 cl->sending = 0;
 689                 list_del_init(&cl_msg->list);   /* Must be before write */
 690                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 691                 /* Submit to IPC queue with no callback */
 692                 ishtp_write_message(dev, &ishtp_hdr, pmsg);
 693                 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
 694                 list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
 695                 ++cl->tx_ring_free_size;
 696                 spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
 697                         tx_free_flags);
 698         } else {
 699                 /* Send IPC fragment */
 700                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 701                 cl->tx_offs += dev->mtu;
 702                 ishtp_hdr.length = dev->mtu;
 703                 ishtp_hdr.msg_complete = 0;
 704                 ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
 705         }
 706 }
 707 
 708 /**
  709  * ishtp_cl_send_msg_ipc() - Send message using IPC
 710  * @dev: ISHTP device instance
 711  * @cl: Pointer to client device instance
 712  *
 713  * Send message over IPC not using DMA
 714  */
 715 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
 716                                   struct ishtp_cl *cl)
 717 {
 718         /* If last DMA message wasn't acked yet, leave this one in Tx queue */
 719         if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
 720                 return;
 721 
 722         cl->tx_offs = 0;
 723         ipc_tx_callback(cl);
 724         ++cl->send_msg_cnt_ipc;
 725 }
 726 
 727 /**
  728  * ishtp_cl_send_msg_dma() - Send message using DMA
 729  * @dev: ISHTP device instance
 730  * @cl: Pointer to client device instance
 731  *
 732  * Send message using DMA
 733  */
 734 static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
 735         struct ishtp_cl *cl)
 736 {
 737         struct ishtp_msg_hdr    hdr;
 738         struct dma_xfer_hbm     dma_xfer;
 739         unsigned char   *msg_addr;
 740         int off;
 741         struct ishtp_cl_tx_ring *cl_msg;
 742         unsigned long tx_flags, tx_free_flags;
 743 
 744         /* If last IPC message wasn't acked yet, leave this one in Tx queue */
 745         if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
 746                 return;
 747 
 748         spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
 749         if (list_empty(&cl->tx_list.list)) {
 750                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 751                 return;
 752         }
 753 
 754         cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
 755                 list);
 756 
 757         msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
 758         if (!msg_addr) {
 759                 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 760                 if (dev->transfer_path == CL_TX_PATH_DEFAULT)
 761                         ishtp_cl_send_msg_ipc(dev, cl);
 762                 return;
 763         }
 764 
 765         list_del_init(&cl_msg->list);   /* Must be before write */
 766         spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
 767 
 768         --cl->ishtp_flow_ctrl_creds;
 769         cl->last_dma_acked = 0;
 770         cl->last_dma_addr = msg_addr;
 771         cl->last_tx_path = CL_TX_PATH_DMA;
 772 
 773         /* write msg to dma buf */
 774         memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
 775 
 776         /* send dma_xfer hbm msg */
 777         off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
 778         ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
 779         dma_xfer.hbm = DMA_XFER;
 780         dma_xfer.fw_client_id = cl->fw_client_id;
 781         dma_xfer.host_client_id = cl->host_client_id;
 782         dma_xfer.reserved = 0;
 783         dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
 784         dma_xfer.msg_length = cl_msg->send_buf.size;
 785         dma_xfer.reserved2 = 0;
 786         ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
 787         spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
 788         list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
 789         ++cl->tx_ring_free_size;
 790         spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
 791         ++cl->send_msg_cnt_dma;
 792 }
 793 
 794 /**
  795  * ishtp_cl_send_msg() - Send message using DMA or IPC
 796  * @dev: ISHTP device instance
 797  * @cl: Pointer to client device instance
 798  *
 799  * Send message using DMA or IPC based on transfer_path
 800  */
 801 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
 802 {
 803         if (dev->transfer_path == CL_TX_PATH_DMA)
 804                 ishtp_cl_send_msg_dma(dev, cl);
 805         else
 806                 ishtp_cl_send_msg_ipc(dev, cl);
 807 }
 808 
 809 /**
  810  * recv_ishtp_cl_msg() - Receive client message
 811  * @dev: ISHTP device instance
 812  * @ishtp_hdr: Pointer to message header
 813  *
 814  * Receive and dispatch ISHTP client messages. This function executes in ISR
 815  * or work queue context
 816  */
 817 void recv_ishtp_cl_msg(struct ishtp_device *dev,
 818                        struct ishtp_msg_hdr *ishtp_hdr)
 819 {
 820         struct ishtp_cl *cl;
 821         struct ishtp_cl_rb *rb;
 822         struct ishtp_cl_rb *new_rb;
 823         unsigned char *buffer = NULL;
 824         struct ishtp_cl_rb *complete_rb = NULL;
 825         unsigned long   flags;
 826         int     rb_count;
 827 
 828         if (ishtp_hdr->reserved) {
 829                 dev_err(dev->devc, "corrupted message header.\n");
 830                 goto    eoi;
 831         }
 832 
 833         if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
 834                 dev_err(dev->devc,
 835                         "ISHTP message length in hdr exceeds IPC MTU\n");
 836                 goto    eoi;
 837         }
 838 
 839         spin_lock_irqsave(&dev->read_list_spinlock, flags);
 840         rb_count = -1;
 841         list_for_each_entry(rb, &dev->read_list.list, list) {
 842                 ++rb_count;
 843                 cl = rb->cl;
 844                 if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
 845                                 cl->fw_client_id == ishtp_hdr->fw_addr) ||
 846                                 !(cl->state == ISHTP_CL_CONNECTED))
 847                         continue;
 848 
 849                  /* If no Rx buffer is allocated, disband the rb */
 850                 if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
 851                         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
 852                         dev_err(&cl->device->dev,
 853                                 "Rx buffer is not allocated.\n");
 854                         list_del(&rb->list);
 855                         ishtp_io_rb_free(rb);
 856                         cl->status = -ENOMEM;
 857                         goto    eoi;
 858                 }
 859 
 860                 /*
  861                  * If the message buffer is overflown (exceeds max. client
  862                  * msg size), drop the message and recycle the buffer.
 863                  * Do we need to disconnect such a client? (We don't send
 864                  * back FC, so communication will be stuck anyway)
 865                  */
 866                 if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
 867                         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
 868                         dev_err(&cl->device->dev,
 869                                 "message overflow. size %d len %d idx %ld\n",
 870                                 rb->buffer.size, ishtp_hdr->length,
 871                                 rb->buf_idx);
 872                         list_del(&rb->list);
 873                         ishtp_cl_io_rb_recycle(rb);
 874                         cl->status = -EIO;
 875                         goto    eoi;
 876                 }
 877 
 878                 buffer = rb->buffer.data + rb->buf_idx;
 879                 dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
 880 
 881                 rb->buf_idx += ishtp_hdr->length;
 882                 if (ishtp_hdr->msg_complete) {
 883                         /* Last fragment in message - it's complete */
 884                         cl->status = 0;
 885                         list_del(&rb->list);
 886                         complete_rb = rb;
 887 
 888                         --cl->out_flow_ctrl_creds;
 889                         /*
 890                          * the whole msg arrived, send a new FC, and add a new
 891                          * rb buffer for the next coming msg
 892                          */
 893                         spin_lock(&cl->free_list_spinlock);
 894 
 895                         if (!list_empty(&cl->free_rb_list.list)) {
 896                                 new_rb = list_entry(cl->free_rb_list.list.next,
 897                                         struct ishtp_cl_rb, list);
 898                                 list_del_init(&new_rb->list);
 899                                 spin_unlock(&cl->free_list_spinlock);
 900                                 new_rb->cl = cl;
 901                                 new_rb->buf_idx = 0;
 902                                 INIT_LIST_HEAD(&new_rb->list);
 903                                 list_add_tail(&new_rb->list,
 904                                         &dev->read_list.list);
 905 
 906                                 ishtp_hbm_cl_flow_control_req(dev, cl);
 907                         } else {
 908                                 spin_unlock(&cl->free_list_spinlock);
 909                         }
 910                 }
 911                 /* One more fragment in message (even if this was last) */
 912                 ++cl->recv_msg_num_frags;
 913 
 914                 /*
 915                  * We can safely break here (and in BH too),
 916                  * a single input message can go only to a single request!
 917                  */
 918                 break;
 919         }
 920 
 921         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
 922         /* If it's nobody's message, just read and discard it */
 923         if (!buffer) {
 924                 uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
 925 
 926                 dev_err(dev->devc, "Dropped Rx msg - no request\n");
 927                 dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
 928                 goto    eoi;
 929         }
 930 
 931         if (complete_rb) {
 932                 cl = complete_rb->cl;
 933                 cl->ts_rx = ktime_get();
 934                 ++cl->recv_msg_cnt_ipc;
 935                 ishtp_cl_read_complete(complete_rb);
 936         }
 937 eoi:
 938         return;
 939 }
 940 
 941 /**
  942  * recv_ishtp_cl_msg_dma() - Receive client message
 943  * @dev: ISHTP device instance
 944  * @msg: message pointer
 945  * @hbm: hbm buffer
 946  *
 947  * Receive and dispatch ISHTP client messages using DMA. This function executes
 948  * in ISR or work queue context
 949  */
 950 void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
 951                            struct dma_xfer_hbm *hbm)
 952 {
 953         struct ishtp_cl *cl;
 954         struct ishtp_cl_rb *rb;
 955         struct ishtp_cl_rb *new_rb;
 956         unsigned char *buffer = NULL;
 957         struct ishtp_cl_rb *complete_rb = NULL;
 958         unsigned long   flags;
 959 
 960         spin_lock_irqsave(&dev->read_list_spinlock, flags);
 961 
 962         list_for_each_entry(rb, &dev->read_list.list, list) {
 963                 cl = rb->cl;
 964                 if (!cl || !(cl->host_client_id == hbm->host_client_id &&
 965                                 cl->fw_client_id == hbm->fw_client_id) ||
 966                                 !(cl->state == ISHTP_CL_CONNECTED))
 967                         continue;
 968 
 969                 /*
 970                  * If no Rx buffer is allocated, disband the rb
 971                  */
 972                 if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
 973                         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
 974                         dev_err(&cl->device->dev,
 975                                 "response buffer is not allocated.\n");
 976                         list_del(&rb->list);
 977                         ishtp_io_rb_free(rb);
 978                         cl->status = -ENOMEM;
 979                         goto    eoi;
 980                 }
 981 
 982                 /*
  983                  * If the message buffer is overflown (exceeds max. client
  984                  * msg size), drop the message and recycle the buffer.
 985                  * Do we need to disconnect such a client? (We don't send
 986                  * back FC, so communication will be stuck anyway)
 987                  */
 988                 if (rb->buffer.size < hbm->msg_length) {
 989                         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
 990                         dev_err(&cl->device->dev,
 991                                 "message overflow. size %d len %d idx %ld\n",
 992                                 rb->buffer.size, hbm->msg_length, rb->buf_idx);
 993                         list_del(&rb->list);
 994                         ishtp_cl_io_rb_recycle(rb);
 995                         cl->status = -EIO;
 996                         goto    eoi;
 997                 }
 998 
 999                 buffer = rb->buffer.data;
1000                 memcpy(buffer, msg, hbm->msg_length);
1001                 rb->buf_idx = hbm->msg_length;
1002 
1003                 /* Last fragment in message - it's complete */
1004                 cl->status = 0;
1005                 list_del(&rb->list);
1006                 complete_rb = rb;
1007 
1008                 --cl->out_flow_ctrl_creds;
1009                 /*
1010                  * the whole msg arrived, send a new FC, and add a new
1011                  * rb buffer for the next coming msg
1012                  */
1013                 spin_lock(&cl->free_list_spinlock);
1014 
1015                 if (!list_empty(&cl->free_rb_list.list)) {
1016                         new_rb = list_entry(cl->free_rb_list.list.next,
1017                                 struct ishtp_cl_rb, list);
1018                         list_del_init(&new_rb->list);
1019                         spin_unlock(&cl->free_list_spinlock);
1020                         new_rb->cl = cl;
1021                         new_rb->buf_idx = 0;
1022                         INIT_LIST_HEAD(&new_rb->list);
1023                         list_add_tail(&new_rb->list,
1024                                 &dev->read_list.list);
1025 
1026                         ishtp_hbm_cl_flow_control_req(dev, cl);
1027                 } else {
1028                         spin_unlock(&cl->free_list_spinlock);
1029                 }
1030 
1031                 /* One more fragment in message (this is always last) */
1032                 ++cl->recv_msg_num_frags;
1033 
1034                 /*
1035                  * We can safely break here (and in BH too),
1036                  * a single input message can go only to a single request!
1037                  */
1038                 break;
1039         }
1040 
1041         spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1042         /* If it's nobody's message, just read and discard it */
1043         if (!buffer) {
1044                 dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
1045                 goto    eoi;
1046         }
1047 
1048         if (complete_rb) {
1049                 cl = complete_rb->cl;
1050                 cl->ts_rx = ktime_get();
1051                 ++cl->recv_msg_cnt_dma;
1052                 ishtp_cl_read_complete(complete_rb);
1053         }
1054 eoi:
1055         return;
1056 }
1057 
1058 void *ishtp_get_client_data(struct ishtp_cl *cl)
1059 {
1060         return cl->client_data;
1061 }
1062 EXPORT_SYMBOL(ishtp_get_client_data);
1063 
1064 void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
1065 {
1066         cl->client_data = data;
1067 }
1068 EXPORT_SYMBOL(ishtp_set_client_data);
1069 
1070 struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
1071 {
1072         return cl->dev;
1073 }
1074 EXPORT_SYMBOL(ishtp_get_ishtp_device);
1075 
1076 void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
1077 {
1078         cl->tx_ring_size = size;
1079 }
1080 EXPORT_SYMBOL(ishtp_set_tx_ring_size);
1081 
1082 void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
1083 {
1084         cl->rx_ring_size = size;
1085 }
1086 EXPORT_SYMBOL(ishtp_set_rx_ring_size);
1087 
1088 void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
1089 {
1090         cl->state = state;
1091 }
1092 EXPORT_SYMBOL(ishtp_set_connection_state);
1093 
1094 void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
1095 {
1096         cl->fw_client_id = fw_client_id;
1097 }
1098 EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
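
These accessors let a client driver attach its private context to a client and retrieve it later (for example in an event callback) without depending on the ishtp_cl layout. A minimal sketch with a hypothetical driver-private structure:

      struct example_client_ctx {
              struct ishtp_cl_device *cl_device;
              bool connected;
      };

      static void example_bind_ctx(struct ishtp_cl *cl,
                                   struct example_client_ctx *ctx)
      {
              /* Stash the driver-private context on the client... */
              ishtp_set_client_data(cl, ctx);
      }

      static struct example_client_ctx *example_ctx(struct ishtp_cl *cl)
      {
              /* ...and fetch it back wherever only the client is at hand */
              return ishtp_get_client_data(cl);
      }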
