root/drivers/char/tpm/tpm-dev-common.c

DEFINITIONS

This source file includes the following definitions.
  1. tpm_dev_transmit
  2. tpm_dev_async_work
  3. user_reader_timeout
  4. tpm_timeout_work
  5. tpm_common_open
  6. tpm_common_read
  7. tpm_common_write
  8. tpm_common_poll
  9. tpm_common_release
  10. tpm_dev_common_init
  11. tpm_dev_common_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 IBM Corporation
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Dave Safford <safford@watson.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Copyright (C) 2013 Obsidian Research Corp
 * Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
 *
 * Device file system interface to the TPM
 */
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"

static struct workqueue_struct *tpm_dev_wq;
static DEFINE_MUTEX(tpm_dev_wq_lock);

static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space,
                                u8 *buf, size_t bufsiz)
{
        struct tpm_header *header = (void *)buf;
        ssize_t ret, len;

        ret = tpm2_prepare_space(chip, space, buf, bufsiz);
        /* If the command is not implemented by the TPM, synthesize a
         * response with a TPM2_RC_COMMAND_CODE return for user-space.
         */
        if (ret == -EOPNOTSUPP) {
                header->length = cpu_to_be32(sizeof(*header));
                header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
                header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
                                                  TSS2_RESMGR_TPM_RC_LAYER);
                ret = sizeof(*header);
        }
        if (ret)
                goto out_rc;

        len = tpm_transmit(chip, buf, bufsiz);
        if (len < 0)
                ret = len;

        if (!ret)
                ret = tpm2_commit_space(chip, space, buf, &len);

out_rc:
        return ret ? ret : len;
}

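The synthesized error reply above consists of nothing more than the command/response header, so user space receives exactly sizeof(struct tpm_header) bytes back. As a point of reference, the header layout this relies on is roughly the following (the authoritative definition lives in tpm.h):

/* Sketch of the TPM command/response header assumed above; see
 * drivers/char/tpm/tpm.h for the authoritative definition.
 */
struct tpm_header {
        __be16 tag;                     /* e.g. TPM2_ST_NO_SESSIONS       */
        __be32 length;                  /* total command or response size */
        union {
                __be32 ordinal;         /* command code on the way in     */
                __be32 return_code;     /* status code on the way out     */
        };
} __packed;
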
static void tpm_dev_async_work(struct work_struct *work)
{
        struct file_priv *priv =
                        container_of(work, struct file_priv, async_work);
        ssize_t ret;

        mutex_lock(&priv->buffer_mutex);
        priv->command_enqueued = false;
        ret = tpm_try_get_ops(priv->chip);
        if (ret) {
                priv->response_length = ret;
                goto out;
        }

        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);
        if (ret > 0) {
                priv->response_length = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
        }
out:
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

static void user_reader_timeout(struct timer_list *t)
{
        struct file_priv *priv = from_timer(priv, t, user_read_timer);

        pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
                task_tgid_nr(current));

        schedule_work(&priv->timeout_work);
}

static void tpm_timeout_work(struct work_struct *work)
{
        struct file_priv *priv = container_of(work, struct file_priv,
                                              timeout_work);

        mutex_lock(&priv->buffer_mutex);
        priv->response_read = true;
        priv->response_length = 0;
        memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
        mutex_unlock(&priv->buffer_mutex);
        wake_up_interruptible(&priv->async_wait);
}

void tpm_common_open(struct file *file, struct tpm_chip *chip,
                     struct file_priv *priv, struct tpm_space *space)
{
        priv->chip = chip;
        priv->space = space;
        priv->response_read = true;

        mutex_init(&priv->buffer_mutex);
        timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
        INIT_WORK(&priv->timeout_work, tpm_timeout_work);
        INIT_WORK(&priv->async_work, tpm_dev_async_work);
        init_waitqueue_head(&priv->async_wait);
        file->private_data = priv;
}

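tpm_common_open() wires up the per-open state that every other function in this file operates on. A rough sketch of that structure, as declared in tpm-dev.h, is shown below for orientation:

/* Rough sketch of the per-open state initialized above; see
 * drivers/char/tpm/tpm-dev.h for the authoritative declaration.
 */
struct file_priv {
        struct tpm_chip *chip;
        struct tpm_space *space;

        struct mutex buffer_mutex;              /* guards the fields below     */
        struct timer_list user_read_timer;      /* user needs to claim result  */
        struct work_struct timeout_work;        /* discards an unread response */
        struct work_struct async_work;          /* O_NONBLOCK command path     */
        wait_queue_head_t async_wait;
        ssize_t response_length;                /* unread response bytes left  */
        bool response_read;
        bool command_enqueued;

        u8 data_buffer[TPM_BUFSIZE];            /* shared command/response buffer */
};
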
ssize_t tpm_common_read(struct file *file, char __user *buf,
                        size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        ssize_t ret_size = 0;
        int rc;

        mutex_lock(&priv->buffer_mutex);

        if (priv->response_length) {
                priv->response_read = true;

                ret_size = min_t(ssize_t, size, priv->response_length);
                if (ret_size <= 0) {
                        priv->response_length = 0;
                        goto out;
                }

                rc = copy_to_user(buf, priv->data_buffer + *off, ret_size);
                if (rc) {
                        memset(priv->data_buffer, 0, TPM_BUFSIZE);
                        priv->response_length = 0;
                        ret_size = -EFAULT;
                } else {
                        memset(priv->data_buffer + *off, 0, ret_size);
                        priv->response_length -= ret_size;
                        *off += ret_size;
                }
        }

out:
        if (!priv->response_length) {
                *off = 0;
                del_singleshot_timer_sync(&priv->user_read_timer);
                flush_work(&priv->timeout_work);
        }
        mutex_unlock(&priv->buffer_mutex);
        return ret_size;
}

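Because *off tracks how much of a buffered response has already been copied out, user space is free to consume a reply in pieces, for example the 10-byte header first to learn the total length and then the remainder. A hypothetical user-space sketch of that pattern (assumed helper name, not part of the kernel sources; error handling kept minimal):

/* Hypothetical user-space helper: read a TPM reply from an already
 * open /dev/tpm0 descriptor in two steps (header, then body). The
 * length is parsed from the header before the rest is consumed,
 * because the kernel clears each chunk of its buffer once read.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>

static ssize_t read_tpm_reply(int fd, uint8_t *buf, size_t bufsiz)
{
        uint32_t be_len, total;

        if (read(fd, buf, 10) != 10)            /* response header first */
                return -1;
        memcpy(&be_len, buf + 2, sizeof(be_len));
        total = ntohl(be_len);                  /* big-endian length field */
        if (total < 10 || total > bufsiz)
                return -1;
        if (total > 10 &&                       /* then the response body */
            read(fd, buf + 10, total - 10) != (ssize_t)(total - 10))
                return -1;
        return total;
}
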
ssize_t tpm_common_write(struct file *file, const char __user *buf,
                         size_t size, loff_t *off)
{
        struct file_priv *priv = file->private_data;
        int ret = 0;

        if (size > TPM_BUFSIZE)
                return -E2BIG;

        mutex_lock(&priv->buffer_mutex);

        /* Cannot perform a write until the read has cleared either via
         * tpm_read or a user_read_timer timeout. This also prevents split
         * buffered writes from blocking here.
         */
        if ((!priv->response_read && priv->response_length) ||
            priv->command_enqueued) {
                ret = -EBUSY;
                goto out;
        }

        if (copy_from_user(priv->data_buffer, buf, size)) {
                ret = -EFAULT;
                goto out;
        }

        if (size < 6 ||
            size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
                ret = -EINVAL;
                goto out;
        }

        /* Atomic tpm command send and result receive. We only hold the ops
         * lock during this period so that the tpm can be unregistered even if
         * the char dev is held open.
         */
        if (tpm_try_get_ops(priv->chip)) {
                ret = -EPIPE;
                goto out;
        }

        priv->response_length = 0;
        priv->response_read = false;
        *off = 0;

        /*
         * If in nonblocking mode, schedule an async job to send the command
         * and return the size. In case of error the error code will be
         * returned in the subsequent read call.
         */
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
                tpm_put_ops(priv->chip);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }

        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);

        if (ret > 0) {
                priv->response_length = ret;
                mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
                ret = size;
        }
out:
        mutex_unlock(&priv->buffer_mutex);
        return ret;
}

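In the blocking case a single write()/read() pair drives one complete command exchange. The fragment below is a hypothetical user-space sketch, not part of the kernel sources: it hand-encodes a TPM2_GetRandom command (TPM_ST_NO_SESSIONS tag, 12-byte command size, command code 0x0000017B, 8 bytes requested) and assumes the device node is /dev/tpm0.

/* Hypothetical user-space sketch of the blocking path: one write()
 * submits a raw TPM2_GetRandom command, one read() collects the whole
 * response. The command bytes are an assumption based on the TPM 2.0
 * wire format, not something defined in this file.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        static const unsigned char cmd[] = {
                0x80, 0x01,                     /* TPM_ST_NO_SESSIONS   */
                0x00, 0x00, 0x00, 0x0c,         /* commandSize = 12     */
                0x00, 0x00, 0x01, 0x7b,         /* TPM_CC_GetRandom     */
                0x00, 0x08,                     /* bytesRequested = 8   */
        };
        unsigned char rsp[4096];
        ssize_t n;
        int fd;

        fd = open("/dev/tpm0", O_RDWR);
        if (fd < 0)
                return 1;
        if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd))
                return 1;
        n = read(fd, rsp, sizeof(rsp));         /* whole reply in one read */
        printf("response: %zd bytes\n", n);
        close(fd);
        return 0;
}
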
__poll_t tpm_common_poll(struct file *file, poll_table *wait)
{
        struct file_priv *priv = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &priv->async_wait, wait);
        mutex_lock(&priv->buffer_mutex);

        /*
         * The response_length indicates if there is still a response
         * (or part of it) to be consumed. Partial reads decrease it
         * by the number of bytes read, and a write resets it to zero.
         */
        if (priv->response_length)
                mask = EPOLLIN | EPOLLRDNORM;
        else
                mask = EPOLLOUT | EPOLLWRNORM;

        mutex_unlock(&priv->buffer_mutex);
        return mask;
}

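This poll mask is what makes the O_NONBLOCK path usable: after a nonblocking write() queues the command, the caller waits for EPOLLIN before reading the buffered response (or the error code left behind by the async worker). A hypothetical user-space fragment illustrating that flow, with assumed names and no error handling:

/* Hypothetical user-space sketch of the nonblocking flow: write the
 * command with O_NONBLOCK set, poll() until the response has been
 * buffered by the async worker, then read it back. "cmd"/"cmd_len"
 * are assumed to hold a valid TPM command.
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static ssize_t tpm_exchange_nonblock(const unsigned char *cmd, size_t cmd_len,
                                     unsigned char *rsp, size_t rsp_len)
{
        struct pollfd pfd;
        ssize_t n;
        int fd;

        fd = open("/dev/tpm0", O_RDWR | O_NONBLOCK);
        if (fd < 0)
                return -1;

        write(fd, cmd, cmd_len);        /* returns immediately; work is queued */

        pfd.fd = fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);              /* wakes up once a response is ready */

        n = read(fd, rsp, rsp_len);
        close(fd);
        return n;
}
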
/*
 * Called on file close
 */
void tpm_common_release(struct file *file, struct file_priv *priv)
{
        flush_work(&priv->async_work);
        del_singleshot_timer_sync(&priv->user_read_timer);
        flush_work(&priv->timeout_work);
        file->private_data = NULL;
        priv->response_length = 0;
}

int __init tpm_dev_common_init(void)
{
        tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);

        return !tpm_dev_wq ? -ENOMEM : 0;
}

void __exit tpm_dev_common_exit(void)
{
        if (tpm_dev_wq) {
                destroy_workqueue(tpm_dev_wq);
                tpm_dev_wq = NULL;
        }
}
