drivers/s390/cio/ccwreq.c


DEFINITIONS

This source file includes the following definitions:
  1. lpm_adjust
  2. ccwreq_next_path
  3. ccwreq_stop
  4. ccwreq_do
  5. ccw_request_start
  6. ccw_request_cancel
  7. ccwreq_status
  8. ccwreq_log_status
  9. ccw_request_handler
  10. ccw_request_timeout
  11. ccw_request_notoper

// SPDX-License-Identifier: GPL-2.0
/*
 *  Handling of internal CCW device requests.
 *
 *    Copyright IBM Corp. 2009, 2011
 *    Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
        while (lpm && ((lpm & mask) == 0))
                lpm >>= 1;
        return lpm;
}
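
/*
 * Example (illustrative): with lpm = 0x20 and mask = 0x0f, lpm_adjust()
 * shifts 0x20 -> 0x10 -> 0x08 and returns 0x08, the first available path
 * at or below the requested one; with mask = 0 it returns 0, signalling
 * that no usable path is left.
 */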

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (!req->singlepath) {
                req->mask = 0;
                goto out;
        }
        req->retries    = req->maxretries;
        req->mask       = lpm_adjust(req->mask >> 1, req->lpm);
out:
        return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->done)
                return;
        req->done = 1;
        ccw_device_set_timeout(cdev, 0);
        memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
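        /*
         * If a deferred return code was recorded (e.g. -ETIME set by the
         * timeout handler), report it instead of a generic error, but never
         * replace -ENODEV.
         */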
        if (rc && rc != -ENODEV && req->drc)
                rc = req->drc;
        req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw1 *cp = req->cp;
        int rc = -EACCES;

        while (req->mask) {
                if (req->retries-- == 0) {
                        /* Retries exhausted, try next path. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Perform start function. */
                memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
                rc = cio_start(sch, cp, (u8) req->mask);
                if (rc == 0) {
                        /* I/O started successfully. */
                        ccw_device_set_timeout(cdev, req->timeout);
                        return;
                }
                if (rc == -ENODEV) {
                        /* Permanent device error. */
                        break;
                }
                if (rc == -EACCES) {
                        /* Permanent path error. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Temporary improper status. */
                rc = cio_clear(sch);
                if (rc)
                        break;
                return;
        }
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->singlepath) {
                /* Try all paths twice to counter link flapping. */
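                /*
                 * Shifted right via lpm_adjust(), each path bit of the lpm
                 * is selected once from the upper and once from the lower
                 * byte of this 16-bit mask, so every operational path gets
                 * offered twice before the mask reaches zero.
                 */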
                req->mask = 0x8080;
        } else
                req->mask = req->lpm;

        req->retries    = req->maxretries;
        req->mask       = lpm_adjust(req->mask, req->lpm);
        req->drc        = 0;
        req->done       = 0;
        req->cancel     = 0;
        if (!req->mask)
                goto out_nopath;
        ccwreq_do(cdev);
        return;

out_nopath:
        ccwreq_stop(cdev, -EACCES);
}
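
/*
 * Illustrative sketch of a typical caller (cp, timeout, retries, lpm and
 * callback below are placeholder names): an internal user fills in
 * cdev->private->req and then starts the request, roughly like this:
 *
 *      struct ccw_request *req = &cdev->private->req;
 *
 *      memset(req, 0, sizeof(*req));
 *      req->cp         = cp;           // channel program to start
 *      req->timeout    = timeout;      // per-attempt timeout, in jiffies
 *      req->maxretries = retries;      // start retries per path
 *      req->lpm        = lpm;          // mask of paths to use
 *      req->singlepath = 1;            // cycle through paths one at a time
 *      req->callback   = callback;     // invoked by ccwreq_stop() when done
 *      ccw_request_start(cdev);
 *
 * Optional hooks such as req->filter and req->check are consulted by
 * ccw_request_handler() while the request is in flight.
 */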

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc;

        if (req->done)
                return 1;
        req->cancel = 1;
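        /*
         * The clear function started below surfaces as an IO_KILLED status
         * in ccw_request_handler(), which ends the request with -EIO
         * because the cancel flag is set.
         */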
        rc = cio_clear(sch);
        if (rc)
                ccwreq_stop(cdev, rc);
        return 0;
}

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
        struct irb *irb = &cdev->private->dma_area->irb;
        struct cmd_scsw *scsw = &irb->scsw.cmd;
        enum uc_todo todo;

        /* Perform BASIC SENSE if needed. */
        if (ccw_device_accumulate_and_sense(cdev, lcirb))
                return IO_RUNNING;
        /* Check for halt/clear interrupt. */
        if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
                return IO_KILLED;
        /* Check for path error. */
        if (scsw->cc == 3 || scsw->pno)
                return IO_PATH_ERROR;
        /* Handle BASIC SENSE data. */
        if (irb->esw.esw0.erw.cons) {
                CIO_TRACE_EVENT(2, "sensedata");
                CIO_HEX_EVENT(2, &cdev->private->dev_id,
                              sizeof(struct ccw_dev_id));
                CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
                              SENSE_MAX_COUNT);
                /* Check for command reject. */
                if (irb->ecw[0] & SNS0_CMD_REJECT)
                        return IO_REJECTED;
                /* Ask the driver what to do */
                if (cdev->drv && cdev->drv->uc_handler) {
                        todo = cdev->drv->uc_handler(cdev, lcirb);
                        CIO_TRACE_EVENT(2, "uc_response");
                        CIO_HEX_EVENT(2, &todo, sizeof(todo));
                        switch (todo) {
                        case UC_TODO_RETRY:
                                return IO_STATUS_ERROR;
                        case UC_TODO_RETRY_ON_NEW_PATH:
                                return IO_PATH_ERROR;
                        case UC_TODO_STOP:
                                return IO_REJECTED;
                        default:
                                return IO_STATUS_ERROR;
                        }
                }
                /* Assume that unexpected SENSE data implies an error. */
                return IO_STATUS_ERROR;
        }
        /* Check for channel errors. */
        if (scsw->cstat != 0)
                return IO_STATUS_ERROR;
        /* Check for device errors. */
        if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return IO_STATUS_ERROR;
        /* Check for final state. */
        if (!(scsw->dstat & DEV_STAT_DEV_END))
                return IO_RUNNING;
        /* Check for other improper status. */
        if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
                return IO_STATUS_ERROR;
        return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
        struct ccw_request *req = &cdev->private->req;
        struct {
                struct ccw_dev_id dev_id;
                u16 retries;
                u8 lpm;
                u8 status;
        }  __attribute__ ((packed)) data;
        data.dev_id     = cdev->private->dev_id;
        data.retries    = req->retries;
        data.lpm        = (u8) req->mask;
        data.status     = (u8) status;
        CIO_TRACE_EVENT(2, "reqstat");
        CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
        struct irb *irb = this_cpu_ptr(&cio_irb);
        struct ccw_request *req = &cdev->private->req;
        enum io_status status;
        int rc = -EOPNOTSUPP;

        /* Check status of I/O request. */
        status = ccwreq_status(cdev, irb);
        if (req->filter)
                status = req->filter(cdev, req->data, irb, status);
        if (status != IO_RUNNING)
                ccw_device_set_timeout(cdev, 0);
        if (status != IO_DONE && status != IO_RUNNING)
                ccwreq_log_status(cdev, status);
        switch (status) {
        case IO_DONE:
                break;
        case IO_RUNNING:
                return;
        case IO_REJECTED:
                goto err;
        case IO_PATH_ERROR:
                goto out_next_path;
        case IO_STATUS_ERROR:
                goto out_restart;
        case IO_KILLED:
                /* Check if request was cancelled on purpose. */
                if (req->cancel) {
                        rc = -EIO;
                        goto err;
                }
                goto out_restart;
        }
        /* Check back with request initiator. */
        if (!req->check)
                goto out;
        switch (req->check(cdev, req->data)) {
        case 0:
                break;
        case -EAGAIN:
                goto out_restart;
        case -EACCES:
                goto out_next_path;
        default:
                goto err;
        }
out:
        ccwreq_stop(cdev, 0);
        return;

out_next_path:
        /* Try next path and restart I/O. */
        if (!ccwreq_next_path(cdev)) {
                rc = -EACCES;
                goto err;
        }
out_restart:
        /* Restart. */
        ccwreq_do(cdev);
        return;
err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc = -ENODEV, chp;

        if (cio_update_schib(sch))
                goto err;

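        /* Warn for each channel path set in the last-path-used mask (lpum). */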
        for (chp = 0; chp < 8; chp++) {
                if ((0x80 >> chp) & sch->schib.pmcw.lpum)
                        pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
                                dev_name(&cdev->dev), req->timeout / HZ,
                                scsw_cstat(&sch->schib.scsw),
                                scsw_dstat(&sch->schib.scsw),
                                sch->schid.cssid,
                                sch->schib.pmcw.chpid[chp]);
        }

        if (!ccwreq_next_path(cdev)) {
                /* set the final return code for this request */
                req->drc = -ETIME;
        }
        rc = cio_clear(sch);
        if (rc)
                goto err;
        return;

err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
        ccwreq_stop(cdev, -ENODEV);
}
