drivers/char/agp/isoch.c


DEFINITIONS

This source file includes the following definitions.
  1. agp_3_5_dev_list_insert
  2. agp_3_5_dev_list_sort
  3. agp_3_5_isochronous_node_enable
  4. agp_3_5_nonisochronous_node_enable
  5. agp_3_5_enable

// SPDX-License-Identifier: GPL-2.0
/*
 * Setup routines for AGP 3.5 compliant bridges.
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "agp.h"

/* Generic AGP 3.5 enabling routines */

struct agp_3_5_dev {
        struct list_head list;
        u8 capndx;
        u32 maxbw;
        struct pci_dev *dev;
};
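
/*
 * Illustrative sketch (not part of the original file): the embedded
 * list_head is what lets these per-device records ride on a standard
 * kernel linked list, and list_entry()/list_for_each_entry() (wrappers
 * around container_of()) recover the containing agp_3_5_dev from a bare
 * list pointer.  The helper name below is hypothetical.
 */
static inline void agp_3_5_dev_list_dump(struct list_head *head)
{
        struct agp_3_5_dev *cur;

        /* Walk the list and recover each containing structure. */
        list_for_each_entry(cur, head, list)
                pr_debug("AGP 3.5 dev %s, maxbw=%u\n",
                         pci_name(cur->dev), cur->maxbw);
}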

static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
{
        struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
        struct list_head *pos;

        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                if (cur->maxbw > n->maxbw)
                        break;
        }
        list_add_tail(new, pos);
}

static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
{
        struct agp_3_5_dev *cur;
        struct pci_dev *dev;
        struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
        u32 nistat;

        INIT_LIST_HEAD(head);

        for (pos=start; pos!=head; ) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
                cur->maxbw = (nistat >> 16) & 0xff;

                tmp = pos;
                pos = pos->next;
                agp_3_5_dev_list_insert(head, tmp);
        }
}
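
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above implement an insertion sort keyed on the NISTAT MAXBW field
 * (bits 23:16).  list_add_tail(new, pos) inserts the new node immediately
 * before pos, so stopping at the first node with a larger maxbw keeps the
 * list in ascending order; e.g. masters reporting maxbw 0x20, 0x08 and
 * 0x10 end up ordered 0x08 -> 0x10 -> 0x20.  The self-check below is a
 * hypothetical helper built on the same list API.
 */
static inline bool agp_3_5_dev_list_is_sorted(struct list_head *head)
{
        struct agp_3_5_dev *cur, *prev = NULL;

        list_for_each_entry(cur, head, list) {
                if (prev && prev->maxbw > cur->maxbw)
                        return false;   /* out of order */
                prev = cur;
        }
        return true;
}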

/*
 * Initialize all isochronous transfer parameters for an AGP 3.0
 * node (i.e. a host bridge in combination with the adapters
 * lying behind it...)
 */

static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
        /*
         * Convenience structure to make the calculations clearer
         * here.  The field names come straight from the AGP 3.0 spec.
         */
        struct isoch_data {
                u32 maxbw;
                u32 n;
                u32 y;
                u32 l;
                u32 rq;
                struct agp_3_5_dev *dev;
        };

        struct pci_dev *td = bridge->dev, *dev;
        struct list_head *head = &dev_list->list, *pos;
        struct agp_3_5_dev *cur;
        struct isoch_data *master, target;
        unsigned int cdev = 0;
        u32 mnistat, tnistat, tstatus, mcmd;
        u16 tnicmd, mnicmd;
        u8 mcapndx;
        u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
        u32 step, rem, rem_isoch, rem_async;
        int ret = 0;

        /*
         * We'll work with an array of isoch_data's (one for each
         * device in dev_list) throughout this function.
         */
        master = kmalloc_array(ndevs, sizeof(*master), GFP_KERNEL);
        if (master == NULL) {
                ret = -ENOMEM;
                goto get_out;
        }

        /*
         * Sort the device list by maxbw.  We need to do this because the
         * spec suggests that the devices with the smallest requirements
         * have their resources allocated first, with all remaining resources
         * falling to the device with the largest requirement.
         *
         * We don't do exactly this; we divide target resources by ndevs
         * and split them amongst the AGP 3.0 devices.  The remainder of these
         * divisions is dropped on the last device, roughly as the spec
         * says it should be done.
         *
         * We can't do this sort when we initially construct the dev_list
         * because we don't know until this function whether isochronous
         * transfers are enabled and consequently whether maxbw will mean
         * anything.
         */
        agp_3_5_dev_list_sort(dev_list, ndevs);

        pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
        pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);

        /* Extract power-on defaults from the target */
        target.maxbw = (tnistat >> 16) & 0xff;
        target.n     = (tnistat >> 8)  & 0xff;
        target.y     = (tnistat >> 6)  & 0x3;
        target.l     = (tnistat >> 3)  & 0x7;
        target.rq    = (tstatus >> 24) & 0xff;

        y_max = target.y;

        /*
         * Extract power-on defaults for each device in dev_list.  Along
         * the way, calculate the total isochronous bandwidth required
         * by these devices and the largest requested payload size.
         */
        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                mcapndx = cur->capndx;

                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);

                master[cdev].maxbw = (mnistat >> 16) & 0xff;
                master[cdev].n     = (mnistat >> 8)  & 0xff;
                master[cdev].y     = (mnistat >> 6)  & 0x3;
                master[cdev].dev   = cur;

                tot_bw += master[cdev].maxbw;
                y_max = max(y_max, master[cdev].y);

                cdev++;
        }

        /* Check if this configuration has any chance of working */
        if (tot_bw > target.maxbw) {
                dev_err(&td->dev, "isochronous bandwidth required "
                        "by AGP 3.0 devices exceeds that which is supported by "
                        "the AGP 3.0 bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        target.y = y_max;

        /*
         * Write the calculated payload size into the target's NICMD
         * register.  Doing this directly affects the ISOCH_N value
         * in the target's NISTAT register, so we need to do this now
         * to get an accurate value for ISOCH_N later.
         */
        pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
        tnicmd &= ~(0x3 << 6);
        tnicmd |= target.y << 6;
        pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);

        /* Reread the target's ISOCH_N */
        pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
        target.n = (tnistat >> 8) & 0xff;

        /* Calculate the minimum ISOCH_N needed by each master */
        for (cdev=0; cdev<ndevs; cdev++) {
                master[cdev].y = target.y;
                master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);

                tot_n += master[cdev].n;
        }

        /* Exit if the minimal ISOCH_N allocation among the masters is more
         * than the target can handle. */
        if (tot_n > target.n) {
                dev_err(&td->dev, "number of isochronous "
                        "transactions per period required by AGP 3.0 devices "
                        "exceeds that which is supported by the AGP 3.0 "
                        "bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        /* Calculate leftover ISOCH_N capability in the target.  We'll give
         * this to the hungriest device (as per the spec) */
        rem  = target.n - tot_n;

        /*
         * Calculate the minimum isochronous RQ depth needed by each master.
         * Along the way, distribute the extra ISOCH_N capability calculated
         * above.
         */
        for (cdev=0; cdev<ndevs; cdev++) {
                /*
                 * This is a little subtle.  If ISOCH_Y > 64B, then ISOCH_Y
                 * byte isochronous writes will be broken into 64B pieces.
                 * This means we need to budget more RQ depth to account for
                 * these kinds of writes (each isochronous write is actually
                 * many writes on the AGP bus).
                 */
                master[cdev].rq = master[cdev].n;
                if (master[cdev].y > 0x1)
                        master[cdev].rq *= (1 << (master[cdev].y - 1));

                tot_rq += master[cdev].rq;
        }
        master[ndevs-1].n += rem;

        /* Figure the number of isochronous and asynchronous RQ slots the
         * target is providing. */
        rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
        rq_async = target.rq - rq_isoch;

        /* Exit if the minimal RQ needs of the masters exceed what the target
         * can provide. */
        if (tot_rq > rq_isoch) {
                dev_err(&td->dev, "number of request queue slots "
                        "required by the isochronous bandwidth requested by "
                        "AGP 3.0 devices exceeds the number provided by the "
                        "AGP 3.0 bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        /* Calculate asynchronous RQ capability in the target (per master) as
         * well as the total number of leftover isochronous RQ slots. */
        step      = rq_async / ndevs;
        rem_async = step + (rq_async % ndevs);
        rem_isoch = rq_isoch - tot_rq;

        /* Distribute the extra RQ slots calculated above and write our
         * isochronous settings out to the actual devices. */
        for (cdev=0; cdev<ndevs; cdev++) {
                cur = master[cdev].dev;
                dev = cur->dev;

                mcapndx = cur->capndx;

                master[cdev].rq += (cdev == ndevs - 1)
                              ? (rem_async + rem_isoch) : step;

                pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
                pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);

                mnicmd &= ~(0xff << 8);
                mnicmd &= ~(0x3  << 6);
                mcmd   &= ~(0xff << 24);

                mnicmd |= master[cdev].n  << 8;
                mnicmd |= master[cdev].y  << 6;
                mcmd   |= master[cdev].rq << 24;

                pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
                pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
        }

free_and_exit:
        kfree(master);

get_out:
        return ret;
}
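
/*
 * Worked example (illustrative numbers only, not taken from any real
 * bridge): two masters with maxbw 9 and 21 behind a target with maxbw 40,
 * target.rq 32, and y_max 2 (128-byte payloads).  Each master needs
 * n = maxbw / (y + 1): 9/3 = 3 and 21/3 = 7, so tot_n = 10.  If the
 * reread target.n is 12, rem = 2 and is added to the last (hungriest)
 * master's ISOCH_N after the RQ pass.  With y > 1 each master's RQ is
 * doubled: 6 and 14, tot_rq = 20, while rq_isoch = 12 * 2 = 24 and
 * rq_async = 32 - 24 = 8.  Then step = 8/2 = 4, rem_async = 4 and
 * rem_isoch = 24 - 20 = 4, so the devices end up programmed with
 * N=3/Y=2/RQ=10 and N=9/Y=2/RQ=22, which exactly consumes the target's
 * 32 RQ slots.
 */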

/*
 * This function basically allocates request queue slots among the
 * AGP 3.0 systems in nonisochronous nodes.  The algorithm is
 * pretty stupid: divide the total number of RQ slots provided by the
 * target by ndevs.  Distribute this many slots to each AGP 3.0 device,
 * giving any leftover slots to the last device in dev_list.
 */
static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
                struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
        struct agp_3_5_dev *cur;
        struct list_head *head = &dev_list->list, *pos;
        u32 tstatus, mcmd;
        u32 trq, mrq, rem;
        unsigned int cdev = 0;

        pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);

        trq = (tstatus >> 24) & 0xff;
        mrq = trq / ndevs;

        rem = mrq + (trq % ndevs);

        for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
                cur = list_entry(pos, struct agp_3_5_dev, list);

                pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
                mcmd &= ~(0xff << 24);
                mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
                pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
        }
}
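
/*
 * Worked example (illustrative numbers only): if the target's AGPSTAT RQ
 * field reads back as 31 and three masters sit behind the bridge, then
 * mrq = 31 / 3 = 10 and rem = 10 + (31 % 3) = 11, so the first two
 * masters are programmed with 10 RQ slots each and the last one with 11,
 * consuming all 31 slots.
 */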

/*
 * Fully configure and enable an AGP 3.0 host bridge and all the devices
 * lying behind it.
 */
int agp_3_5_enable(struct agp_bridge_data *bridge)
{
        struct pci_dev *td = bridge->dev, *dev = NULL;
        u8 mcapndx;
        u32 isoch, arqsz;
        u32 tstatus, mstatus, ncapid;
        u32 mmajor;
        u16 mpstat;
        struct agp_3_5_dev *dev_list, *cur;
        struct list_head *head, *pos;
        unsigned int ndevs = 0;
        int ret = 0;

        /* Extract some power-on defaults from the target */
        pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
        isoch     = (tstatus >> 17) & 0x1;
        if (isoch == 0) /* isoch xfers not available, bail out. */
                return -ENODEV;

        arqsz     = (tstatus >> 13) & 0x7;

        /*
         * Allocate a head for our AGP 3.5 device list
         * (multiple AGP v3 devices are allowed behind a single bridge).
         */
        if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
                ret = -ENOMEM;
                goto get_out;
        }
        head = &dev_list->list;
        INIT_LIST_HEAD(head);

        /* Find all AGP devices, and add them to dev_list. */
        for_each_pci_dev(dev) {
                mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
                if (mcapndx == 0)
                        continue;

                switch ((dev->class >> 8) & 0xff00) {
                        case 0x0600:    /* Bridge */
                                /* Skip bridges. We should call this function for each one. */
                                continue;

                        case 0x0001:    /* Unclassified device */
                                /* Don't know what this is, but log it for investigation. */
                                if (mcapndx != 0) {
                                        dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
                                                 pci_name(dev),
                                                 dev->vendor, dev->device);
                                }
                                continue;

                        case 0x0300:    /* Display controller */
                        case 0x0400:    /* Multimedia controller */
                                if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
                                        ret = -ENOMEM;
                                        goto free_and_exit;
                                }
                                cur->dev = dev;

                                pos = &cur->list;
                                list_add(pos, head);
                                ndevs++;
                                continue;

                        default:
                                continue;
                }
        }

        /*
         * Take an initial pass through the devices lying behind our host
         * bridge.  Make sure each one is actually an AGP 3.0 device, otherwise
         * exit with an error message.  Along the way store the AGP 3.0
         * cap_ptr for each device.
         */
        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                pci_read_config_word(dev, PCI_STATUS, &mpstat);
                if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
                        continue;

                pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
                if (mcapndx != 0) {
                        do {
                                pci_read_config_dword(dev, mcapndx, &ncapid);
                                if ((ncapid & 0xff) != 2)
                                        mcapndx = (ncapid >> 8) & 0xff;
                        }
                        while (((ncapid & 0xff) != 2) && (mcapndx != 0));
                }

                if (mcapndx == 0) {
                        dev_err(&td->dev, "woah!  Non-AGP device %s on "
                                "secondary bus of AGP 3.5 bridge!\n",
                                pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }

                mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
                if (mmajor < 3) {
                        dev_err(&td->dev, "woah!  AGP 2.0 device %s on "
                                "secondary bus of AGP 3.5 bridge operating "
                                "with AGP 3.0 electricals!\n", pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }

                cur->capndx = mcapndx;

                pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);

                if (((mstatus >> 3) & 0x1) == 0) {
                        dev_err(&td->dev, "woah!  AGP 3.x device %s not "
                                "operating in AGP 3.x mode on secondary bus "
                                "of AGP 3.5 bridge operating with AGP 3.0 "
                                "electricals!\n", pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }
        }

        /*
         * Call functions to divide target resources amongst the AGP 3.0
         * masters.  This process is dramatically different depending on
         * whether isochronous transfers are supported.
         */
        if (isoch) {
                ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
                if (ret) {
                        dev_info(&td->dev, "something bad happened setting "
                                 "up isochronous xfers; falling back to "
                                 "non-isochronous xfer mode\n");
                } else {
                        goto free_and_exit;
                }
        }
        agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);

free_and_exit:
        /* Be sure to free the dev_list */
        for (pos=head->next; pos!=head; ) {
                cur = list_entry(pos, struct agp_3_5_dev, list);

                pos = pos->next;
                kfree(cur);
        }
        kfree(dev_list);

get_out:
        return ret;
}
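
/*
 * Illustrative sketch (not part of the original file): agp_3_5_enable()
 * is intended to be called once per AGP 3.5 host bridge after the
 * bridge's capability index and version have been discovered.  The
 * wrapper below is hypothetical; in-tree the call is made from the
 * generic enable path, and the exact gating policy shown here is an
 * assumption for illustration only.
 */
static inline void example_enable_agp3_isoch(struct agp_bridge_data *bridge)
{
        /* Only AGP 3.5-capable bridges qualify for isochronous setup. */
        if (bridge->major_version < 3 || bridge->minor_version < 5)
                return;

        if (agp_3_5_enable(bridge))
                dev_info(&bridge->dev->dev,
                         "AGP 3.5 isochronous setup skipped\n");
}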
