This source file includes the following definitions:
- fw_iso_buffer_alloc
- fw_iso_buffer_map_dma
- fw_iso_buffer_init
- fw_iso_buffer_map_vma
- fw_iso_buffer_destroy
- fw_iso_buffer_lookup
- fw_iso_context_create
- fw_iso_context_destroy
- fw_iso_context_start
- fw_iso_context_set_channels
- fw_iso_context_queue
- fw_iso_context_queue_flush
- fw_iso_context_flush_completions
- fw_iso_context_stop
- manage_bandwidth
- manage_channel
- deallocate_channel
- fw_iso_resource_manage
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Isochronous I/O functionality:
 *   - Isochronous DMA context management
 *   - Isochronous bus resource management (channels, bandwidth)
 *               added by Kristian Høgsberg <krh@bitplanet.net>
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/byteorder.h>

#include "core.h"

/*
 * Isochronous DMA context management
 */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
        int i;

        buffer->page_count = 0;
        buffer->page_count_mapped = 0;
        buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
                                      GFP_KERNEL);
        if (buffer->pages == NULL)
                return -ENOMEM;

        for (i = 0; i < page_count; i++) {
                buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
                if (buffer->pages[i] == NULL)
                        break;
        }
        buffer->page_count = i;
        if (i < page_count) {
                /* Partial allocation: release the pages acquired so far. */
                fw_iso_buffer_destroy(buffer, NULL);
                return -ENOMEM;
        }

        return 0;
}

int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
                          enum dma_data_direction direction)
{
        dma_addr_t address;
        int i;

        buffer->direction = direction;

        for (i = 0; i < buffer->page_count; i++) {
                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
                if (dma_mapping_error(card->device, address))
                        break;

                /* Stash the bus address in the page's private field. */
                set_page_private(buffer->pages[i], address);
        }
        buffer->page_count_mapped = i;
        if (i < buffer->page_count)
                return -ENOMEM;

        return 0;
}

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
                       int page_count, enum dma_data_direction direction)
{
        int ret;

        ret = fw_iso_buffer_alloc(buffer, page_count);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map_dma(buffer, card, direction);
        if (ret < 0)
                fw_iso_buffer_destroy(buffer, card);

        return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
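
/*
 * Usage sketch (not part of the original file): how a caller might pair
 * fw_iso_buffer_init() with fw_iso_buffer_destroy(). The function name, the
 * page count, and the DMA direction are illustrative assumptions only.
 */
static int example_buffer_round_trip(struct fw_card *card,
                                     struct fw_iso_buffer *buffer)
{
        int ret;

        /* 16 zeroed pages, DMA-mapped for an isochronous receive context. */
        ret = fw_iso_buffer_init(buffer, card, 16, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* ... map into user space and/or queue packets against it ... */

        fw_iso_buffer_destroy(buffer, card);
        return 0;
}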

int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
                          struct vm_area_struct *vma)
{
        return vm_map_pages_zero(vma, buffer->pages, buffer->page_count);
}

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
                           struct fw_card *card)
{
        int i;
        dma_addr_t address;

        for (i = 0; i < buffer->page_count_mapped; i++) {
                address = page_private(buffer->pages[i]);
                dma_unmap_page(card->device, address,
                               PAGE_SIZE, buffer->direction);
        }
        for (i = 0; i < buffer->page_count; i++)
                __free_page(buffer->pages[i]);

        kfree(buffer->pages);
        buffer->pages = NULL;
        buffer->page_count = 0;
        buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);

/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
        size_t i;
        dma_addr_t address;
        ssize_t offset;

        for (i = 0; i < buffer->page_count; i++) {
                address = page_private(buffer->pages[i]);
                offset = (ssize_t)completed - (ssize_t)address;
                if (offset > 0 && offset <= PAGE_SIZE)
                        return (i << PAGE_SHIFT) + offset;
        }

        return 0;
}

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
                int type, int channel, int speed, size_t header_size,
                fw_iso_callback_t callback, void *callback_data)
{
        struct fw_iso_context *ctx;

        ctx = card->driver->allocate_iso_context(card,
                                                 type, channel, header_size);
        if (IS_ERR(ctx))
                return ctx;

        ctx->card = card;
        ctx->type = type;
        ctx->channel = channel;
        ctx->speed = speed;
        ctx->header_size = header_size;
        ctx->callback.sc = callback;
        ctx->callback_data = callback_data;

        return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
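
/*
 * Usage sketch (illustrative only): creating a single-channel receive
 * context. The callback signature follows fw_iso_callback_t from
 * <linux/firewire.h>; the channel, speed, and header size chosen here are
 * assumptions for the example.
 */
static void example_recv_callback(struct fw_iso_context *context, u32 cycle,
                                  size_t header_length, void *header,
                                  void *data)
{
        /* Inspect per-packet isochronous headers, requeue buffer chunks. */
}

static struct fw_iso_context *example_create_recv_context(struct fw_card *card)
{
        return fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
                                     /* channel */ 5, /* speed */ SCODE_400,
                                     /* header_size */ 4,
                                     example_recv_callback, NULL);
}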

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
        ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);

int fw_iso_context_start(struct fw_iso_context *ctx,
                         int cycle, int sync, int tags)
{
        return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);

int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
        return ctx->card->driver->set_iso_channels(ctx, channels);
}

int fw_iso_context_queue(struct fw_iso_context *ctx,
                         struct fw_iso_packet *packet,
                         struct fw_iso_buffer *buffer,
                         unsigned long payload)
{
        return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);

void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
        ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);

int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
        return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
        return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
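
/*
 * Usage sketch of the queue/start sequence (illustrative; the packet fields
 * and the cycle argument are assumptions). Packets are queued against a
 * mapped fw_iso_buffer, the queue is flushed to the controller, and the
 * context is started; cycle = -1 asks the card driver to start as soon as
 * possible.
 */
static int example_start_transmit(struct fw_iso_context *ctx,
                                  struct fw_iso_buffer *buffer)
{
        struct fw_iso_packet packet = {
                .payload_length = 64,   /* bytes at offset 0 of the buffer */
                .interrupt = 1,         /* run the completion callback */
        };
        int ret;

        ret = fw_iso_context_queue(ctx, &packet, buffer, 0);
        if (ret < 0)
                return ret;
        fw_iso_context_queue_flush(ctx);

        /* sync and tags are only used for receive contexts. */
        return fw_iso_context_start(ctx, -1, 0, 0);
}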

/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
                            int bandwidth, bool allocate)
{
        int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
        __be32 data[2];

        /*
         * On a 1394a IRM with low contention, try < 1 is enough.
         * On a 1394-1995 IRM, we need at least try < 2.
         * Let's just do try < 5.
         */
        for (try = 0; try < 5; try++) {
                new = allocate ? old - bandwidth : old + bandwidth;
                if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
                        return -EBUSY;

                data[0] = cpu_to_be32(old);
                data[1] = cpu_to_be32(new);
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                irm_id, generation, SCODE_100,
                                CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
                                data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all bandwidth. */
                        return allocate ? -EAGAIN : bandwidth;

                case RCODE_COMPLETE:
                        if (be32_to_cpup(data) == old)
                                return bandwidth;

                        /* Compare-swap lost; retry with the IRM's value. */
                        old = be32_to_cpup(data);
                }
        }

        return -EIO;
}
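
/*
 * Worked example of the compare-swap loop above (numbers are illustrative):
 * to allocate 600 units, the first attempt assumes old = 4915
 * (BANDWIDTH_AVAILABLE_INITIAL) and proposes new = 4315. If another node got
 * there first and the register now reads 4000, the lock transaction returns
 * 4000 instead of 4915; the loop retries with old = 4000, new = 3400, and so
 * on, for up to five attempts.
 */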

static int manage_channel(struct fw_card *card, int irm_id, int generation,
                u32 channels_mask, u64 offset, bool allocate)
{
        __be32 bit, all, old;
        __be32 data[2];
        int channel, ret = -EIO, retry = 5;

        old = all = allocate ? cpu_to_be32(~0) : 0;

        for (channel = 0; channel < 32; channel++) {
                if (!(channels_mask & 1 << channel))
                        continue;

                ret = -EBUSY;

                bit = cpu_to_be32(1 << (31 - channel));
                if ((old & bit) != (all & bit))
                        continue;

                data[0] = old;
                data[1] = old ^ bit;
                switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
                                           irm_id, generation, SCODE_100,
                                           offset, data, 8)) {
                case RCODE_GENERATION:
                        /* A generation change frees all channels. */
                        return allocate ? -EAGAIN : channel;

                case RCODE_COMPLETE:
                        if (data[0] == old)
                                return channel;

                        old = data[0];

                        /* Is the IRM 1394a-2000 compliant? */
                        if ((data[0] & bit) == (data[1] & bit))
                                continue;

                        fallthrough;    /* It's a 1394-1995 IRM, retry. */
                default:
                        if (retry) {
                                retry--;
                                channel--;
                        } else {
                                ret = -EIO;
                        }
                }
        }

        return ret;
}

static void deallocate_channel(struct fw_card *card, int irm_id,
                               int generation, int channel)
{
        u32 mask;
        u64 offset;

        mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
        offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

        manage_channel(card, irm_id, generation, mask, offset, false);
}

/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 * @card: card interface for this action
 * @generation: bus generation
 * @channels_mask: bitmask for channel allocation
 * @channel: pointer for returning channel allocation result
 * @bandwidth: pointer for returning bandwidth allocation result
 * @allocate: whether to allocate (true) or deallocate (false)
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 *
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
                            u64 channels_mask, int *channel, int *bandwidth,
                            bool allocate)
{
        u32 channels_hi = channels_mask;        /* channels 31...0 */
        u32 channels_lo = channels_mask >> 32;  /* channels 63...32 */
        int irm_id, ret, c = -EINVAL;

        spin_lock_irq(&card->lock);
        irm_id = card->irm_node->node_id;
        spin_unlock_irq(&card->lock);

        if (channels_hi)
                c = manage_channel(card, irm_id, generation, channels_hi,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
                                allocate);
        if (channels_lo && c < 0) {
                c = manage_channel(card, irm_id, generation, channels_lo,
                                CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
                                allocate);
                if (c >= 0)
                        c += 32;
        }
        *channel = c;

        if (allocate && channels_mask != 0 && c < 0)
                *bandwidth = 0;

        if (*bandwidth == 0)
                return;

        ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
        if (ret < 0)
                *bandwidth = 0;

        if (allocate && ret < 0) {
                if (c >= 0)
                        deallocate_channel(card, irm_id, generation, c);
                *channel = ret;
        }
}
EXPORT_SYMBOL(fw_iso_resource_manage);
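
/*
 * Usage sketch (illustrative): asking the IRM for any one channel out of
 * 0...63 plus some bandwidth. The figure of 2400 allocation units is an
 * assumption; real callers derive it from payload size, speed, and overhead.
 */
static void example_allocate_iso_resources(struct fw_card *card, int generation)
{
        int channel, bandwidth = 2400;

        fw_iso_resource_manage(card, generation, ~0ULL /* any channel */,
                               &channel, &bandwidth, true);
        if (channel < 0)
                return; /* nothing was allocated; bandwidth is 0 here too */

        /* ... use the resources, then release them with allocate = false ... */
        fw_iso_resource_manage(card, generation, 1ULL << channel,
                               &channel, &bandwidth, false);
}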