running_ios  105  drivers/md/raid5-cache.c  struct list_head running_ios; /* io_units which are still running,
running_ios  535  drivers/md/raid5-cache.c  list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
running_ios  551  drivers/md/raid5-cache.c  list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
running_ios  591  drivers/md/raid5-cache.c  if (!list_empty(&log->running_ios)) {
running_ios  596  drivers/md/raid5-cache.c  io_deferred = list_first_entry(&log->running_ios,
running_ios  669  drivers/md/raid5-cache.c  if (!list_empty(&log->running_ios)) {
running_ios  670  drivers/md/raid5-cache.c  io = list_first_entry(&log->running_ios, struct r5l_io_unit,
running_ios  727  drivers/md/raid5-cache.c  if (io != list_first_entry(&log->running_ios,
running_ios  803  drivers/md/raid5-cache.c  list_add_tail(&io->log_sibling, &log->running_ios);
running_ios 1518  drivers/md/raid5-cache.c  (list_empty(&log->running_ios) &&
running_ios 3105  drivers/md/raid5-cache.c  INIT_LIST_HEAD(&log->running_ios);
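The usages above follow the standard kernel intrusive-list pattern: the log keeps a `running_ios` list head, each io_unit embeds a `log_sibling` node, new units are appended with list_add_tail(), the oldest in-flight unit is taken with list_first_entry(), and completions walk the list with list_for_each_entry_safe(). Below is a minimal, self-contained sketch of that pattern. It reimplements just enough of the <linux/list.h> helpers to compile and run in userspace (with GCC/Clang, since it uses typeof); the names r5l_log, r5l_io_unit, running_ios and log_sibling are taken from the driver, but the cut-down struct fields and the main() walkthrough are illustrative assumptions, not the driver's actual code.

	/* Sketch of the running_ios list pattern; list helpers mirror
	 * <linux/list.h> but are reimplemented here for a standalone build. */
	#include <stdio.h>
	#include <stddef.h>
	#include <stdlib.h>

	struct list_head { struct list_head *next, *prev; };

	#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	static int list_empty(const struct list_head *head)
	{
		return head->next == head;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define list_first_entry(head, type, member) \
		container_of((head)->next, type, member)

	#define list_next_entry(pos, member) \
		container_of((pos)->member.next, typeof(*(pos)), member)

	#define list_for_each_entry_safe(pos, n, head, member)			\
		for (pos = list_first_entry(head, typeof(*pos), member),	\
		     n = list_next_entry(pos, member);				\
		     &pos->member != (head);					\
		     pos = n, n = list_next_entry(n, member))

	/* Cut-down stand-ins for the driver's structures (illustrative). */
	struct r5l_io_unit {
		int seq;			/* illustrative payload */
		struct list_head log_sibling;	/* links the unit on running_ios */
	};

	struct r5l_log {
		struct list_head running_ios;	/* io_units still in flight */
	};

	int main(void)
	{
		struct r5l_log log;
		struct r5l_io_unit *io, *next;
		int i;

		INIT_LIST_HEAD(&log.running_ios);	/* cf. log setup */

		/* Queue a few io_units in submission order (cf. list_add_tail). */
		for (i = 0; i < 3; i++) {
			io = malloc(sizeof(*io));
			io->seq = i;
			list_add_tail(&io->log_sibling, &log.running_ios);
		}

		/* Oldest in-flight unit is always at the head of the list. */
		io = list_first_entry(&log.running_ios, struct r5l_io_unit, log_sibling);
		printf("first running io: seq=%d\n", io->seq);

		/* Safe walk: entries can be removed while iterating. */
		list_for_each_entry_safe(io, next, &log.running_ios, log_sibling) {
			printf("completing io seq=%d\n", io->seq);
			list_del(&io->log_sibling);
			free(io);
		}

		printf("running_ios empty: %d\n", list_empty(&log.running_ios));
		return 0;
	}

The design point the listing illustrates is that the list head lives in the log while the link node lives in each io_unit, so membership costs no extra allocation and FIFO ordering (tail insert, head removal) matches the order in which log I/O was submitted.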