clone 50 arch/um/drivers/ubd_user.c pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
clone 74 arch/um/os-Linux/helper.c pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
clone 124 arch/um/os-Linux/helper.c pid = clone(proc, (void *) sp, flags, arg);
clone 287 arch/um/os-Linux/skas/process.c pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
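The arch/um hits above (and the selftests at the bottom of this listing) call the libc clone() wrapper, which takes an entry function and the top of a caller-allocated child stack. A minimal userspace sketch of that calling convention follows; child_fn and STACK_SIZE are illustrative names, not taken from the tree:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#define STACK_SIZE (1024 * 1024)	/* illustrative child-stack size */

static int child_fn(void *arg)		/* runs in the new task */
{
	printf("child: got \"%s\"\n", (char *)arg);
	return 0;
}

int main(void)
{
	char *stack = malloc(STACK_SIZE);
	int pid;

	if (!stack)
		return 1;

	/* Pass the *top* of the allocation: stacks grow down on most
	 * architectures, hence the "stack + stack_size" idiom seen in
	 * the selftests indexed below. */
	pid = clone(child_fn, stack + STACK_SIZE, SIGCHLD, (void *)"hello");
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);	/* SIGCHLD makes the child waitable */
	free(stack);
	return 0;
}

The several SYSCALL_DEFINE variants of clone in kernel/fork.c, also indexed below, exist because architectures disagree on the order of the raw syscall arguments; the wrapper hides that.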
clone 333 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
clone 343 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c r = amdgpu_sync_fence(NULL, clone, f, e->explicit);
clone 353 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c dma_fence_put(clone->last_vm_update);
clone 354 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c clone->last_vm_update = dma_fence_get(source->last_vm_update);
clone 53 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
clone 1855 drivers/gpu/drm/i915/gem/i915_gem_context.c struct i915_gem_engines *clone;
clone 1859 drivers/gpu/drm/i915/gem/i915_gem_context.c clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
clone 1860 drivers/gpu/drm/i915/gem/i915_gem_context.c if (!clone)
clone 1863 drivers/gpu/drm/i915/gem/i915_gem_context.c init_rcu_head(&clone->rcu);
clone 1868 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] = NULL;
clone 1883 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] =
clone 1886 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n] = intel_context_create(dst, engine);
clone 1887 drivers/gpu/drm/i915/gem/i915_gem_context.c if (IS_ERR_OR_NULL(clone->engines[n])) {
clone 1888 drivers/gpu/drm/i915/gem/i915_gem_context.c __free_engines(clone, n);
clone 1892 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->num_engines = n;
clone 1898 drivers/gpu/drm/i915/gem/i915_gem_context.c RCU_INIT_POINTER(dst->engines, clone);
clone 1928 drivers/gpu/drm/i915/gem/i915_gem_context.c struct i915_gem_engines *clone;
clone 1932 drivers/gpu/drm/i915/gem/i915_gem_context.c clone = dst->engines; /* no locking required; sole access */
clone 1933 drivers/gpu/drm/i915/gem/i915_gem_context.c if (e->num_engines != clone->num_engines) {
clone 1941 drivers/gpu/drm/i915/gem/i915_gem_context.c if (clone->engines[n]->engine->class != ce->engine->class) {
clone 1952 drivers/gpu/drm/i915/gem/i915_gem_context.c clone->engines[n]->sseu = ce->sseu;
clone 1371 drivers/gpu/drm/i915/i915_gem_gtt.c struct i915_address_space *clone = vm->i915->kernel_context->vm;
clone 1373 drivers/gpu/drm/i915/i915_gem_gtt.c GEM_BUG_ON(!clone->has_read_only);
clone 1375 drivers/gpu/drm/i915/i915_gem_gtt.c vm->scratch_order = clone->scratch_order;
clone 1376 drivers/gpu/drm/i915/i915_gem_gtt.c memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
clone 35 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c bool clone;
clone 51 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c if (dmaobj->clone) {
clone 99 drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c dmaobj->clone = true;
clone 159 drivers/md/dm-clone-target.c static enum clone_metadata_mode get_clone_mode(struct clone *clone)
clone 161 drivers/md/dm-clone-target.c return READ_ONCE(clone->mode);
clone 164 drivers/md/dm-clone-target.c static const char *clone_device_name(struct clone *clone)
clone 166 drivers/md/dm-clone-target.c return dm_table_device_name(clone->ti->table);
clone 169 drivers/md/dm-clone-target.c static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
clone 177 drivers/md/dm-clone-target.c enum clone_metadata_mode old_mode = get_clone_mode(clone);
clone 186 drivers/md/dm-clone-target.c dm_clone_metadata_set_read_only(clone->cmd);
clone 190 drivers/md/dm-clone-target.c dm_clone_metadata_set_read_write(clone->cmd);
clone 194 drivers/md/dm-clone-target.c WRITE_ONCE(clone->mode, new_mode);
clone 197 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table);
clone 198 drivers/md/dm-clone-target.c DMINFO("%s: Switching to %s mode", clone_device_name(clone),
clone 203 drivers/md/dm-clone-target.c static void __abort_transaction(struct clone *clone)
clone 205 drivers/md/dm-clone-target.c const char *dev_name = clone_device_name(clone);
clone 207 drivers/md/dm-clone-target.c if (get_clone_mode(clone) >= CM_READ_ONLY)
clone 211 drivers/md/dm-clone-target.c if (dm_clone_metadata_abort(clone->cmd)) {
clone 213 drivers/md/dm-clone-target.c __set_clone_mode(clone, CM_FAIL);
clone 217 drivers/md/dm-clone-target.c static void __reload_in_core_bitset(struct clone *clone)
clone 219 drivers/md/dm-clone-target.c const char *dev_name = clone_device_name(clone);
clone 221 drivers/md/dm-clone-target.c if (get_clone_mode(clone) == CM_FAIL)
clone 226 drivers/md/dm-clone-target.c if (dm_clone_reload_in_core_bitset(clone->cmd)) {
clone 228 drivers/md/dm-clone-target.c __set_clone_mode(clone, CM_FAIL);
clone 232 drivers/md/dm-clone-target.c static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
clone 235 drivers/md/dm-clone-target.c clone_device_name(clone), op, r);
clone 237 drivers/md/dm-clone-target.c __abort_transaction(clone);
clone 238 drivers/md/dm-clone-target.c __set_clone_mode(clone, CM_READ_ONLY);
clone 245 drivers/md/dm-clone-target.c __reload_in_core_bitset(clone);
clone 251 drivers/md/dm-clone-target.c static inline void wakeup_hydration_waiters(struct clone *clone)
clone 253 drivers/md/dm-clone-target.c wake_up_all(&clone->hydration_stopped);
clone 256 drivers/md/dm-clone-target.c static inline void wake_worker(struct clone *clone)
clone 258 drivers/md/dm-clone-target.c queue_work(clone->wq, &clone->worker);
clone 266 drivers/md/dm-clone-target.c static inline void remap_to_source(struct clone *clone, struct bio *bio)
clone 268 drivers/md/dm-clone-target.c bio_set_dev(bio, clone->source_dev->bdev);
clone 271 drivers/md/dm-clone-target.c static inline void remap_to_dest(struct clone *clone, struct bio *bio)
clone 273 drivers/md/dm-clone-target.c bio_set_dev(bio, clone->dest_dev->bdev);
clone 276 drivers/md/dm-clone-target.c static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
clone 279 drivers/md/dm-clone-target.c dm_clone_changed_this_transaction(clone->cmd);
clone 283 drivers/md/dm-clone-target.c static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
clone 285 drivers/md/dm-clone-target.c return ((sector_t)region_nr << clone->region_shift);
clone 289 drivers/md/dm-clone-target.c static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
clone 291 drivers/md/dm-clone-target.c return (bio->bi_iter.bi_sector >> clone->region_shift);
clone 295 drivers/md/dm-clone-target.c static void bio_region_range(struct clone *clone, struct bio *bio,
clone 300 drivers/md/dm-clone-target.c *rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
clone 301 drivers/md/dm-clone-target.c end = bio_end_sector(bio) >> clone->region_shift;
clone 310 drivers/md/dm-clone-target.c static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
clone 312 drivers/md/dm-clone-target.c return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
clone 346 drivers/md/dm-clone-target.c static void issue_bio(struct clone *clone, struct bio *bio)
clone 348 drivers/md/dm-clone-target.c if (!bio_triggers_commit(clone, bio)) {
clone 357 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
clone 366 drivers/md/dm-clone-target.c spin_lock_irq(&clone->lock);
clone 367 drivers/md/dm-clone-target.c bio_list_add(&clone->deferred_flush_bios, bio);
clone 368 drivers/md/dm-clone-target.c spin_unlock_irq(&clone->lock);
clone 370 drivers/md/dm-clone-target.c wake_worker(clone);
clone 379 drivers/md/dm-clone-target.c static void remap_and_issue(struct clone *clone, struct bio *bio)
clone 381 drivers/md/dm-clone-target.c remap_to_dest(clone, bio);
clone 382 drivers/md/dm-clone-target.c issue_bio(clone, bio);
clone 392 drivers/md/dm-clone-target.c static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
clone 403 drivers/md/dm-clone-target.c if (bio_triggers_commit(clone, bio))
clone 409 drivers/md/dm-clone-target.c spin_lock_irqsave(&clone->lock, flags);
clone 410 drivers/md/dm-clone-target.c bio_list_merge(&clone->deferred_bios, &normal_bios);
clone 411 drivers/md/dm-clone-target.c bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
clone 412 drivers/md/dm-clone-target.c spin_unlock_irqrestore(&clone->lock, flags);
clone 414 drivers/md/dm-clone-target.c wake_worker(clone);
clone 417 drivers/md/dm-clone-target.c static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
clone 440 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
clone 449 drivers/md/dm-clone-target.c spin_lock_irqsave(&clone->lock, flags);
clone 450 drivers/md/dm-clone-target.c bio_list_add(&clone->deferred_flush_completions, bio);
clone 451 drivers/md/dm-clone-target.c spin_unlock_irqrestore(&clone->lock, flags);
clone 453 drivers/md/dm-clone-target.c wake_worker(clone);
clone 462 drivers/md/dm-clone-target.c static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
clone 471 drivers/md/dm-clone-target.c if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
clone 472 drivers/md/dm-clone-target.c remap_to_dest(clone, bio);
clone 473 drivers/md/dm-clone-target.c bio_region_range(clone, bio, &rs, &nr_regions);
clone 474 drivers/md/dm-clone-target.c trim_bio(bio, region_to_sector(clone, rs),
clone 475 drivers/md/dm-clone-target.c nr_regions << clone->region_shift);
clone 481 drivers/md/dm-clone-target.c static void process_discard_bio(struct clone *clone, struct bio *bio)
clone 485 drivers/md/dm-clone-target.c bio_region_range(clone, bio, &rs, &nr_regions);
clone 491 drivers/md/dm-clone-target.c if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
clone 492 drivers/md/dm-clone-target.c (rs + nr_regions) > clone->nr_regions)) {
clone 494 drivers/md/dm-clone-target.c clone_device_name(clone), rs, nr_regions,
clone 495 drivers/md/dm-clone-target.c clone->nr_regions,
clone 506 drivers/md/dm-clone-target.c if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
clone 507 drivers/md/dm-clone-target.c complete_discard_bio(clone, bio, true);
clone 516 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
clone 524 drivers/md/dm-clone-target.c spin_lock_irq(&clone->lock);
clone 525 drivers/md/dm-clone-target.c bio_list_add(&clone->deferred_discard_bios, bio);
clone 526 drivers/md/dm-clone-target.c spin_unlock_irq(&clone->lock);
clone 528 drivers/md/dm-clone-target.c wake_worker(clone);
clone 537 drivers/md/dm-clone-target.c struct clone *clone;
clone 577 drivers/md/dm-clone-target.c static int hash_table_init(struct clone *clone)
clone 584 drivers/md/dm-clone-target.c clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
clone 585 drivers/md/dm-clone-target.c if (!clone->ht)
clone 589 drivers/md/dm-clone-target.c bucket = clone->ht + i;
clone 598 drivers/md/dm-clone-target.c static void hash_table_exit(struct clone *clone)
clone 600 drivers/md/dm-clone-target.c kvfree(clone->ht);
clone 603 drivers/md/dm-clone-target.c static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
clone 606 drivers/md/dm-clone-target.c return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
clone 663 drivers/md/dm-clone-target.c static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
clone 671 drivers/md/dm-clone-target.c hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
clone 672 drivers/md/dm-clone-target.c hd->clone = clone;
clone 679 drivers/md/dm-clone-target.c mempool_free(hd, &hd->clone->hydration_pool);
clone 705 drivers/md/dm-clone-target.c struct clone *clone = hd->clone;
clone 707 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
clone 712 drivers/md/dm-clone-target.c r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);
clone 714 drivers/md/dm-clone-target.c bucket = get_hash_table_bucket(clone, hd->region_nr);
clone 738 drivers/md/dm-clone-target.c struct clone *clone = hd->clone;
clone 744 drivers/md/dm-clone-target.c complete_overwrite_bio(clone, hd->overwrite_bio);
clone 746 drivers/md/dm-clone-target.c issue_deferred_bios(clone, &hd->deferred_bios);
clone 758 drivers/md/dm-clone-target.c if (atomic_dec_and_test(&clone->hydrations_in_flight))
clone 759 drivers/md/dm-clone-target.c wakeup_hydration_waiters(clone);
clone 767 drivers/md/dm-clone-target.c struct clone *clone = hd->clone;
clone 772 drivers/md/dm-clone-target.c DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
clone 789 drivers/md/dm-clone-target.c if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
clone 790 drivers/md/dm-clone-target.c !atomic_read(&clone->ios_in_flight))
clone 791 drivers/md/dm-clone-target.c wake_worker(clone);
clone 799 drivers/md/dm-clone-target.c struct clone *clone = hd->clone;
clone 804 drivers/md/dm-clone-target.c region_size = clone->region_size;
clone 808 drivers/md/dm-clone-target.c total_size = region_to_sector(clone, nr_regions - 1);
clone 810 drivers/md/dm-clone-target.c if (region_end == clone->nr_regions - 1) {
clone 815 drivers/md/dm-clone-target.c tail_size = clone->ti->len & (region_size - 1);
clone 824 drivers/md/dm-clone-target.c from.bdev = clone->source_dev->bdev;
clone 825 drivers/md/dm-clone-target.c from.sector = region_to_sector(clone, region_start);
clone 828 drivers/md/dm-clone-target.c to.bdev = clone->dest_dev->bdev;
clone 833 drivers/md/dm-clone-target.c atomic_add(nr_regions, &clone->hydrations_in_flight);
clone 834 drivers/md/dm-clone-target.c dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
clone 861 drivers/md/dm-clone-target.c atomic_inc(&hd->clone->hydrations_in_flight);
clone 875 drivers/md/dm-clone-target.c static void hydrate_bio_region(struct clone *clone, struct bio *bio)
clone 882 drivers/md/dm-clone-target.c region_nr = bio_to_region(clone, bio);
clone 883 drivers/md/dm-clone-target.c bucket = get_hash_table_bucket(clone, region_nr);
clone 895 drivers/md/dm-clone-target.c if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
clone 898 drivers/md/dm-clone-target.c issue_bio(clone, bio);
clone 908 drivers/md/dm-clone-target.c hd = alloc_hydration(clone);
clone 914 drivers/md/dm-clone-target.c if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
clone 917 drivers/md/dm-clone-target.c issue_bio(clone, bio);
clone 935 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
clone 950 drivers/md/dm-clone-target.c if (is_overwrite_bio(clone, bio)) {
clone 983 drivers/md/dm-clone-target.c struct clone *clone = hd->clone;
clone 984 drivers/md/dm-clone-target.c unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);
clone 1018 drivers/md/dm-clone-target.c static unsigned long __start_next_hydration(struct clone *clone,
clone 1025 drivers/md/dm-clone-target.c unsigned long nr_regions = clone->nr_regions;
clone 1027 drivers/md/dm-clone-target.c hd = alloc_hydration(clone);
clone 1031 drivers/md/dm-clone-target.c offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
clone 1035 drivers/md/dm-clone-target.c bucket = get_hash_table_bucket(clone, offset);
clone 1038 drivers/md/dm-clone-target.c if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
clone 1064 drivers/md/dm-clone-target.c static void do_hydration(struct clone *clone)
clone 1067 drivers/md/dm-clone-target.c unsigned long offset, nr_regions = clone->nr_regions;
clone 1074 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
clone 1077 drivers/md/dm-clone-target.c if (dm_clone_is_hydration_done(clone->cmd))
clone 1083 drivers/md/dm-clone-target.c atomic_inc(&clone->hydrations_in_flight);
clone 1095 drivers/md/dm-clone-target.c offset = clone->hydration_offset;
clone 1096 drivers/md/dm-clone-target.c while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
clone 1097 drivers/md/dm-clone-target.c !atomic_read(&clone->ios_in_flight) &&
clone 1098 drivers/md/dm-clone-target.c test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
clone 1100 drivers/md/dm-clone-target.c current_volume = atomic_read(&clone->hydrations_in_flight);
clone 1103 drivers/md/dm-clone-target.c if (current_volume > READ_ONCE(clone->hydration_threshold))
clone 1106 drivers/md/dm-clone-target.c offset = __start_next_hydration(clone, offset, &batch);
clone 1115 drivers/md/dm-clone-target.c clone->hydration_offset = offset;
clone 1117 drivers/md/dm-clone-target.c if (atomic_dec_and_test(&clone->hydrations_in_flight))
clone 1118 drivers/md/dm-clone-target.c wakeup_hydration_waiters(clone);
clone 1123 drivers/md/dm-clone-target.c static bool need_commit_due_to_time(struct clone *clone)
clone 1125 drivers/md/dm-clone-target.c return !time_in_range(jiffies, clone->last_commit_jiffies,
clone 1126 drivers/md/dm-clone-target.c clone->last_commit_jiffies + COMMIT_PERIOD);
clone 1132 drivers/md/dm-clone-target.c static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
clone 1139 drivers/md/dm-clone-target.c mutex_lock(&clone->commit_lock);
clone 1141 drivers/md/dm-clone-target.c if (!dm_clone_changed_this_transaction(clone->cmd))
clone 1144 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
clone 1149 drivers/md/dm-clone-target.c r = dm_clone_metadata_pre_commit(clone->cmd);
clone 1151 drivers/md/dm-clone-target.c __metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
clone 1155 drivers/md/dm-clone-target.c bio_reset(&clone->flush_bio);
clone 1156 drivers/md/dm-clone-target.c bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
clone 1157 drivers/md/dm-clone-target.c clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
clone 1159 drivers/md/dm-clone-target.c r = submit_bio_wait(&clone->flush_bio);
clone 1161 drivers/md/dm-clone-target.c __metadata_operation_failed(clone, "flush destination device", r);
clone 1168 drivers/md/dm-clone-target.c r = dm_clone_metadata_commit(clone->cmd);
clone 1170 drivers/md/dm-clone-target.c __metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
clone 1174 drivers/md/dm-clone-target.c if (dm_clone_is_hydration_done(clone->cmd))
clone 1175 drivers/md/dm-clone-target.c dm_table_event(clone->ti->table);
clone 1177 drivers/md/dm-clone-target.c mutex_unlock(&clone->commit_lock);
clone 1182 drivers/md/dm-clone-target.c static void process_deferred_discards(struct clone *clone)
clone 1190 drivers/md/dm-clone-target.c spin_lock_irq(&clone->lock);
clone 1191 drivers/md/dm-clone-target.c bio_list_merge(&discards, &clone->deferred_discard_bios);
clone 1192 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_discard_bios);
clone 1193 drivers/md/dm-clone-target.c spin_unlock_irq(&clone->lock);
clone 1198 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
clone 1203 drivers/md/dm-clone-target.c bio_region_range(clone, bio, &rs, &nr_regions);
clone 1209 drivers/md/dm-clone-target.c r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
clone 1216 drivers/md/dm-clone-target.c complete_discard_bio(clone, bio, r == 0);
clone 1220 drivers/md/dm-clone-target.c static void process_deferred_bios(struct clone *clone)
clone 1224 drivers/md/dm-clone-target.c spin_lock_irq(&clone->lock);
clone 1225 drivers/md/dm-clone-target.c bio_list_merge(&bios, &clone->deferred_bios);
clone 1226 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_bios);
clone 1227 drivers/md/dm-clone-target.c spin_unlock_irq(&clone->lock);
clone 1235 drivers/md/dm-clone-target.c static void process_deferred_flush_bios(struct clone *clone)
clone 1246 drivers/md/dm-clone-target.c spin_lock_irq(&clone->lock);
clone 1247 drivers/md/dm-clone-target.c bio_list_merge(&bios, &clone->deferred_flush_bios);
clone 1248 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_flush_bios);
clone 1250 drivers/md/dm-clone-target.c bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
clone 1251 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_flush_completions);
clone 1252 drivers/md/dm-clone-target.c spin_unlock_irq(&clone->lock);
clone 1255 drivers/md/dm-clone-target.c !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
clone 1258 drivers/md/dm-clone-target.c if (commit_metadata(clone, &dest_dev_flushed)) {
clone 1267 drivers/md/dm-clone-target.c clone->last_commit_jiffies = jiffies;
clone 1287 drivers/md/dm-clone-target.c struct clone *clone = container_of(work, typeof(*clone), worker);
clone 1289 drivers/md/dm-clone-target.c process_deferred_bios(clone);
clone 1290 drivers/md/dm-clone-target.c process_deferred_discards(clone);
clone 1301 drivers/md/dm-clone-target.c process_deferred_flush_bios(clone);
clone 1304 drivers/md/dm-clone-target.c do_hydration(clone);
clone 1314 drivers/md/dm-clone-target.c struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);
clone 1316 drivers/md/dm-clone-target.c wake_worker(clone);
clone 1317 drivers/md/dm-clone-target.c queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
clone 1327 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 1330 drivers/md/dm-clone-target.c atomic_inc(&clone->ios_in_flight);
clone 1332 drivers/md/dm-clone-target.c if (unlikely(get_clone_mode(clone) == CM_FAIL))
clone 1343 drivers/md/dm-clone-target.c remap_and_issue(clone, bio);
clone 1355 drivers/md/dm-clone-target.c process_discard_bio(clone, bio);
clone 1369 drivers/md/dm-clone-target.c region_nr = bio_to_region(clone, bio);
clone 1370 drivers/md/dm-clone-target.c if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
clone 1371 drivers/md/dm-clone-target.c remap_and_issue(clone, bio);
clone 1374 drivers/md/dm-clone-target.c remap_to_source(clone, bio);
clone 1378 drivers/md/dm-clone-target.c remap_to_dest(clone, bio);
clone 1379 drivers/md/dm-clone-target.c hydrate_bio_region(clone, bio);
clone 1386 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 1388 drivers/md/dm-clone-target.c atomic_dec(&clone->ios_in_flight);
clone 1393 drivers/md/dm-clone-target.c static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
clone 1399 drivers/md/dm-clone-target.c count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
clone 1400 drivers/md/dm-clone-target.c count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
clone 1404 drivers/md/dm-clone-target.c if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
clone 1407 drivers/md/dm-clone-target.c if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
clone 1413 drivers/md/dm-clone-target.c static void emit_core_args(struct clone *clone, char *result,
clone 1420 drivers/md/dm-clone-target.c READ_ONCE(clone->hydration_threshold),
clone 1421 drivers/md/dm-clone-target.c READ_ONCE(clone->hydration_batch_size));
clone 1443 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 1447 drivers/md/dm-clone-target.c if (get_clone_mode(clone) == CM_FAIL) {
clone 1454 drivers/md/dm-clone-target.c (void) commit_metadata(clone, NULL);
clone 1456 drivers/md/dm-clone-target.c r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);
clone 1460 drivers/md/dm-clone-target.c clone_device_name(clone), r);
clone 1464 drivers/md/dm-clone-target.c r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);
clone 1468 drivers/md/dm-clone-target.c clone_device_name(clone), r);
clone 1476 drivers/md/dm-clone-target.c (unsigned long long)clone->region_size,
clone 1477 drivers/md/dm-clone-target.c dm_clone_nr_of_hydrated_regions(clone->cmd),
clone 1478 drivers/md/dm-clone-target.c clone->nr_regions,
clone 1479 drivers/md/dm-clone-target.c atomic_read(&clone->hydrations_in_flight));
clone 1481 drivers/md/dm-clone-target.c emit_flags(clone, result, maxlen, &sz);
clone 1482 drivers/md/dm-clone-target.c emit_core_args(clone, result, maxlen, &sz);
clone 1484 drivers/md/dm-clone-target.c switch (get_clone_mode(clone)) {
clone 1498 drivers/md/dm-clone-target.c format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
clone 1501 drivers/md/dm-clone-target.c format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
clone 1504 drivers/md/dm-clone-target.c format_dev_t(buf, clone->source_dev->bdev->bd_dev);
clone 1507 drivers/md/dm-clone-target.c for (i = 0; i < clone->nr_ctr_args; i++)
clone 1508 drivers/md/dm-clone-target.c DMEMIT(" %s", clone->ctr_args[i]);
clone 1520 drivers/md/dm-clone-target.c struct clone *clone = container_of(cb, struct clone, callbacks);
clone 1522 drivers/md/dm-clone-target.c source_q = bdev_get_queue(clone->source_dev->bdev);
clone 1523 drivers/md/dm-clone-target.c dest_q = bdev_get_queue(clone->dest_dev->bdev);
clone 1555 drivers/md/dm-clone-target.c static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
clone 1560 drivers/md/dm-clone-target.c struct dm_target *ti = clone->ti;
clone 1581 drivers/md/dm-clone-target.c __clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
clone 1583 drivers/md/dm-clone-target.c __clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
clone 1593 drivers/md/dm-clone-target.c static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
clone 1599 drivers/md/dm-clone-target.c struct dm_target *ti = clone->ti;
clone 1608 drivers/md/dm-clone-target.c clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
clone 1609 drivers/md/dm-clone-target.c clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;
clone 1633 drivers/md/dm-clone-target.c clone->hydration_threshold = value;
clone 1639 drivers/md/dm-clone-target.c clone->hydration_batch_size = value;
clone 1649 drivers/md/dm-clone-target.c static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
clone 1670 drivers/md/dm-clone-target.c if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
clone 1671 drivers/md/dm-clone-target.c region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
clone 1676 drivers/md/dm-clone-target.c clone->region_size = region_size;
clone 1695 drivers/md/dm-clone-target.c static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
clone 1701 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
clone 1702 drivers/md/dm-clone-target.c &clone->metadata_dev);
clone 1708 drivers/md/dm-clone-target.c metadata_dev_size = get_dev_size(clone->metadata_dev);
clone 1711 drivers/md/dm-clone-target.c bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
clone 1716 drivers/md/dm-clone-target.c static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
clone 1721 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
clone 1722 drivers/md/dm-clone-target.c &clone->dest_dev);
clone 1728 drivers/md/dm-clone-target.c dest_dev_size = get_dev_size(clone->dest_dev);
clone 1729 drivers/md/dm-clone-target.c if (dest_dev_size < clone->ti->len) {
clone 1730 drivers/md/dm-clone-target.c dm_put_device(clone->ti, clone->dest_dev);
clone 1738 drivers/md/dm-clone-target.c static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
clone 1743 drivers/md/dm-clone-target.c r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
clone 1744 drivers/md/dm-clone-target.c &clone->source_dev);
clone 1750 drivers/md/dm-clone-target.c source_dev_size = get_dev_size(clone->source_dev);
clone 1751 drivers/md/dm-clone-target.c if (source_dev_size < clone->ti->len) {
clone 1752 drivers/md/dm-clone-target.c dm_put_device(clone->ti, clone->source_dev);
clone 1760 drivers/md/dm-clone-target.c static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
clone 1780 drivers/md/dm-clone-target.c clone->nr_ctr_args = argc;
clone 1781 drivers/md/dm-clone-target.c clone->ctr_args = copy;
clone 1793 drivers/md/dm-clone-target.c struct clone *clone;
clone 1804 drivers/md/dm-clone-target.c clone = kzalloc(sizeof(*clone), GFP_KERNEL);
clone 1805 drivers/md/dm-clone-target.c if (!clone) {
clone 1810 drivers/md/dm-clone-target.c clone->ti = ti;
clone 1813 drivers/md/dm-clone-target.c __set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
clone 1814 drivers/md/dm-clone-target.c __set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
clone 1815 drivers/md/dm-clone-target.c __set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
clone 1817 drivers/md/dm-clone-target.c r = parse_metadata_dev(clone, &as, &ti->error);
clone 1821 drivers/md/dm-clone-target.c r = parse_dest_dev(clone, &as, &ti->error);
clone 1825 drivers/md/dm-clone-target.c r = parse_source_dev(clone, &as, &ti->error);
clone 1829 drivers/md/dm-clone-target.c r = parse_region_size(clone, &as, &ti->error);
clone 1833 drivers/md/dm-clone-target.c clone->region_shift = __ffs(clone->region_size);
clone 1834 drivers/md/dm-clone-target.c nr_regions = dm_sector_div_up(ti->len, clone->region_size);
clone 1843 drivers/md/dm-clone-target.c clone->nr_regions = nr_regions;
clone 1845 drivers/md/dm-clone-target.c r = validate_nr_regions(clone->nr_regions, &ti->error);
clone 1849 drivers/md/dm-clone-target.c r = dm_set_target_max_io_len(ti, clone->region_size);
clone 1855 drivers/md/dm-clone-target.c r = parse_feature_args(&as, clone);
clone 1859 drivers/md/dm-clone-target.c r = parse_core_args(&as, clone);
clone 1864 drivers/md/dm-clone-target.c clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
clone 1865 drivers/md/dm-clone-target.c clone->region_size);
clone 1866 drivers/md/dm-clone-target.c if (IS_ERR(clone->cmd)) {
clone 1868 drivers/md/dm-clone-target.c r = PTR_ERR(clone->cmd);
clone 1872 drivers/md/dm-clone-target.c __set_clone_mode(clone, CM_WRITE);
clone 1874 drivers/md/dm-clone-target.c if (get_clone_mode(clone) != CM_WRITE) {
clone 1880 drivers/md/dm-clone-target.c clone->last_commit_jiffies = jiffies;
clone 1883 drivers/md/dm-clone-target.c r = hash_table_init(clone);
clone 1889 drivers/md/dm-clone-target.c atomic_set(&clone->ios_in_flight, 0);
clone 1890 drivers/md/dm-clone-target.c init_waitqueue_head(&clone->hydration_stopped);
clone 1891 drivers/md/dm-clone-target.c spin_lock_init(&clone->lock);
clone 1892 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_bios);
clone 1893 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_discard_bios);
clone 1894 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_flush_bios);
clone 1895 drivers/md/dm-clone-target.c bio_list_init(&clone->deferred_flush_completions);
clone 1896 drivers/md/dm-clone-target.c clone->hydration_offset = 0;
clone 1897 drivers/md/dm-clone-target.c atomic_set(&clone->hydrations_in_flight, 0);
clone 1898 drivers/md/dm-clone-target.c bio_init(&clone->flush_bio, NULL, 0);
clone 1900 drivers/md/dm-clone-target.c clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
clone 1901 drivers/md/dm-clone-target.c if (!clone->wq) {
clone 1907 drivers/md/dm-clone-target.c INIT_WORK(&clone->worker, do_worker);
clone 1908 drivers/md/dm-clone-target.c INIT_DELAYED_WORK(&clone->waker, do_waker);
clone 1910 drivers/md/dm-clone-target.c clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
clone 1911 drivers/md/dm-clone-target.c if (IS_ERR(clone->kcopyd_client)) {
clone 1912 drivers/md/dm-clone-target.c r = PTR_ERR(clone->kcopyd_client);
clone 1916 drivers/md/dm-clone-target.c r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
clone 1924 drivers/md/dm-clone-target.c r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
clone 1928 drivers/md/dm-clone-target.c mutex_init(&clone->commit_lock);
clone 1929 drivers/md/dm-clone-target.c clone->callbacks.congested_fn = clone_is_congested;
clone 1930 drivers/md/dm-clone-target.c dm_table_add_target_callbacks(ti->table, &clone->callbacks);
clone 1940 drivers/md/dm-clone-target.c ti->private = clone;
clone 1945 drivers/md/dm-clone-target.c mempool_exit(&clone->hydration_pool);
clone 1947 drivers/md/dm-clone-target.c dm_kcopyd_client_destroy(clone->kcopyd_client);
clone 1949 drivers/md/dm-clone-target.c destroy_workqueue(clone->wq);
clone 1951 drivers/md/dm-clone-target.c hash_table_exit(clone);
clone 1953 drivers/md/dm-clone-target.c dm_clone_metadata_close(clone->cmd);
clone 1955 drivers/md/dm-clone-target.c dm_put_device(ti, clone->source_dev);
clone 1957 drivers/md/dm-clone-target.c dm_put_device(ti, clone->dest_dev);
clone 1959 drivers/md/dm-clone-target.c dm_put_device(ti, clone->metadata_dev);
clone 1961 drivers/md/dm-clone-target.c kfree(clone);
clone 1969 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 1971 drivers/md/dm-clone-target.c mutex_destroy(&clone->commit_lock);
clone 1972 drivers/md/dm-clone-target.c bio_uninit(&clone->flush_bio);
clone 1974 drivers/md/dm-clone-target.c for (i = 0; i < clone->nr_ctr_args; i++)
clone 1975 drivers/md/dm-clone-target.c kfree(clone->ctr_args[i]);
clone 1976 drivers/md/dm-clone-target.c kfree(clone->ctr_args);
clone 1978 drivers/md/dm-clone-target.c mempool_exit(&clone->hydration_pool);
clone 1979 drivers/md/dm-clone-target.c dm_kcopyd_client_destroy(clone->kcopyd_client);
clone 1980 drivers/md/dm-clone-target.c destroy_workqueue(clone->wq);
clone 1981 drivers/md/dm-clone-target.c hash_table_exit(clone);
clone 1982 drivers/md/dm-clone-target.c dm_clone_metadata_close(clone->cmd);
clone 1983 drivers/md/dm-clone-target.c dm_put_device(ti, clone->source_dev);
clone 1984 drivers/md/dm-clone-target.c dm_put_device(ti, clone->dest_dev);
clone 1985 drivers/md/dm-clone-target.c dm_put_device(ti, clone->metadata_dev);
clone 1987 drivers/md/dm-clone-target.c kfree(clone);
clone 1994 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 2011 drivers/md/dm-clone-target.c cancel_delayed_work_sync(&clone->waker);
clone 2013 drivers/md/dm-clone-target.c set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
clone 2024 drivers/md/dm-clone-target.c wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
clone 2025 drivers/md/dm-clone-target.c flush_workqueue(clone->wq);
clone 2027 drivers/md/dm-clone-target.c (void) commit_metadata(clone, NULL);
clone 2032 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 2034 drivers/md/dm-clone-target.c clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
clone 2035 drivers/md/dm-clone-target.c do_waker(&clone->waker.work);
clone 2049 drivers/md/dm-clone-target.c static void disable_passdown_if_not_supported(struct clone *clone)
clone 2051 drivers/md/dm-clone-target.c struct block_device *dest_dev = clone->dest_dev->bdev;
clone 2056 drivers/md/dm-clone-target.c if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
clone 2061 drivers/md/dm-clone-target.c else if (dest_limits->max_discard_sectors < clone->region_size)
clone 2067 drivers/md/dm-clone-target.c clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
clone 2071 drivers/md/dm-clone-target.c static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
clone 2073 drivers/md/dm-clone-target.c struct block_device *dest_bdev = clone->dest_dev->bdev;
clone 2076 drivers/md/dm-clone-target.c if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
clone 2078 drivers/md/dm-clone-target.c limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
clone 2079 drivers/md/dm-clone-target.c limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
clone 2098 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 2105 drivers/md/dm-clone-target.c if (io_opt_sectors < clone->region_size ||
clone 2106 drivers/md/dm-clone-target.c do_div(io_opt_sectors, clone->region_size)) {
clone 2107 drivers/md/dm-clone-target.c blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
clone 2108 drivers/md/dm-clone-target.c blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
clone 2111 drivers/md/dm-clone-target.c disable_passdown_if_not_supported(clone);
clone 2112 drivers/md/dm-clone-target.c set_discard_limits(clone, limits);
clone 2119 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 2120 drivers/md/dm-clone-target.c struct dm_dev *dest_dev = clone->dest_dev;
clone 2121 drivers/md/dm-clone-target.c struct dm_dev *source_dev = clone->source_dev;
clone 2132 drivers/md/dm-clone-target.c static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
clone 2134 drivers/md/dm-clone-target.c WRITE_ONCE(clone->hydration_threshold, nr_regions);
clone 2141 drivers/md/dm-clone-target.c wake_worker(clone);
clone 2144 drivers/md/dm-clone-target.c static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
clone 2146 drivers/md/dm-clone-target.c WRITE_ONCE(clone->hydration_batch_size, nr_regions);
clone 2149 drivers/md/dm-clone-target.c static void enable_hydration(struct clone *clone)
clone 2151 drivers/md/dm-clone-target.c if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
clone 2152 drivers/md/dm-clone-target.c wake_worker(clone);
clone 2155 drivers/md/dm-clone-target.c static void disable_hydration(struct clone *clone)
clone 2157 drivers/md/dm-clone-target.c clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
clone 2163 drivers/md/dm-clone-target.c struct clone *clone = ti->private;
clone 2170 drivers/md/dm-clone-target.c enable_hydration(clone);
clone 2175 drivers/md/dm-clone-target.c disable_hydration(clone);
clone 2186 drivers/md/dm-clone-target.c set_hydration_threshold(clone, value);
clone 2195 drivers/md/dm-clone-target.c set_hydration_batch_size(clone, value);
clone 2200 drivers/md/dm-clone-target.c DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
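Much of the dm-clone-target.c arithmetic above (clone 283, clone 289, clone 1833) depends on region_size being a power of two in sectors, so region_shift is its log2 and region/sector conversion is a plain shift. A self-contained sketch of just that math, with made-up example values (the kernel computes the shift with __ffs()):

#include <stdio.h>

typedef unsigned long long sector_t;

/* region_size must be a power of two; region_shift is its log2. */
static unsigned int region_shift(sector_t region_size)
{
	unsigned int shift = 0;

	while ((1ULL << shift) < region_size)
		shift++;
	return shift;
}

int main(void)
{
	sector_t region_size = 8;	/* 8 sectors = 4 KiB regions */
	unsigned int shift = region_shift(region_size);
	sector_t sector = 35;

	/* bio_to_region(): which region does this sector fall in? */
	printf("sector %llu -> region %llu\n", sector, sector >> shift);
	/* region_to_sector(): first sector of a region */
	printf("region 4 starts at sector %llu\n", (sector_t)4 << shift);
	return 0;
}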
clone 1277 drivers/md/dm-crypt.c static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
clone 1299 drivers/md/dm-crypt.c struct bio *clone;
clone 1309 drivers/md/dm-crypt.c clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, &cc->bs);
clone 1310 drivers/md/dm-crypt.c if (!clone)
clone 1313 drivers/md/dm-crypt.c clone_init(io, clone);
clone 1320 drivers/md/dm-crypt.c crypt_free_buffer_pages(cc, clone);
clone 1321 drivers/md/dm-crypt.c bio_put(clone);
clone 1328 drivers/md/dm-crypt.c bio_add_page(clone, page, len, 0);
clone 1334 drivers/md/dm-crypt.c if (dm_crypt_integrity_io_alloc(io, clone)) {
clone 1335 drivers/md/dm-crypt.c crypt_free_buffer_pages(cc, clone);
clone 1336 drivers/md/dm-crypt.c bio_put(clone);
clone 1337 drivers/md/dm-crypt.c clone = NULL;
clone 1343 drivers/md/dm-crypt.c return clone;
clone 1346 drivers/md/dm-crypt.c static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
clone 1351 drivers/md/dm-crypt.c bio_for_each_segment_all(bv, clone, iter_all) {
clone 1417 drivers/md/dm-crypt.c static void crypt_endio(struct bio *clone)
clone 1419 drivers/md/dm-crypt.c struct dm_crypt_io *io = clone->bi_private;
clone 1421 drivers/md/dm-crypt.c unsigned rw = bio_data_dir(clone);
clone 1428 drivers/md/dm-crypt.c crypt_free_buffer_pages(cc, clone);
clone 1430 drivers/md/dm-crypt.c error = clone->bi_status;
clone 1431 drivers/md/dm-crypt.c bio_put(clone);
clone 1444 drivers/md/dm-crypt.c static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone 1448 drivers/md/dm-crypt.c clone->bi_private = io;
clone 1449 drivers/md/dm-crypt.c clone->bi_end_io = crypt_endio;
clone 1450 drivers/md/dm-crypt.c bio_set_dev(clone, cc->dev->bdev);
clone 1451 drivers/md/dm-crypt.c clone->bi_opf = io->base_bio->bi_opf;
clone 1457 drivers/md/dm-crypt.c struct bio *clone;
clone 1465 drivers/md/dm-crypt.c clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
clone 1466 drivers/md/dm-crypt.c if (!clone)
clone 1471 drivers/md/dm-crypt.c clone_init(io, clone);
clone 1472 drivers/md/dm-crypt.c clone->bi_iter.bi_sector = cc->start + io->sector;
clone 1474 drivers/md/dm-crypt.c if (dm_crypt_integrity_io_alloc(io, clone)) {
clone 1476 drivers/md/dm-crypt.c bio_put(clone);
clone 1480 drivers/md/dm-crypt.c generic_make_request(clone);
clone 1504 drivers/md/dm-crypt.c struct bio *clone = io->ctx.bio_out;
clone 1506 drivers/md/dm-crypt.c generic_make_request(clone);
clone 1565 drivers/md/dm-crypt.c struct bio *clone = io->ctx.bio_out;
clone 1572 drivers/md/dm-crypt.c crypt_free_buffer_pages(cc, clone);
clone 1573 drivers/md/dm-crypt.c bio_put(clone);
clone 1581 drivers/md/dm-crypt.c clone->bi_iter.bi_sector = cc->start + io->sector;
clone 1584 drivers/md/dm-crypt.c generic_make_request(clone);
clone 1609 drivers/md/dm-crypt.c struct bio *clone;
clone 1620 drivers/md/dm-crypt.c clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
clone 1621 drivers/md/dm-crypt.c if (unlikely(!clone)) {
clone 1626 drivers/md/dm-crypt.c io->ctx.bio_out = clone;
clone 1627 drivers/md/dm-crypt.c io->ctx.iter_out = clone->bi_iter;
clone 1629 drivers/md/dm-crypt.c sector += bio_sectors(clone);
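The dm-crypt read path above clones the base bio rather than copying its data: bio_clone_fast() shares the pages, and clone_init() repoints the completion hook and target device. A condensed sketch stitched together from the lines shown (not the full in-tree functions; error paths and integrity handling trimmed):

/* Condensed from the dm-crypt hits above (clone 1444-1480). */
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->cc;

	clone->bi_private = io;			/* recovered in crypt_endio() */
	clone->bi_end_io = crypt_endio;
	bio_set_dev(clone, cc->dev->bdev);	/* redirect to the backing device */
	clone->bi_opf = io->base_bio->bi_opf;
}

static int read_via_clone(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;

	/* Shares the data pages of the original bio; only the iterator
	 * and completion context are new. */
	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
	if (!clone)
		return 1;

	clone_init(io, clone);
	clone->bi_iter.bi_sector = cc->start + io->sector;	/* remap */
	generic_make_request(clone);
	return 0;
}

The surrounding function name here is illustrative; only the body follows the indexed lines.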
clone 1001 drivers/md/dm-era-target.c struct dm_block *clone;
clone 1027 drivers/md/dm-era-target.c &sb_validator, &clone, &inc);
clone 1038 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 1046 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 1050 drivers/md/dm-era-target.c md->metadata_snap = dm_block_location(clone);
clone 1052 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 1061 drivers/md/dm-era-target.c struct dm_block *clone;
clone 1069 drivers/md/dm-era-target.c r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
clone 1081 drivers/md/dm-era-target.c disk = dm_block_data(clone);
clone 1086 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 1093 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 1097 drivers/md/dm-era-target.c location = dm_block_location(clone);
clone 1098 drivers/md/dm-era-target.c dm_tm_unlock(md->tm, clone);
clone 493 drivers/md/dm-mpath.c struct request *clone;
clone 516 drivers/md/dm-mpath.c clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
clone 518 drivers/md/dm-mpath.c if (IS_ERR(clone)) {
clone 535 drivers/md/dm-mpath.c clone->bio = clone->biotail = NULL;
clone 536 drivers/md/dm-mpath.c clone->rq_disk = bdev->bd_disk;
clone 537 drivers/md/dm-mpath.c clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
clone 538 drivers/md/dm-mpath.c *__clone = clone;
clone 547 drivers/md/dm-mpath.c static void multipath_release_clone(struct request *clone,
clone 564 drivers/md/dm-mpath.c blk_put_request(clone);
clone 1521 drivers/md/dm-mpath.c static int multipath_end_io(struct dm_target *ti, struct request *clone,
clone 1569 drivers/md/dm-mpath.c static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
clone 1573 drivers/md/dm-mpath.c struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
clone 1596 drivers/md/dm-mpath.c bio_list_add(&m->queued_bios, clone);
clone 21 drivers/md/dm-rq.c struct request *orig, *clone;
clone 82 drivers/md/dm-rq.c static void end_clone_bio(struct bio *clone)
clone 85 drivers/md/dm-rq.c container_of(clone, struct dm_rq_clone_bio_info, clone);
clone 88 drivers/md/dm-rq.c blk_status_t error = clone->bi_status;
clone 89 drivers/md/dm-rq.c bool is_last = !clone->bi_next;
clone 91 drivers/md/dm-rq.c bio_put(clone);
clone 164 drivers/md/dm-rq.c static void dm_end_request(struct request *clone, blk_status_t error)
clone 166 drivers/md/dm-rq.c struct dm_rq_target_io *tio = clone->end_io_data;
clone 170 drivers/md/dm-rq.c blk_rq_unprep_clone(clone);
clone 171 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(clone, NULL);
clone 202 drivers/md/dm-rq.c if (tio->clone) {
clone 203 drivers/md/dm-rq.c blk_rq_unprep_clone(tio->clone);
clone 204 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(tio->clone, NULL);
clone 211 drivers/md/dm-rq.c static void dm_done(struct request *clone, blk_status_t error, bool mapped)
clone 214 drivers/md/dm-rq.c struct dm_rq_target_io *tio = clone->end_io_data;
clone 221 drivers/md/dm-rq.c r = rq_end_io(tio->ti, clone, error, &tio->info);
clone 225 drivers/md/dm-rq.c if (req_op(clone) == REQ_OP_DISCARD &&
clone 226 drivers/md/dm-rq.c !clone->q->limits.max_discard_sectors)
clone 228 drivers/md/dm-rq.c else if (req_op(clone) == REQ_OP_WRITE_SAME &&
clone 229 drivers/md/dm-rq.c !clone->q->limits.max_write_same_sectors)
clone 231 drivers/md/dm-rq.c else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
clone 232 drivers/md/dm-rq.c !clone->q->limits.max_write_zeroes_sectors)
clone 239 drivers/md/dm-rq.c dm_end_request(clone, error);
clone 265 drivers/md/dm-rq.c struct request *clone = tio->clone;
clone 267 drivers/md/dm-rq.c if (!clone) {
clone 279 drivers/md/dm-rq.c dm_done(clone, tio->error, mapped);
clone 306 drivers/md/dm-rq.c static void end_clone_request(struct request *clone, blk_status_t error)
clone 308 drivers/md/dm-rq.c struct dm_rq_target_io *tio = clone->end_io_data;
clone 313 drivers/md/dm-rq.c static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone 317 drivers/md/dm-rq.c if (blk_queue_io_stat(clone->q))
clone 318 drivers/md/dm-rq.c clone->rq_flags |= RQF_IO_STAT;
clone 320 drivers/md/dm-rq.c clone->start_time_ns = ktime_get_ns();
clone 321 drivers/md/dm-rq.c r = blk_insert_cloned_request(clone->q, clone);
clone 333 drivers/md/dm-rq.c container_of(bio, struct dm_rq_clone_bio_info, clone);
clone 342 drivers/md/dm-rq.c static int setup_clone(struct request *clone, struct request *rq,
clone 347 drivers/md/dm-rq.c r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
clone 352 drivers/md/dm-rq.c clone->end_io = end_clone_request;
clone 353 drivers/md/dm-rq.c clone->end_io_data = tio;
clone 355 drivers/md/dm-rq.c tio->clone = clone;
clone 365 drivers/md/dm-rq.c tio->clone = NULL;
clone 390 drivers/md/dm-rq.c struct request *clone = NULL;
clone 393 drivers/md/dm-rq.c r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
clone 399 drivers/md/dm-rq.c if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
clone 401 drivers/md/dm-rq.c ti->type->release_clone_rq(clone, &tio->info);
clone 406 drivers/md/dm-rq.c trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
clone 408 drivers/md/dm-rq.c ret = dm_dispatch_clone_request(clone, rq);
clone 410 drivers/md/dm-rq.c blk_rq_unprep_clone(clone);
clone 411 drivers/md/dm-rq.c blk_mq_cleanup_rq(clone);
clone 412 drivers/md/dm-rq.c tio->ti->type->release_clone_rq(clone, &tio->info);
clone 413 drivers/md/dm-rq.c tio->clone = NULL;
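On the request-based side, dm-rq.c's setup_clone() (clone 342-355 above) wires a cloned request to its target I/O context: the clone borrows the original's bios via blk_rq_prep_clone() and gets its own completion hook. Reassembled from the indexed lines, it is essentially:

/* Reassembled sketch; the lines shown above are the load-bearing ones. */
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;	/* fires when the clone completes */
	clone->end_io_data = tio;
	tio->clone = clone;			/* remembered for unprep/release */

	return 0;
}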
clone 30 drivers/md/dm-rq.h struct bio clone;
clone 134 drivers/md/dm-target.c struct request **clone)
clone 139 drivers/md/dm-target.c static void io_err_release_clone_rq(struct request *clone,
clone 104 drivers/md/dm-zoned-target.c static void dmz_clone_endio(struct bio *clone)
clone 106 drivers/md/dm-zoned-target.c struct dmz_bioctx *bioctx = clone->bi_private;
clone 107 drivers/md/dm-zoned-target.c blk_status_t status = clone->bi_status;
clone 109 drivers/md/dm-zoned-target.c bio_put(clone);
clone 122 drivers/md/dm-zoned-target.c struct bio *clone;
clone 124 drivers/md/dm-zoned-target.c clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
clone 125 drivers/md/dm-zoned-target.c if (!clone)
clone 128 drivers/md/dm-zoned-target.c bio_set_dev(clone, dmz->dev->bdev);
clone 129 drivers/md/dm-zoned-target.c clone->bi_iter.bi_sector =
clone 131 drivers/md/dm-zoned-target.c clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
clone 132 drivers/md/dm-zoned-target.c clone->bi_end_io = dmz_clone_endio;
clone 133 drivers/md/dm-zoned-target.c clone->bi_private = bioctx;
clone 135 drivers/md/dm-zoned-target.c bio_advance(bio, clone->bi_iter.bi_size);
clone 138 drivers/md/dm-zoned-target.c generic_make_request(clone);
clone 84 drivers/md/dm.c struct bio clone;
clone 107 drivers/md/dm.c struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
clone 109 drivers/md/dm.c return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
clone 110 drivers/md/dm.c return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
clone 118 drivers/md/dm.c return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
clone 120 drivers/md/dm.c return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
clone 126 drivers/md/dm.c return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
clone 569 drivers/md/dm.c struct bio *clone;
clone 571 drivers/md/dm.c clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
clone 572 drivers/md/dm.c if (!clone)
clone 575 drivers/md/dm.c tio = container_of(clone, struct dm_target_io, clone);
clone 594 drivers/md/dm.c bio_put(&io->tio.clone);
clone 606 drivers/md/dm.c struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
clone 607 drivers/md/dm.c if (!clone)
clone 610 drivers/md/dm.c tio = container_of(clone, struct dm_target_io, clone);
clone 626 drivers/md/dm.c bio_put(&tio->clone);
clone 976 drivers/md/dm.c struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
clone 1205 drivers/md/dm.c struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
clone 1267 drivers/md/dm.c struct bio *clone = &tio->clone;
clone 1273 drivers/md/dm.c clone->bi_end_io = clone_endio;
clone 1281 drivers/md/dm.c sector = clone->bi_iter.bi_sector;
clone 1283 drivers/md/dm.c r = ti->type->map(ti, clone);
clone 1289 drivers/md/dm.c trace_block_bio_remap(clone->bi_disk->queue, clone,
clone 1292 drivers/md/dm.c ret = direct_make_request(clone);
clone 1294 drivers/md/dm.c ret = generic_make_request(clone);
clone 1324 drivers/md/dm.c struct bio *clone = &tio->clone;
clone 1326 drivers/md/dm.c __bio_clone_fast(clone, bio);
clone 1339 drivers/md/dm.c r = bio_integrity_clone(clone, bio, GFP_NOIO);
clone 1344 drivers/md/dm.c bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone 1345 drivers/md/dm.c clone->bi_iter.bi_size = to_bytes(len);
clone 1348 drivers/md/dm.c bio_integrity_trim(clone);
clone 1364 drivers/md/dm.c bio_list_add(blist, &tio->clone);
clone 1379 drivers/md/dm.c bio_list_add(blist, &tio->clone);
clone 1387 drivers/md/dm.c tio = container_of(bio, struct dm_target_io, clone);
clone 1396 drivers/md/dm.c struct bio *clone = &tio->clone;
clone 1400 drivers/md/dm.c __bio_clone_fast(clone, ci->bio);
clone 1402 drivers/md/dm.c bio_setup_sector(clone, ci->sector, *len);
clone 1417 drivers/md/dm.c tio = container_of(bio, struct dm_target_io, clone);
clone 3004 drivers/md/dm.c front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
clone 3014 drivers/md/dm.c front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
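dm.c never allocates a clone bio on its own: struct dm_target_io embeds the clone (see the offsetof()/container_of() hits above and struct bio clone in dm-rq.h line 30), so the owning context is recovered by pointer arithmetic alone. A small userspace model of the trick, with stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types; only the layout matters here. */
struct bio { int dummy; };

struct dm_target_io {
	int target_bio_nr;
	struct bio clone;	/* embedded, as in the dm.c hits above */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct dm_target_io tio = { .target_bio_nr = 7 };
	struct bio *bio = &tio.clone;	/* what the block layer hands back */

	/* Recover the per-target context from the bio pointer alone. */
	struct dm_target_io *back = container_of(bio, struct dm_target_io, clone);

	printf("target_bio_nr = %d\n", back->target_bio_nr);
	return 0;
}

This is also why the front_pad computations at dm.c lines 3004 and 3014 use offsetof(..., clone): the bio_set must allocate enough space in front of each bio for the embedding structure.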
clone 502 drivers/net/dsa/mv88e6xxx/hwtstamp.c struct sk_buff *clone, unsigned int type)
clone 509 drivers/net/dsa/mv88e6xxx/hwtstamp.c if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
clone 512 drivers/net/dsa/mv88e6xxx/hwtstamp.c hdr = mv88e6xxx_should_tstamp(chip, port, clone, type);
clone 522 drivers/net/dsa/mv88e6xxx/hwtstamp.c ps->tx_skb = clone;
clone 119 drivers/net/dsa/mv88e6xxx/hwtstamp.h struct sk_buff *clone, unsigned int type);
clone 121 drivers/net/dsa/mv88e6xxx/hwtstamp.h struct sk_buff *clone, unsigned int type);
clone 148 drivers/net/dsa/mv88e6xxx/hwtstamp.h struct sk_buff *clone,
clone 155 drivers/net/dsa/mv88e6xxx/hwtstamp.h struct sk_buff *clone,
clone 1826 drivers/net/dsa/sja1105/sja1105_main.c struct sk_buff *clone;
clone 1847 drivers/net/dsa/sja1105/sja1105_main.c clone = DSA_SKB_CB(skb)->clone;
clone 1849 drivers/net/dsa/sja1105/sja1105_main.c sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
clone 1851 drivers/net/dsa/sja1105/sja1105_main.c if (!clone)
clone 1854 drivers/net/dsa/sja1105/sja1105_main.c skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
clone 1863 drivers/net/dsa/sja1105/sja1105_main.c kfree_skb(clone);
clone 1871 drivers/net/dsa/sja1105/sja1105_main.c skb_complete_tx_timestamp(clone, &shwt);
clone 103 drivers/net/usb/lg-vl600.c struct sk_buff *clone;
clone 200 drivers/net/usb/lg-vl600.c clone = skb_clone(buf, GFP_ATOMIC);
clone 201 drivers/net/usb/lg-vl600.c if (!clone)
clone 204 drivers/net/usb/lg-vl600.c skb_trim(clone, packet_len);
clone 205 drivers/net/usb/lg-vl600.c usbnet_skb_return(dev, clone);
clone 5630 drivers/s390/net/qeth_core_main.c struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
clone 5632 drivers/s390/net/qeth_core_main.c if (!clone)
clone 5635 drivers/s390/net/qeth_core_main.c clone->dev_port = orig->dev_port;
clone 5636 drivers/s390/net/qeth_core_main.c return clone;
clone 128 drivers/scsi/cxlflash/superpipe.h struct dk_cxlflash_clone *clone);
clone 46 drivers/scsi/cxlflash/vlun.c static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
clone 49 drivers/scsi/cxlflash/vlun.c release->hdr = clone->hdr;
clone 50 drivers/scsi/cxlflash/vlun.c release->context_id = clone->context_id_dst;
clone 1191 drivers/scsi/cxlflash/vlun.c struct dk_cxlflash_clone *clone)
clone 1205 drivers/scsi/cxlflash/vlun.c u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
clone 1206 drivers/scsi/cxlflash/vlun.c ctxid_dst = DECODE_CTXID(clone->context_id_dst),
clone 1207 drivers/scsi/cxlflash/vlun.c rctxid_src = clone->context_id_src,
clone 1208 drivers/scsi/cxlflash/vlun.c rctxid_dst = clone->context_id_dst;
clone 1306 drivers/scsi/cxlflash/vlun.c marshal_clone_to_rele(clone, &release);
clone 106 fs/9p/fid.c int i, n, l, clone, access;
clone 163 fs/9p/fid.c clone = 1;
clone 171 fs/9p/fid.c fid = p9_client_walk(fid, l, &wnames[i], clone);
clone 186 fs/9p/fid.c clone = 0;
clone 266 fs/f2fs/acl.c struct posix_acl *clone = NULL;
clone 271 fs/f2fs/acl.c clone = kmemdup(acl, size, flags);
clone 272 fs/f2fs/acl.c if (clone)
clone 273 fs/f2fs/acl.c refcount_set(&clone->a_refcount, 1);
clone 275 fs/f2fs/acl.c return clone;
clone 337 fs/f2fs/acl.c struct posix_acl *clone;
clone 354 fs/f2fs/acl.c clone = f2fs_acl_clone(p, GFP_NOFS);
clone 355 fs/f2fs/acl.c if (!clone) {
clone 360 fs/f2fs/acl.c ret = f2fs_acl_create_masq(clone, mode);
clone 365 fs/f2fs/acl.c posix_acl_release(clone);
clone 367 fs/f2fs/acl.c *acl = clone;
clone 377 fs/f2fs/acl.c posix_acl_release(clone);
clone 290 fs/gfs2/bmap.c static void clone_metapath(struct metapath *clone, struct metapath *mp)
clone 294 fs/gfs2/bmap.c *clone = *mp;
clone 296 fs/gfs2/bmap.c get_bh(clone->mp_bh[hgt]);
clone 588 fs/gfs2/bmap.c struct metapath clone;
clone 592 fs/gfs2/bmap.c clone_metapath(&clone, mp);
clone 593 fs/gfs2/bmap.c ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
clone 598 fs/gfs2/bmap.c hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
clone 605 fs/gfs2/bmap.c release_metapath(&clone);
clone 1332 fs/gfs2/rgrp.c const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
clone 1333 fs/gfs2/rgrp.c clone += bi->bi_offset;
clone 1334 fs/gfs2/rgrp.c clone += x;
clone 1337 fs/gfs2/rgrp.c diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
clone 1339 fs/gfs2/rgrp.c diff = ~(*clone | (*clone >> 1));
clone 1070 fs/nfsd/nfs4proc.c struct nfsd4_clone *clone = &u->clone;
clone 1074 fs/nfsd/nfs4proc.c status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
clone 1075 fs/nfsd/nfs4proc.c &clone->cl_dst_stateid, &dst);
clone 1079 fs/nfsd/nfs4proc.c status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
clone 1080 fs/nfsd/nfs4proc.c dst->nf_file, clone->cl_dst_pos, clone->cl_count,
clone 1729 fs/nfsd/nfs4xdr.c nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
clone 1733 fs/nfsd/nfs4xdr.c status = nfsd4_decode_stateid(argp, &clone->cl_src_stateid);
clone 1736 fs/nfsd/nfs4xdr.c status = nfsd4_decode_stateid(argp, &clone->cl_dst_stateid);
clone 1741 fs/nfsd/nfs4xdr.c p = xdr_decode_hyper(p, &clone->cl_src_pos);
clone 1742 fs/nfsd/nfs4xdr.c p = xdr_decode_hyper(p, &clone->cl_dst_pos);
clone 1743 fs/nfsd/nfs4xdr.c p = xdr_decode_hyper(p, &clone->cl_count);
clone 627 fs/nfsd/xdr4.h struct nfsd4_clone clone;
clone 194 fs/posix_acl.c struct posix_acl *clone = NULL;
clone 199 fs/posix_acl.c clone = kmemdup(acl, size, flags);
clone 200 fs/posix_acl.c if (clone)
clone 201 fs/posix_acl.c refcount_set(&clone->a_refcount, 1);
clone 203 fs/posix_acl.c return clone;
clone 521 fs/posix_acl.c struct posix_acl *clone = posix_acl_clone(*acl, gfp);
clone 523 fs/posix_acl.c if (clone) {
clone 524 fs/posix_acl.c err = posix_acl_create_masq(clone, mode_p);
clone 526 fs/posix_acl.c posix_acl_release(clone);
clone 527 fs/posix_acl.c clone = NULL;
clone 531 fs/posix_acl.c *acl = clone;
clone 539 fs/posix_acl.c struct posix_acl *clone = posix_acl_clone(*acl, gfp);
clone 541 fs/posix_acl.c if (clone) {
clone 542 fs/posix_acl.c err = __posix_acl_chmod_masq(clone, mode);
clone 544 fs/posix_acl.c posix_acl_release(clone);
clone 545 fs/posix_acl.c clone = NULL;
clone 549 fs/posix_acl.c *acl = clone;
clone 586 fs/posix_acl.c struct posix_acl *clone;
clone 604 fs/posix_acl.c clone = posix_acl_clone(p, GFP_NOFS);
clone 605 fs/posix_acl.c if (!clone)
clone 608 fs/posix_acl.c ret = posix_acl_create_masq(clone, mode);
clone 613 fs/posix_acl.c posix_acl_release(clone);
clone 615 fs/posix_acl.c *acl = clone;
clone 625 fs/posix_acl.c posix_acl_release(clone);
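The fs/posix_acl.c hits at lines 521-531 above (mirrored by f2fs's private copy earlier in this listing) are the canonical clone-and-masquerade shape: clone the ACL, apply the create mask to the clone, and release whichever copy loses. Reassembled from those lines; the enclosing function name does not appear in the listing, so the one below is illustrative:

/* Sketch reassembled from the fs/posix_acl.c lines indexed above. */
static int acl_create_with_masq(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
{
	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
	int err = -ENOMEM;

	if (clone) {
		err = posix_acl_create_masq(clone, mode_p);
		if (err < 0) {
			posix_acl_release(clone);	/* drop the failed copy */
			clone = NULL;
		}
	}
	posix_acl_release(*acl);	/* caller's reference to the original */
	*acl = clone;			/* may be NULL on failure */
	return err;
}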
net/core/skbuff.c if (!clone) clone 5800 net/core/skbuff.c list = clone; clone 5806 net/core/skbuff.c kfree_skb(clone); clone 5819 net/core/skbuff.c if (clone) { clone 5820 net/core/skbuff.c clone->next = list; clone 5821 net/core/skbuff.c shinfo->frag_list = clone; clone 5927 net/core/skbuff.c struct sk_buff *clone = skb_clone(skb, gfp); clone 5929 net/core/skbuff.c if (!clone) clone 5932 net/core/skbuff.c if (pskb_carve(clone, off, gfp) < 0 || clone 5933 net/core/skbuff.c pskb_trim(clone, to_copy)) { clone 5934 net/core/skbuff.c kfree_skb(clone); clone 5937 net/core/skbuff.c return clone; clone 25 net/core/timestamping.c struct sk_buff *clone; clone 37 net/core/timestamping.c clone = skb_clone_sk(skb); clone 38 net/core/timestamping.c if (!clone) clone 40 net/core/timestamping.c phydev->drv->txtstamp(phydev, clone, type); clone 465 net/dsa/slave.c struct sk_buff *clone; clone 475 net/dsa/slave.c clone = skb_clone_sk(skb); clone 476 net/dsa/slave.c if (!clone) clone 479 net/dsa/slave.c DSA_SKB_CB(skb)->clone = clone; clone 481 net/dsa/slave.c if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) clone 484 net/dsa/slave.c kfree_skb(clone); clone 518 net/dsa/slave.c DSA_SKB_CB(skb)->clone = NULL; clone 367 net/ieee802154/socket.c struct sk_buff *clone; clone 369 net/ieee802154/socket.c clone = skb_clone(skb, GFP_ATOMIC); clone 370 net/ieee802154/socket.c if (clone) clone 371 net/ieee802154/socket.c raw_rcv_skb(sk, clone); clone 809 net/ieee802154/socket.c struct sk_buff *clone; clone 811 net/ieee802154/socket.c clone = skb_clone(skb, GFP_ATOMIC); clone 812 net/ieee802154/socket.c if (clone) clone 813 net/ieee802154/socket.c dgram_rcv_skb(prev, clone); clone 451 net/ipv4/inet_fragment.c struct sk_buff *clone; clone 454 net/ipv4/inet_fragment.c clone = alloc_skb(0, GFP_ATOMIC); clone 455 net/ipv4/inet_fragment.c if (!clone) clone 457 net/ipv4/inet_fragment.c skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; clone 461 net/ipv4/inet_fragment.c clone->data_len = head->data_len - plen; clone 462 net/ipv4/inet_fragment.c clone->len = clone->data_len; clone 463 net/ipv4/inet_fragment.c head->truesize += clone->truesize; clone 464 net/ipv4/inet_fragment.c clone->csum = 0; clone 465 net/ipv4/inet_fragment.c clone->ip_summed = head->ip_summed; clone 466 net/ipv4/inet_fragment.c add_frag_mem_limit(q->fqdir, clone->truesize); clone 467 net/ipv4/inet_fragment.c skb_shinfo(head)->frag_list = clone; clone 468 net/ipv4/inet_fragment.c nextp = &clone->next; clone 192 net/ipv4/raw.c struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); clone 195 net/ipv4/raw.c if (clone) clone 196 net/ipv4/raw.c raw_rcv(sk, clone); clone 296 net/ipv4/tcp_cong.c char *saved_clone, *clone, *name; clone 299 net/ipv4/tcp_cong.c saved_clone = clone = kstrdup(val, GFP_USER); clone 300 net/ipv4/tcp_cong.c if (!clone) clone 305 net/ipv4/tcp_cong.c while ((name = strsep(&clone, " ")) && *name) { clone 214 net/ipv6/raw.c struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); clone 217 net/ipv6/raw.c if (clone) { clone 218 net/ipv6/raw.c nf_reset_ct(clone); clone 219 net/ipv6/raw.c rawv6_rcv(sk, clone); clone 237 net/netfilter/nft_connlimit.c .clone = nft_connlimit_clone, clone 260 net/netfilter/nft_counter.c .clone = nft_counter_clone, clone 31 net/netfilter/nft_dynset.c if (src->ops->clone) { clone 33 net/netfilter/nft_dynset.c err = src->ops->clone(dst, src); clone 1194 net/openvswitch/actions.c struct sk_buff *clone; clone 1207 net/openvswitch/actions.c clone = skb_clone(skb, GFP_ATOMIC); clone 1208 
net/openvswitch/actions.c if (clone) clone 1209 net/openvswitch/actions.c do_output(dp, clone, port, key); clone 1334 net/openvswitch/actions.c err = clone(dp, skb, key, a, last); clone 1374 net/openvswitch/actions.c struct sw_flow_key *clone; clone 1389 net/openvswitch/actions.c clone = clone_flow_key ? clone_key(key) : key; clone 1390 net/openvswitch/actions.c if (clone) { clone 1397 net/openvswitch/actions.c err = do_execute_actions(dp, skb, clone, clone 1403 net/openvswitch/actions.c clone->recirc_id = recirc_id; clone 1404 net/openvswitch/actions.c ovs_dp_process_packet(skb, clone); clone 114 net/phonet/socket.c struct sk_buff *clone; clone 121 net/phonet/socket.c clone = skb_clone(skb, GFP_ATOMIC); clone 122 net/phonet/socket.c if (clone) { clone 124 net/phonet/socket.c sk_receive_skb(sknode, clone, 0); clone 161 net/rds/tcp_recv.c struct sk_buff *clone; clone 218 net/rds/tcp_recv.c clone = pskb_extract(skb, offset, to_copy, arg->gfp); clone 219 net/rds/tcp_recv.c if (!clone) { clone 224 net/rds/tcp_recv.c skb_queue_tail(&tinc->ti_skb_list, clone); clone 229 net/rds/tcp_recv.c clone, clone->data, clone->len); clone 138 net/sched/sch_etf.c struct sk_buff *clone; clone 145 net/sched/sch_etf.c clone = skb_clone(skb, GFP_ATOMIC); clone 146 net/sched/sch_etf.c if (!clone) clone 149 net/sched/sch_etf.c serr = SKB_EXT_ERR(clone); clone 158 net/sched/sch_etf.c if (sock_queue_err_skb(sk, clone)) clone 159 net/sched/sch_etf.c kfree_skb(clone); clone 42 samples/pidfd/pidfd-metadata.c return clone(do_child, stack + stack_size, flags | SIGCHLD, NULL, pidfd); clone 123 scripts/gcc-plugins/gcc-generate-gimple-pass.h virtual opt_pass * clone () { return new _PASS_NAME_PASS(); } clone 212 scripts/gcc-plugins/gcc-generate-ipa-pass.h virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } clone 123 scripts/gcc-plugins/gcc-generate-rtl-pass.h virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } clone 123 scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h virtual opt_pass *clone() { return new _PASS_NAME_PASS(); } clone 210 tools/testing/selftests/memfd/fuse_test.c pid = clone(sealing_thread_fn, clone 586 tools/testing/selftests/memfd/memfd_test.c pid = clone(idle_thread_fn, clone 50 tools/testing/selftests/nsfs/pidns.c pid = clone(child, ca.stack_ptr, CLONE_NEWUSER | CLONE_NEWPID | SIGCHLD, NULL); clone 38 tools/testing/selftests/pidfd/pidfd_test.c return clone(fn, stack + stack_size, flags | SIGCHLD, NULL, pidfd); clone 59 tools/testing/selftests/proc/thread-self.c pid = clone(f, stack + PAGE_SIZE, CLONE_THREAD|CLONE_SIGHAND|CLONE_VM, (void *)1);