Searched refs:new_chunk (Results 1 – 9 of 9) sorted by relevance
/linux-4.1.27/drivers/md/
dm-snap-persistent.c
     85   __le64 new_chunk;   (member)
     90   uint64_t new_chunk;   (member)
    418   result->new_chunk = le64_to_cpu(de->new_chunk);   in read_exception()
    428   de->new_chunk = cpu_to_le64(e->new_chunk);   in write_exception()
    437   de->new_chunk = 0;   in clear_exception()
    467   if (e.new_chunk == 0LL) {   in insert_exceptions()
    476   if (ps->next_free <= e.new_chunk)   in insert_exceptions()
    477   ps->next_free = e.new_chunk + 1;   in insert_exceptions()
    482   r = callback(callback_context, e.old_chunk, e.new_chunk);   in insert_exceptions()
    683   e->new_chunk = ps->next_free;   in persistent_prepare_exception()
    [all …]
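
The two members above reflect the split between the on-disk exception record (little-endian __le64) and its in-core copy (native uint64_t), with read_exception()/write_exception() converting between them. A minimal user-space sketch of that pattern, assuming glibc's <endian.h>; struct and function names mirror the hits above but are illustrative, not the exact kernel definitions:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct disk_exception {            /* on-disk record, fields little-endian */
    uint64_t old_chunk;
    uint64_t new_chunk;
};

struct core_exception {            /* in-core copy, native byte order */
    uint64_t old_chunk;
    uint64_t new_chunk;
};

static void read_exception(const struct disk_exception *de,
                           struct core_exception *result)
{
    result->old_chunk = le64toh(de->old_chunk);   /* kernel: le64_to_cpu() */
    result->new_chunk = le64toh(de->new_chunk);
}

static void write_exception(const struct core_exception *e,
                            struct disk_exception *de)
{
    de->old_chunk = htole64(e->old_chunk);        /* kernel: cpu_to_le64() */
    de->new_chunk = htole64(e->new_chunk);
}

int main(void)
{
    struct core_exception e = { .old_chunk = 7, .new_chunk = 42 };
    struct disk_exception de;
    struct core_exception back;

    write_exception(&e, &de);                     /* round-trip through disk form */
    read_exception(&de, &back);
    printf("old=%llu new=%llu\n",
           (unsigned long long)back.old_chunk,
           (unsigned long long)back.new_chunk);
    return 0;
}
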
dm-exception-store.h
     33   chunk_t new_chunk;   (member)
    148   return e->new_chunk >> DM_CHUNK_NUMBER_BITS;   in dm_consecutive_chunk_count()
    153   e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);   in dm_consecutive_chunk_count_inc()
    162   e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);   in dm_consecutive_chunk_count_dec()
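
The helpers above pack a run length into the high bits of new_chunk: the low DM_CHUNK_NUMBER_BITS (56 in this tree) hold the first destination chunk, and the remaining high bits count how many further consecutive chunks the exception covers. A small stand-alone rendering of that encoding; constant and helper names follow the header, with assert standing in for the kernel's BUG_ON:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DM_CHUNK_NUMBER_BITS 56

typedef uint64_t chunk_t;

struct dm_exception {
    chunk_t old_chunk;
    chunk_t new_chunk;    /* [63:56] consecutive count, [55:0] chunk number */
};

static chunk_t dm_chunk_number(chunk_t chunk)
{
    return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1);
}

static unsigned int dm_consecutive_chunk_count(const struct dm_exception *e)
{
    return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

static void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
    e->new_chunk += 1ULL << DM_CHUNK_NUMBER_BITS;
    assert(dm_consecutive_chunk_count(e) != 0);   /* count must not wrap */
}

int main(void)
{
    struct dm_exception e = { .old_chunk = 100, .new_chunk = 5 };

    dm_consecutive_chunk_count_inc(&e);           /* run now spans two chunks */
    printf("first chunk %llu, %u extra consecutive chunk(s)\n",
           (unsigned long long)dm_chunk_number(e.new_chunk),
           dm_consecutive_chunk_count(&e));
    return 0;
}
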
dm-snap.c
    714   new_e->new_chunk == (dm_chunk_number(e->new_chunk) +   in dm_insert_exception()
    723   new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {   in dm_insert_exception()
    726   e->new_chunk--;   in dm_insert_exception()
    755   e->new_chunk = new;   in dm_add_exception()
    881   e->new_chunk++;   in __remove_single_exception_chunk()
    957   chunk_t old_chunk, new_chunk;   in snapshot_merge_next_chunks()  (local)
    975   &new_chunk);   in snapshot_merge_next_chunks()
    989   new_chunk = new_chunk + 1 - linear_chunks;   in snapshot_merge_next_chunks()
   1002   src.sector = chunk_to_sector(s->store, new_chunk);   in snapshot_merge_next_chunks()
   1567   dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);   in start_copy()
    [all …]
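
The dm_insert_exception() hits show a completed single-chunk exception being folded into an existing run when it is adjacent in both old- and new-chunk space: either the run grows at its end (count incremented) or at its front (old_chunk/new_chunk decremented as well). A simplified sketch of just that adjacency test, reusing the packed-count helpers from the dm-exception-store.h sketch and omitting the hash-table handling around it:

#include <stdint.h>

#define DM_CHUNK_NUMBER_BITS 56

typedef uint64_t chunk_t;

struct dm_exception {
    chunk_t old_chunk;
    chunk_t new_chunk;    /* count in the high bits, chunk number in the low */
};

static chunk_t dm_chunk_number(chunk_t chunk)
{
    return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1);
}

static unsigned int dm_consecutive_chunk_count(const struct dm_exception *e)
{
    return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

static void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
    e->new_chunk += 1ULL << DM_CHUNK_NUMBER_BITS;
}

/* Returns 1 if the single-chunk exception *new_e was absorbed into the
 * existing run *e, 0 if it has to be stored on its own. */
int try_merge_exception(struct dm_exception *e, const struct dm_exception *new_e)
{
    unsigned int count = dm_consecutive_chunk_count(e);

    /* directly follows the run in both old and new chunk space? */
    if (new_e->old_chunk == e->old_chunk + count + 1 &&
        new_e->new_chunk == dm_chunk_number(e->new_chunk) + count + 1) {
        dm_consecutive_chunk_count_inc(e);
        return 1;
    }

    /* directly precedes the run? grow it downwards */
    if (new_e->old_chunk == e->old_chunk - 1 &&
        new_e->new_chunk == dm_chunk_number(e->new_chunk) - 1) {
        dm_consecutive_chunk_count_inc(e);
        e->old_chunk--;
        e->new_chunk--;
        return 1;
    }

    return 0;
}
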
dm-snap-transient.c
     48   e->new_chunk = sector_to_chunk(store, tc->next_free);   in transient_prepare_exception()
raid5.c
   7624   int new_chunk = mddev->new_chunk_sectors;   in raid5_check_reshape()  (local)
   7628   if (new_chunk > 0) {   in raid5_check_reshape()
   7629   if (!is_power_of_2(new_chunk))   in raid5_check_reshape()
   7631   if (new_chunk < (PAGE_SIZE>>9))   in raid5_check_reshape()
   7633   if (mddev->array_sectors & (new_chunk-1))   in raid5_check_reshape()
   7646   if (new_chunk > 0) {   in raid5_check_reshape()
   7647   conf->chunk_sectors = new_chunk;   in raid5_check_reshape()
   7648   mddev->chunk_sectors = new_chunk;   in raid5_check_reshape()
   7658   int new_chunk = mddev->new_chunk_sectors;   in raid6_check_reshape()  (local)
   7662   if (new_chunk > 0) {   in raid6_check_reshape()
    [all …]
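
raid5_check_reshape() and raid6_check_reshape() apply the same sanity checks to a requested chunk size (given in 512-byte sectors): it must be a power of two, at least one page, and must evenly divide the array size. A compact user-space restatement of those checks; PAGE_SECTORS here assumes 4 KiB pages, where the kernel uses PAGE_SIZE >> 9:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SECTORS (4096 >> 9)    /* assumption: 4 KiB pages */

static bool is_power_of_2(uint64_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

/* new_chunk is in 512-byte sectors; 0 or negative means "keep current". */
bool new_chunk_valid(uint64_t array_sectors, int new_chunk)
{
    if (new_chunk <= 0)
        return true;
    if (!is_power_of_2((uint64_t)new_chunk))
        return false;                       /* must be a power of two */
    if (new_chunk < PAGE_SECTORS)
        return false;                       /* at least one page */
    if (array_sectors & ((uint64_t)new_chunk - 1))
        return false;                       /* must divide the array size */
    return true;
}
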
md.c
   1098   mddev->new_chunk_sectors = sb->new_chunk >> 9;   in super_90_validate()
   1238   sb->new_chunk = mddev->new_chunk_sectors << 9;   in super_90_sync()
   1611   mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);   in super_1_validate()
   1745   sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);   in super_1_sync()
/linux-4.1.27/include/uapi/linux/raid/
md_p.h
    175   __u32 new_chunk;   /* 17 new chunk size (bytes) */   (member)
    245   __le32 new_chunk;   /* new chunk size (512byte sectors) */   (member)
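
The two superblock formats store the reshape target in different units: the v0.90 field is in bytes, the v1.x field in 512-byte sectors (little-endian on disk), which is why super_90_validate()/super_90_sync() above shift by 9 while super_1_validate()/super_1_sync() only byte-swap. A tiny illustration of the same value in both representations, with an arbitrarily chosen chunk size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t chunk_sectors = 128;                  /* in-core: a 64 KiB chunk  */

    uint32_t sb90_new_chunk = chunk_sectors << 9;  /* v0.90 stores bytes       */
    uint32_t sb1_new_chunk  = chunk_sectors;       /* v1.x stores sectors      */

    printf("in-core %u sectors -> v0.90 new_chunk %u, v1.x new_chunk %u\n",
           chunk_sectors, sb90_new_chunk, sb1_new_chunk);
    return 0;
}
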
/linux-4.1.27/drivers/infiniband/ulp/iser/
iser_memory.c
    254   int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;   in iser_sg_to_page_vec()  (local)
    259   new_chunk = 1;   in iser_sg_to_page_vec()
    263   if (new_chunk)   in iser_sg_to_page_vec()
    271   new_chunk = 0;   in iser_sg_to_page_vec()
    274   new_chunk = 1;   in iser_sg_to_page_vec()
/linux-4.1.27/drivers/infiniband/ulp/isert/
ib_isert.c
   2524   int i = 0, new_chunk, last_ent, n_pages;   in isert_map_fr_pagelist()  (local)
   2527   new_chunk = 1;   in isert_map_fr_pagelist()
   2531   if (new_chunk)   in isert_map_fr_pagelist()
   2540   new_chunk = 0;   in isert_map_fr_pagelist()
   2543   new_chunk = 1;   in isert_map_fr_pagelist()
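
iser_sg_to_page_vec() and isert_map_fr_pagelist() use the same idiom: new_chunk marks that the current scatter-gather element opens a new run, and page addresses are emitted once a run ends. The sketch below is a deliberate simplification, using an invented segment struct in place of the DMA-mapped scatterlist, assuming 4 KiB pages, and treating strict address contiguity as the run criterion, whereas the kernel code works from ib_sg_dma_address()/ib_sg_dma_len() and decides run boundaries with alignment checks:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

struct segment {                 /* stand-in for one dma-mapped sg entry */
    uint64_t addr;
    uint32_t len;
};

/* Write the page addresses covering the segments into 'pages'; returns the
 * number of pages emitted (no bounds checking, illustration only). */
size_t segments_to_pages(const struct segment *seg, size_t nents,
                         uint64_t *pages)
{
    size_t n_pages = 0;
    uint64_t chunk_start = 0;
    int new_chunk = 1;

    for (size_t i = 0; i < nents; i++) {
        uint64_t end = seg[i].addr + seg[i].len;

        if (new_chunk)
            chunk_start = seg[i].addr;     /* first segment of a new run */

        /* next segment continues this one without a gap? keep the run open */
        if (i + 1 < nents && seg[i + 1].addr == end) {
            new_chunk = 0;
            continue;
        }
        new_chunk = 1;

        /* run finished: emit every page it touches */
        for (uint64_t page = chunk_start & PAGE_MASK; page < end;
             page += PAGE_SIZE)
            pages[n_pages++] = page;
    }
    return n_pages;
}
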