/linux-4.4.14/drivers/video/fbdev/omap/lcd_mipid.c

  Hits for the identifier 'md': here it is the struct mipid_device * panel
  context, threaded through almost every function in the driver.

  - SPI access helpers: mipid_transfer() (68; BUG_ON(md->spi == NULL) at 76,
    spi_sync(md->spi, &m) at 117 with a dev_dbg() on failure) and the inline
    wrappers mipid_cmd() (125), mipid_write() (130) and mipid_read() (136).
  - Panel setup: set_data_lines() (142, writes register 0x3a) and
    send_init_string() (160, writes 0xc2 and then the configured
    md->panel.data_lines).
  - Hardware guard timing around sleep mode: hw_guard_start() (168) records a
    guard window in jiffies, hw_guard_wait() (174) sleeps out whatever remains
    of it, and set_sleep_mode() (184) brackets the sleep command with a 120 ms
    guard (the idiom is sketched below).
  - Display and backlight state: set_display_state() (206),
    mipid_set_bklight_level() (215; caches the level in
    md->saved_bklight_level while the panel is disabled),
    mipid_get_bklight_level() (233) and mipid_get_bklight_max() (243), all
    going through the platform data attached to md->spi->dev.
  - Self test: read_first_pixel() (257) reads the R/G/B components under
    md->mutex and converts them according to md->panel.data_lines;
    mipid_run_test() (287) writes test patterns with
    omapfb_write_first_pixel() and compares them against the read-back value.
  - ESD handling: ls041y3_esd_recover() (324) toggles sleep mode off and on;
    ls041y3_esd_check_mode1() (331) and ls041y3_esd_check_mode2() (347) read
    and compare status registers; ls041y3_esd_check() (383) runs mode 2 only
    for panel revisions >= 0x88; mipid_esd_start_check()/
    mipid_esd_stop_check() (390/397) queue or cancel the delayed work, and
    mipid_esd_work() (405) calls md->esd_check(md) under md->mutex and
    re-arms itself.
  - Panel ops: mipid_enable() (416) and mipid_disable() (437) drive sleep
    mode, the init string, display state, the saved backlight level and ESD
    checking under md->mutex; panel_enabled() (459) reads
    MIPID_CMD_READ_DISP_STATUS; mipid_init() (476) creates the "mipid_esd"
    workqueue, initialises md->mutex and starts ESD checking if the panel is
    already enabled; mipid_cleanup() (499) destroys the workqueue.
  - Probe path: mipid_detect() (531) reads the 3-byte display ID and selects
    "lph8923" or "ls041y3" (installing md->esd_check for the latter), then
    records md->revision and md->panel.data_lines; mipid_spi_probe() (570)
    allocates md, stores it as SPI drvdata and registers the panel;
    mipid_spi_remove() (595) disables the panel and frees md.
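  The guard-timer idiom visible at 168-194 is worth spelling out. The sketch
  below is a minimal, self-contained version of it, not the driver's verbatim
  code: the fields live in struct mipid_device in the real driver, and the
  actual sleep call is truncated out of the listing, so the
  set_current_state()/schedule_timeout() pair here is an assumption.

#include <linux/jiffies.h>
#include <linux/sched.h>

/* Stand-in for the hw_guard_* fields kept in struct mipid_device. */
struct hw_guard {
	unsigned long guard_end;	/* jiffies value at which the guard expires */
	unsigned long guard_wait;	/* length of the guard window, in jiffies */
};

/* Arm the guard after a sleep-in/sleep-out command; nothing blocks yet. */
static void hw_guard_start(struct hw_guard *g, int guard_msec)
{
	g->guard_wait = msecs_to_jiffies(guard_msec);
	g->guard_end = jiffies + g->guard_wait;
}

/*
 * Before the next guarded command, sleep out whatever is left of the window.
 * The unsigned subtraction wraps once jiffies has passed guard_end, so the
 * "(long)wait > 0 && wait <= guard_wait" test also rejects an expired guard.
 */
static void hw_guard_wait(struct hw_guard *g)
{
	unsigned long wait = g->guard_end - jiffies;

	if ((long)wait > 0 && wait <= g->guard_wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(wait);
	}
}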
/linux-4.4.14/drivers/md/dm.c

  In the device-mapper core 'md' is the struct mapped_device *; it is also a
  member of struct dm_io (70) and struct dm_rq_target_io (84), and line 120
  introduces the bits of the md->flags field.

  - Simple accessors: dm_use_blk_mq() (243), dm_deleting_md() (440, tests
    DMF_DELETING), dm_get_size() (537), dm_get_md_queue() (542) and
    dm_get_stats() (547).
  - Block-device open/close: dm_blk_open() (447) refuses devices that are
    DMF_FREEING or being deleted, then dm_get()s the device and bumps
    md->open_count; dm_blk_close() (471) drops the count and, if
    DMF_DEFERRED_REMOVE is set, lets the deferred removal go ahead;
    dm_open_count() (488), dm_lock_for_deletion() (496) and
    dm_cancel_deferred_remove() (516) manage the DMF_DELETING and
    DMF_DEFERRED_REMOVE bits around that count (the pattern is sketched
    below).
  - ioctl plumbing: dm_blk_getgeo() (554), dm_get_live_table_for_ioctl() (559,
    fails while the device is suspended) and dm_blk_ioctl() (604).
  - Mempool helpers: alloc_io()/free_io() (630/635) on md->io_pool, free_tio()
    (640) and alloc_rq_tio()/free_rq_tio() (645/653).
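  A minimal sketch of the open-count / deferred-removal pattern from 447-525.
  The struct, the bit numbers and the _lite names are placeholders for this
  sketch; the real code additionally checks DMF_FREEING and performs these
  tests under a spinlock, both omitted here.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

struct mapped_device_lite {
	atomic_t open_count;
	unsigned long flags;
};

#define DMF_DELETING		0	/* placeholder bit numbers, not dm.c's values */
#define DMF_DEFERRED_REMOVE	1

/* dm_blk_open()-style: refuse once deletion has started, else count the opener. */
static int blk_open_lite(struct mapped_device_lite *md)
{
	if (test_bit(DMF_DELETING, &md->flags))
		return -ENXIO;
	atomic_inc(&md->open_count);
	return 0;
}

/*
 * dm_lock_for_deletion()-style: delete immediately when nobody holds the
 * device open, defer until last close when asked to, otherwise refuse.
 */
static int lock_for_deletion_lite(struct mapped_device_lite *md, bool mark_deferred)
{
	if (atomic_read(&md->open_count)) {
		if (!mark_deferred)
			return -EBUSY;
		set_bit(DMF_DEFERRED_REMOVE, &md->flags);
		return 0;
	}
	set_bit(DMF_DELETING, &md->flags);
	return 0;
}

/* dm_blk_close()-style: the last closer notices the deferred flag. */
static bool blk_close_wants_removal(struct mapped_device_lite *md)
{
	return atomic_dec_and_test(&md->open_count) &&
	       test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}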
  - Clone-request pool and in-flight accounting: alloc_clone_request()/
    free_clone_request() (656/662) draw from md->rq_pool; md_in_flight() (667)
    sums md->pending[READ] and md->pending[WRITE]; start_io_acct() (675) and
    end_io_acct() (695) update those counters, the generic block accounting
    and md->stats, with end_io_acct() waking md->wait once the device drains.
  - Deferred bios: queue_io() (723) appends to md->deferred under
    md->deferred_lock and kicks md->wq.
  - Live table access: dm_get_live_table()/dm_put_live_table() (738/745) wrap
    srcu_read_lock()/srcu_read_unlock() on md->io_barrier, dm_sync_table()
    (750) is synchronize_srcu(), and dm_get_live_table_fast()/
    dm_put_live_table_fast() (760/766) are the plain-RCU variants (the reader
    side is sketched below).
  - Table devices: open_table_device()/close_table_device() (775/801) link and
    unlink the underlying bdev as a holder of dm_disk(md), while
    dm_get_table_device()/dm_put_table_device() (822/858) manage the
    md->table_devices list under md->table_devices_lock.
  - Geometry: dm_get_geometry()/dm_set_geometry() (888/898) copy md->geometry.
  - Bio completion: __noflush_suspending() (921) tests DMF_NOFLUSH_SUSPENDING;
    dec_pending() (935) pushes bios back onto md->deferred during a noflush
    suspend, frees the dm_io, and either resubmits certain flush bios via
    queue_io() (973) or completes the bio with a trace event (976);
    disable_write_same() (983) clears the limit obtained from
    dm_get_queue_limits(); clone_endio() (997) frees the tio and disables
    WRITE SAME when the target does not support it; rq_end_stats() (1082)
    accounts a finished request in md->stats.
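  The dm_get_live_table()/dm_put_live_table() pair recurs throughout the rest
  of the listing (dm_blk_ioctl, dm_make_request, dm_request_fn, dm_mq_queue_rq,
  __dm_destroy, the dm_pr_* handlers). A minimal sketch of the reader side,
  with a stand-in struct in place of struct mapped_device and an opaque table
  type:

#include <linux/srcu.h>

struct some_table;			/* opaque for this sketch */

/* Models the md->map / md->io_barrier pair behind dm_get_live_table(). */
struct table_holder {
	struct srcu_struct io_barrier;
	struct some_table __rcu *map;
};

static struct some_table *get_live_table(struct table_holder *h, int *srcu_idx)
{
	*srcu_idx = srcu_read_lock(&h->io_barrier);
	return srcu_dereference(h->map, &h->io_barrier);
}

static void put_live_table(struct table_holder *h, int srcu_idx)
{
	srcu_read_unlock(&h->io_barrier, srcu_idx);
}

/* Caller shape seen all over dm.c: take the table, use it, put it back. */
static void use_live_table(struct table_holder *h)
{
	int srcu_idx;
	struct some_table *map = get_live_table(h, &srcu_idx);

	/* ... map cannot be freed until the matching put_live_table() ... */
	(void)map;
	put_live_table(h, srcu_idx);
}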
  - Request completion and requeue: rq_completed() (1098) drops the pending
    count, wakes md->wait, restarts the old request queue when needed and
    dm_put()s the reference taken at dispatch (the comment at 1094-1095 warns
    that md may be freed by that dm_put()); free_rq_clone() (1124),
    dm_end_request() (1153), dm_unprep_request() (1190),
    dm_requeue_original_request() (1208), dm_done() (1279, 1289; disables
    WRITE SAME and requeues on DM_ENDIO_REQUEUE) and dm_softirq_done() (1307)
    all feed into it (the pending-counter pattern is sketched below).
  - Bio-based I/O path: __map_bio() (1474) frees the tio on mapping errors;
    struct clone_info carries the md (1507); alloc_tio() (1548) clones from
    ci->md->bs; __split_and_process_bio() (1714) allocates the dm_io and uses
    md->flush_bio for empty flushes; dm_make_request() (1763) takes the live
    table, accounts the bio, and either defers it via queue_io() while
    DMF_BLOCK_IO_FOR_SUSPEND is set or splits and processes it;
    dm_request_based() (1787) tests blk_queue_stackable(md->queue).
  - Request-based (.request_fn) path: setup_clone() (1825) and clone_rq()
    (1841) clone requests out of md->bs and md->rq_pool; init_tio() (1872),
    prep_tio() (1884) and dm_prep_fn() (1915) prepare the target io;
    map_request() (1940) traces the remap against dm_disk(md) and requeues on
    failure; map_tio_request() (1997) is the kworker entry point;
    dm_start_request() (2003) bumps md->pending, records the
    sequential-merge-deadline bookkeeping, updates md->stats and takes the
    in-flight md reference; dm_request_fn() (2080) pulls requests off
    md->queue, honours dm_request_peeked_before_merge_deadline() (2061, with
    its sysfs knob at 2037/2042) and hands the work to md->kworker.
  - Congestion and queue init: dm_any_congested() (2144) consults the live
    table or the queue's backing_dev_info; dm_init_md_queue() (2219) and
    dm_init_old_md_queue() (2240) set md->queue->queuedata and the congestion
    callbacks.
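  The in-flight accounting that ties dm_start_request(), rq_completed(),
  md_in_flight() and dm_wait_for_completion() together is a small pattern of
  its own: two atomic counters plus a wait queue. A simplified,
  uninterruptible-only sketch (the names and the wait_event() shortcut are
  this sketch's, not dm.c's):

#include <linux/atomic.h>
#include <linux/wait.h>

/* Stand-in for the md->pending[] counters and the md->wait queue. */
struct inflight {
	atomic_t pending[2];		/* indexed by READ / WRITE direction */
	wait_queue_head_t wait;
};

static int in_flight(struct inflight *f)
{
	return atomic_read(&f->pending[0]) + atomic_read(&f->pending[1]);
}

/* dm_start_request()-style: count the request before dispatching it. */
static void start_request(struct inflight *f, int rw)
{
	atomic_inc(&f->pending[rw]);
}

/* rq_completed()-style: drop the count and wake anyone draining the device. */
static void complete_request(struct inflight *f, int rw)
{
	atomic_dec(&f->pending[rw]);
	if (!in_flight(f))
		wake_up(&f->wait);
}

/* dm_wait_for_completion()-style drain, reduced to the uninterruptible case. */
static void wait_for_drain(struct inflight *f)
{
	wait_event(f->wait, !in_flight(f));
}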
  - Device construction and teardown: dm_init_old_md_queue() also wires up the
    dm_any_congested callback and the bounce limit (2248-2249);
    cleanup_mapped_device() (2252) tears down the workqueue, kworker,
    mempools, bioset, SRCU state, gendisk, queue and bdev; alloc_dev() (2288)
    allocates the mapped_device, initialises md->io_barrier, the
    suspend/type/table_devices locks, the pending counters and wait queues,
    the "kdmflush" workqueue, md->flush_bio and md->stats, and publishes the
    minor in the idr; free_dev() (2388) undoes all of that and frees the
    blk-mq tag set when one was allocated.
  - Table binding: __bind_mempools() (2406) adopts the mempools and bioset
    sized for the new table; event_callback() (2453) delivers queued uevents,
    bumps md->event_nr and wakes md->eventq; __set_size() (2468) runs under
    md->suspend_lock (taken by dm_swap_table(), per the comment at 2466);
    __bind() (2478) resets the geometry on size changes, installs the event
    callback, publishes the new table with rcu_assign_pointer(md->map, t) and
    waits out readers with dm_sync_table(); __unbind() (2523) clears md->map
    the same way (both are sketched below); dm_create() (2542) is alloc_dev()
    plus dm_sysfs_init().
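  The writer side of the live-table handling complements the reader sketch
  above. Again the holder struct is a stand-in, now with the update-side
  mutex added to mirror md->suspend_lock:

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct some_table;			/* opaque for this sketch */

struct table_binding {
	struct mutex suspend_lock;	/* update-side lock, as md->suspend_lock */
	struct srcu_struct io_barrier;
	struct some_table __rcu *map;
};

/*
 * __bind()-style publish: install the new table, then wait for every SRCU
 * reader that might still see the old one before handing it back for freeing.
 */
static struct some_table *bind_table(struct table_binding *b, struct some_table *t)
{
	struct some_table *old;

	old = rcu_dereference_protected(b->map,
					lockdep_is_held(&b->suspend_lock));
	rcu_assign_pointer(b->map, t);
	synchronize_srcu(&b->io_barrier);	/* dm_sync_table() equivalent */
	return old;
}

/* __unbind()-style teardown: clear the pointer and flush readers the same way. */
static struct some_table *unbind_table(struct table_binding *b)
{
	struct some_table *old = rcu_dereference_protected(b->map, 1);

	RCU_INIT_POINTER(b->map, NULL);
	synchronize_srcu(&b->io_barrier);
	return old;
}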
  - md->type management: dm_lock_md_type()/dm_unlock_md_type() (2558/2563),
    dm_set_md_type()/dm_get_md_type() (2568/2574, both assert that
    md->type_lock is held, per the comment at 2555-2556),
    dm_get_immutable_target_type() (2580) and dm_get_queue_limits() (2589,
    callers must hold a reference on md).
  - Queue setup: init_rq_based_worker_thread() (2596) starts the
    "kdmwork-<name>" kthread worker; dm_init_request_based_queue() (2607)
    sets up the old .request_fn path (softirq completion, prep_rq, elevator);
    dm_mq_init_request() (2635) and dm_mq_queue_rq() (2652) are the blk-mq
    hooks; dm_init_request_based_blk_mq_queue() (2716) fills md->tag_set and
    allocates the blk-mq queue; filter_md_type() (2760) and
    dm_setup_md_queue() (2771) pick between bio-based
    (blk_queue_make_request(md->queue, dm_make_request)), request-based and
    blk-mq request-based setups.
  - Lookup and reference counting: dm_get_md() (2808) resolves a minor through
    the idr, rejecting devices that are freeing or being deleted;
    dm_get_mdptr()/dm_set_mdptr() (2835/2840) stash the interface pointer;
    dm_get() (2845) BUG_ONs on DMF_FREEING while dm_hold() (2851) backs off
    instead; dm_device_name() (2864) returns md->name; __dm_destroy() (2870)
    marks the device DMF_FREEING, flushes the kworker, quiesces the live
    table under md->suspend_lock, optionally waits for all holders, then
    unbinds the table and frees the device (wrapped by dm_destroy() and
    dm_destroy_immediate(), 2917/2922); dm_put() (2927) drops a holder.
  - Suspend/resume machinery: dm_wait_for_completion() (2933) sleeps on
    md->wait until no I/O is in flight; dm_wq_work() (2966) drains
    md->deferred; dm_queue_flush() (2991) clears DMF_BLOCK_IO_FOR_SUSPEND and
    re-kicks md->wq; dm_swap_table() (3001) requires a suspended device and
    calls __bind(); lock_fs()/unlock_fs() (3045/3063) freeze and thaw md->bdev
    behind the DMF_FROZEN flag (sketched below); __dm_suspend() (3080) sets
    DMF_NOFLUSH_SUSPENDING or freezes the fs, sets DMF_BLOCK_IO_FOR_SUSPEND,
    synchronizes md->io_barrier, stops the request queue and kworker, flushes
    md->wq and waits for in-flight I/O, unwinding on failure; dm_suspend()
    (3185) and dm_resume() (3245) wrap that under md->suspend_lock, waiting
    out any internal suspend first, and maintain DMF_SUSPENDED; __dm_resume()
    (3222) flushes deferred I/O, restarts the queue and unlocks the fs;
    __dm_internal_suspend()/__dm_internal_resume() (3288/3315) keep a nesting
    count and the DMF_SUSPENDED_INTERNALLY flag, with
    dm_internal_suspend_noflush() (3337) as an exported entry point.
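  lock_fs()/unlock_fs() are a thin wrapper around freeze_bdev()/thaw_bdev().
  A self-contained sketch of that wrapper, with the md->bdev, md->frozen_sb
  and DMF_FROZEN state pulled into a stand-in struct and the flag reduced to
  a plain bool:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/types.h>

struct fs_guard {
	struct block_device *bdev;
	struct super_block *frozen_sb;
	bool frozen;
};

/* lock_fs()-style: freeze the filesystem on top of the device before suspend. */
static int fs_guard_freeze(struct fs_guard *g)
{
	g->frozen_sb = freeze_bdev(g->bdev);
	if (IS_ERR(g->frozen_sb)) {
		int r = PTR_ERR(g->frozen_sb);

		g->frozen_sb = NULL;
		return r;
	}
	g->frozen = true;
	return 0;
}

/* unlock_fs()-style: thaw only if this path actually froze it. */
static void fs_guard_thaw(struct fs_guard *g)
{
	if (!g->frozen)
		return;
	thaw_bdev(g->bdev, g->frozen_sb);
	g->frozen_sb = NULL;
	g->frozen = false;
}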
  - Internal suspend variants: dm_internal_resume() (3345) is the locked
    counterpart of dm_internal_suspend_noflush(); the _fast variants
    (3358/3371) keep md->suspend_lock held across the pair (comment at 3354)
    and only set DMF_BLOCK_IO_FOR_SUSPEND, synchronize md->io_barrier, flush
    md->wq and wait for in-flight I/O.
  - Events and sysfs: dm_kobject_uevent() (3386) emits kobject events on
    dm_disk(md); dm_next_uevent_seq() (3402), dm_get_event_nr() (3407),
    dm_wait_event() (3412, wait_event_interruptible() on md->eventq until
    md->event_nr moves; sketched below) and dm_uevent_add() (3418) manage the
    uevent list and counters; dm_disk() (3431), dm_kobject() (3437) and
    dm_get_from_kobject() (3444, rejects freeing or deleting devices before
    dm_get()) expose the device through sysfs; dm_suspended_md() (3456),
    dm_suspended_internally_md() (3461) and dm_test_deferred_remove_flag()
    (3466) test the corresponding md->flags bits.
  - Mempools and persistent reservations: dm_alloc_md_mempools() (3483) sizes
    the pools according to filter_md_type(); dm_pr_register(), dm_pr_reserve(),
    dm_pr_release(), dm_pr_preempt() and dm_pr_clear() (3558-3664) all follow
    the same shape, taking the live table via dm_get_live_table_for_ioctl()
    and releasing it with dm_put_live_table().
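  The event machinery is a classic counter-plus-waitqueue handshake: the
  producer bumps md->event_nr and wakes md->eventq, the consumer snapshots the
  counter and sleeps until it changes. A minimal sketch with stand-in names:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/types.h>

struct event_counter {
	atomic_t nr;			/* as md->event_nr */
	wait_queue_head_t waitq;	/* as md->eventq */
};

/* event_callback()-style producer: bump the counter, then wake waiters. */
static void post_event(struct event_counter *ec)
{
	atomic_inc(&ec->nr);
	wake_up(&ec->waitq);
}

/* dm_get_event_nr()-style snapshot for the caller to wait against. */
static u32 current_event_nr(struct event_counter *ec)
{
	return (u32)atomic_read(&ec->nr);
}

/*
 * dm_wait_event()-style consumer: sleep until the counter moves past the
 * snapshot taken earlier; returns -ERESTARTSYS if interrupted by a signal.
 */
static int wait_for_event(struct event_counter *ec, u32 seen)
{
	return wait_event_interruptible(ec->waitq,
					seen != (u32)atomic_read(&ec->nr));
}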
H A D | dm-era-target.c | 33 struct writeset_metadata md; member in struct:writeset 73 ws->md.nr_bits = nr_blocks; writeset_alloc() 74 ws->md.root = INVALID_WRITESET_ROOT; writeset_alloc() 91 memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); writeset_init() 93 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); writeset_init() 137 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); writeset_test_and_set() 300 static int superblock_read_lock(struct era_metadata *md, superblock_read_lock() argument 303 return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, superblock_read_lock() 307 static int superblock_lock_zero(struct era_metadata *md, superblock_lock_zero() argument 310 return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION, superblock_lock_zero() 314 static int superblock_lock(struct era_metadata *md, superblock_lock() argument 317 return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION, superblock_lock() 367 struct era_metadata *md = context; ws_inc() local 374 dm_tm_inc(md->tm, b); ws_inc() 379 struct era_metadata *md = context; ws_dec() local 386 dm_bitset_del(&md->bitset_info, b); ws_dec() 396 static void setup_writeset_tree_info(struct era_metadata *md) setup_writeset_tree_info() argument 398 struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type; setup_writeset_tree_info() 399 md->writeset_tree_info.tm = md->tm; setup_writeset_tree_info() 400 md->writeset_tree_info.levels = 1; setup_writeset_tree_info() 401 vt->context = md; setup_writeset_tree_info() 408 static void setup_era_array_info(struct era_metadata *md) setup_era_array_info() argument 418 dm_array_info_init(&md->era_array_info, md->tm, &vt); setup_era_array_info() 421 static void setup_infos(struct era_metadata *md) setup_infos() argument 423 dm_disk_bitset_init(md->tm, &md->bitset_info); setup_infos() 424 setup_writeset_tree_info(md); setup_infos() 425 setup_era_array_info(md); setup_infos() 430 static int create_fresh_metadata(struct era_metadata *md) create_fresh_metadata() argument 434 r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION, create_fresh_metadata() 435 &md->tm, &md->sm); create_fresh_metadata() 441 setup_infos(md); create_fresh_metadata() 443 r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root); create_fresh_metadata() 449 r = dm_array_empty(&md->era_array_info, &md->era_array_root); create_fresh_metadata() 458 dm_sm_destroy(md->sm); create_fresh_metadata() 459 dm_tm_destroy(md->tm); create_fresh_metadata() 464 static int save_sm_root(struct era_metadata *md) save_sm_root() argument 469 r = dm_sm_root_size(md->sm, &metadata_len); save_sm_root() 473 return dm_sm_copy_root(md->sm, &md->metadata_space_map_root, save_sm_root() 477 static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk) copy_sm_root() argument 480 &md->metadata_space_map_root, copy_sm_root() 481 sizeof(md->metadata_space_map_root)); copy_sm_root() 486 * with every commit (possible optimisation here). 
'md' should be fully 489 static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk) prepare_superblock() argument 498 copy_sm_root(md, disk); prepare_superblock() 500 disk->data_block_size = cpu_to_le32(md->block_size); prepare_superblock() 502 disk->nr_blocks = cpu_to_le32(md->nr_blocks); prepare_superblock() 503 disk->current_era = cpu_to_le32(md->current_era); prepare_superblock() 505 ws_pack(&md->current_writeset->md, &disk->current_writeset); prepare_superblock() 506 disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root); prepare_superblock() 507 disk->era_array_root = cpu_to_le64(md->era_array_root); prepare_superblock() 508 disk->metadata_snap = cpu_to_le64(md->metadata_snap); prepare_superblock() 511 static int write_superblock(struct era_metadata *md) write_superblock() argument 517 r = save_sm_root(md); write_superblock() 523 r = superblock_lock_zero(md, &sblock); write_superblock() 528 prepare_superblock(md, disk); write_superblock() 530 return dm_tm_commit(md->tm, sblock); write_superblock() 536 static int format_metadata(struct era_metadata *md) format_metadata() argument 540 r = create_fresh_metadata(md); format_metadata() 544 r = write_superblock(md); format_metadata() 546 dm_sm_destroy(md->sm); format_metadata() 547 dm_tm_destroy(md->tm); format_metadata() 554 static int open_metadata(struct era_metadata *md) open_metadata() argument 560 r = superblock_read_lock(md, &sblock); open_metadata() 567 r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, open_metadata() 570 &md->tm, &md->sm); open_metadata() 576 setup_infos(md); open_metadata() 578 md->block_size = le32_to_cpu(disk->data_block_size); open_metadata() 579 md->nr_blocks = le32_to_cpu(disk->nr_blocks); open_metadata() 580 md->current_era = le32_to_cpu(disk->current_era); open_metadata() 582 md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); open_metadata() 583 md->era_array_root = le64_to_cpu(disk->era_array_root); open_metadata() 584 md->metadata_snap = le64_to_cpu(disk->metadata_snap); open_metadata() 585 md->archived_writesets = true; open_metadata() 596 static int open_or_format_metadata(struct era_metadata *md, open_or_format_metadata() argument 602 r = superblock_all_zeroes(md->bm, &unformatted); open_or_format_metadata() 607 return may_format ? 
format_metadata(md) : -EPERM; open_or_format_metadata() 609 return open_metadata(md); open_or_format_metadata() 612 static int create_persistent_data_objects(struct era_metadata *md, create_persistent_data_objects() argument 617 md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE, create_persistent_data_objects() 620 if (IS_ERR(md->bm)) { create_persistent_data_objects() 622 return PTR_ERR(md->bm); create_persistent_data_objects() 625 r = open_or_format_metadata(md, may_format); create_persistent_data_objects() 627 dm_block_manager_destroy(md->bm); create_persistent_data_objects() 632 static void destroy_persistent_data_objects(struct era_metadata *md) destroy_persistent_data_objects() argument 634 dm_sm_destroy(md->sm); destroy_persistent_data_objects() 635 dm_tm_destroy(md->tm); destroy_persistent_data_objects() 636 dm_block_manager_destroy(md->bm); destroy_persistent_data_objects() 642 static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset) swap_writeset() argument 644 rcu_assign_pointer(md->current_writeset, new_writeset); swap_writeset() 665 static int metadata_digest_lookup_writeset(struct era_metadata *md, 668 static int metadata_digest_remove_writeset(struct era_metadata *md, metadata_digest_remove_writeset() argument 674 r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root, metadata_digest_remove_writeset() 675 &key, &md->writeset_tree_root); metadata_digest_remove_writeset() 687 static int metadata_digest_transcribe_writeset(struct era_metadata *md, metadata_digest_transcribe_writeset() argument 705 r = dm_array_set_value(&md->era_array_info, md->era_array_root, metadata_digest_transcribe_writeset() 706 b, &d->value, &md->era_array_root); metadata_digest_transcribe_writeset() 721 static int metadata_digest_lookup_writeset(struct era_metadata *md, metadata_digest_lookup_writeset() argument 728 r = dm_btree_find_lowest_key(&md->writeset_tree_info, metadata_digest_lookup_writeset() 729 md->writeset_tree_root, &key); metadata_digest_lookup_writeset() 735 r = dm_btree_lookup(&md->writeset_tree_info, metadata_digest_lookup_writeset() 736 md->writeset_tree_root, &key, &disk); metadata_digest_lookup_writeset() 750 d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); metadata_digest_lookup_writeset() 757 static int metadata_digest_start(struct era_metadata *md, struct digest *d) metadata_digest_start() argument 768 dm_disk_bitset_init(md->tm, &d->info); metadata_digest_start() 783 struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL); metadata_open() local 785 if (!md) metadata_open() 788 md->bdev = bdev; metadata_open() 789 md->block_size = block_size; metadata_open() 791 md->writesets[0].md.root = INVALID_WRITESET_ROOT; metadata_open() 792 md->writesets[1].md.root = INVALID_WRITESET_ROOT; metadata_open() 793 md->current_writeset = &md->writesets[0]; metadata_open() 795 r = create_persistent_data_objects(md, may_format); metadata_open() 797 kfree(md); metadata_open() 801 return md; metadata_open() 804 static void metadata_close(struct era_metadata *md) metadata_close() argument 806 destroy_persistent_data_objects(md); metadata_close() 807 kfree(md); metadata_close() 819 static int metadata_resize(struct era_metadata *md, void *arg) metadata_resize() argument 831 writeset_free(&md->writesets[0]); metadata_resize() 832 writeset_free(&md->writesets[1]); metadata_resize() 834 r = writeset_alloc(&md->writesets[0], *new_size); metadata_resize() 840 r = writeset_alloc(&md->writesets[1], *new_size); metadata_resize() 848 r = 
dm_array_resize(&md->era_array_info, md->era_array_root, metadata_resize() 849 md->nr_blocks, *new_size, metadata_resize() 850 &value, &md->era_array_root); metadata_resize() 856 md->nr_blocks = *new_size; metadata_resize() 860 static int metadata_era_archive(struct era_metadata *md) metadata_era_archive() argument 866 r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, metadata_era_archive() 867 &md->current_writeset->md.root); metadata_era_archive() 873 ws_pack(&md->current_writeset->md, &value); metadata_era_archive() 874 md->current_writeset->md.root = INVALID_WRITESET_ROOT; metadata_era_archive() 876 keys[0] = md->current_era; metadata_era_archive() 878 r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root, metadata_era_archive() 879 keys, &value, &md->writeset_tree_root); metadata_era_archive() 886 md->archived_writesets = true; metadata_era_archive() 891 static struct writeset *next_writeset(struct era_metadata *md) next_writeset() argument 893 return (md->current_writeset == &md->writesets[0]) ? next_writeset() 894 &md->writesets[1] : &md->writesets[0]; next_writeset() 897 static int metadata_new_era(struct era_metadata *md) metadata_new_era() argument 900 struct writeset *new_writeset = next_writeset(md); metadata_new_era() 902 r = writeset_init(&md->bitset_info, new_writeset); metadata_new_era() 908 swap_writeset(md, new_writeset); metadata_new_era() 909 md->current_era++; metadata_new_era() 914 static int metadata_era_rollover(struct era_metadata *md) metadata_era_rollover() argument 918 if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { metadata_era_rollover() 919 r = metadata_era_archive(md); metadata_era_rollover() 927 r = metadata_new_era(md); metadata_era_rollover() 937 static bool metadata_current_marked(struct era_metadata *md, dm_block_t block) metadata_current_marked() argument 943 ws = rcu_dereference(md->current_writeset); metadata_current_marked() 950 static int metadata_commit(struct era_metadata *md) metadata_commit() argument 955 if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { metadata_commit() 956 r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, metadata_commit() 957 &md->current_writeset->md.root); metadata_commit() 964 r = save_sm_root(md); metadata_commit() 970 r = dm_tm_pre_commit(md->tm); metadata_commit() 976 r = superblock_lock(md, &sblock); metadata_commit() 982 prepare_superblock(md, dm_block_data(sblock)); metadata_commit() 984 return dm_tm_commit(md->tm, sblock); metadata_commit() 987 static int metadata_checkpoint(struct era_metadata *md) metadata_checkpoint() argument 993 return metadata_era_rollover(md); metadata_checkpoint() 999 static int metadata_take_snap(struct era_metadata *md) metadata_take_snap() argument 1004 if (md->metadata_snap != SUPERBLOCK_LOCATION) { metadata_take_snap() 1009 r = metadata_era_rollover(md); metadata_take_snap() 1015 r = metadata_commit(md); metadata_take_snap() 1021 r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION); metadata_take_snap() 1027 r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION, metadata_take_snap() 1031 dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION); metadata_take_snap() 1036 r = dm_sm_inc_block(md->sm, md->writeset_tree_root); metadata_take_snap() 1039 dm_tm_unlock(md->tm, clone); metadata_take_snap() 1043 r = dm_sm_inc_block(md->sm, md->era_array_root); metadata_take_snap() 1046 dm_sm_dec_block(md->sm, md->writeset_tree_root); metadata_take_snap() 1047 dm_tm_unlock(md->tm, clone); metadata_take_snap() 1051 md->metadata_snap = 
dm_block_location(clone); metadata_take_snap() 1053 dm_tm_unlock(md->tm, clone); metadata_take_snap() 1058 static int metadata_drop_snap(struct era_metadata *md) metadata_drop_snap() argument 1065 if (md->metadata_snap == SUPERBLOCK_LOCATION) { metadata_drop_snap() 1070 r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone); metadata_drop_snap() 1080 md->metadata_snap = SUPERBLOCK_LOCATION; metadata_drop_snap() 1083 r = dm_btree_del(&md->writeset_tree_info, metadata_drop_snap() 1087 dm_tm_unlock(md->tm, clone); metadata_drop_snap() 1091 r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root)); metadata_drop_snap() 1094 dm_tm_unlock(md->tm, clone); metadata_drop_snap() 1099 dm_tm_unlock(md->tm, clone); metadata_drop_snap() 1101 return dm_sm_dec_block(md->sm, location); metadata_drop_snap() 1111 static int metadata_get_stats(struct era_metadata *md, void *ptr) metadata_get_stats() argument 1117 r = dm_sm_get_nr_free(md->sm, &nr_free); metadata_get_stats() 1123 r = dm_sm_get_nr_blocks(md->sm, &nr_total); metadata_get_stats() 1131 s->snap = md->metadata_snap; metadata_get_stats() 1132 s->era = md->current_era; metadata_get_stats() 1149 struct era_metadata *md; member in struct:era 1216 r = era->digest.step(era->md, &era->digest); process_old_eras() 1242 r = writeset_test_and_set(&era->md->bitset_info, process_deferred_bios() 1243 era->md->current_writeset, process_deferred_bios() 1259 r = metadata_commit(era->md); process_deferred_bios() 1285 rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg); process_rpc_calls() 1290 r = metadata_commit(era->md); process_rpc_calls() 1302 if (era->md->archived_writesets) { kick_off_digest() 1303 era->md->archived_writesets = false; kick_off_digest() 1304 metadata_digest_start(era->md, &era->digest); kick_off_digest() 1393 if (era->md) era_destroy() 1394 metadata_close(era->md); era_destroy() 1429 struct era_metadata *md; era_ctr() local 1482 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); era_ctr() 1483 if (IS_ERR(md)) { era_ctr() 1486 return PTR_ERR(md); era_ctr() 1488 era->md = md; era_ctr() 1492 r = metadata_resize(era->md, &era->nr_blocks); era_ctr() 1547 !metadata_current_marked(era->md, block)) { era_map()
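Note on the dm-era hits above: the metadata keeps two writesets and, at era rollover, metadata_new_era() initialises the spare one and swap_writeset() publishes it with rcu_assign_pointer(), while metadata_current_marked() reads it through rcu_dereference(). The following is a minimal userspace sketch of that double-buffer swap, using C11 release/acquire atomics as a stand-in for RCU; the field names and sizes are illustrative, not the real dm-era layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct writeset {
    uint32_t era;          /* era this writeset belongs to */
    bool     bits[16];     /* toy "blocks written this era" bitmap */
};

struct era_metadata {
    struct writeset ws[2];                 /* two buffers, like md->writesets[] */
    _Atomic(struct writeset *) current;    /* like md->current_writeset */
    uint32_t current_era;
};

/* Roll over to a fresh era: reset the spare buffer, then publish it. */
static void era_rollover(struct era_metadata *md)
{
    struct writeset *cur  = atomic_load_explicit(&md->current, memory_order_acquire);
    struct writeset *next = (cur == &md->ws[0]) ? &md->ws[1] : &md->ws[0];

    *next = (struct writeset){ .era = md->current_era + 1 };
    atomic_store_explicit(&md->current, next, memory_order_release);
    md->current_era++;
}

/* Reader side: is this block marked in the current era? */
static bool current_marked(struct era_metadata *md, unsigned block)
{
    struct writeset *ws = atomic_load_explicit(&md->current, memory_order_acquire);
    return ws->bits[block];
}

int main(void)
{
    struct era_metadata md = { .current_era = 1 };
    md.ws[0].era = 1;
    atomic_store(&md.current, &md.ws[0]);

    md.ws[0].bits[3] = true;
    printf("era %u, block 3 marked: %d\n", md.current_era, current_marked(&md, 3));

    era_rollover(&md);
    printf("era %u, block 3 marked: %d\n", md.current_era, current_marked(&md, 3));
    return 0;
}

The design point mirrored here is that readers never block: they see either the old or the new writeset pointer, and the old buffer is only reused after the swap (the kernel relies on RCU grace periods for that guarantee).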
|
H A D | dm-sysfs.c | 25 struct mapped_device *md; dm_attr_show() local 32 md = dm_get_from_kobject(kobj); dm_attr_show() 33 if (!md) dm_attr_show() 36 ret = dm_attr->show(md, page); dm_attr_show() 37 dm_put(md); dm_attr_show() 50 struct mapped_device *md; dm_attr_store() local 57 md = dm_get_from_kobject(kobj); dm_attr_store() 58 if (!md) dm_attr_store() 61 ret = dm_attr->store(md, page, count); dm_attr_store() 62 dm_put(md); dm_attr_store() 67 static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf) dm_attr_name_show() argument 69 if (dm_copy_name_and_uuid(md, buf, NULL)) dm_attr_name_show() 76 static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) dm_attr_uuid_show() argument 78 if (dm_copy_name_and_uuid(md, NULL, buf)) dm_attr_uuid_show() 85 static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) dm_attr_suspended_show() argument 87 sprintf(buf, "%d\n", dm_suspended_md(md)); dm_attr_suspended_show() 92 static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf) dm_attr_use_blk_mq_show() argument 94 sprintf(buf, "%d\n", dm_use_blk_mq(md)); dm_attr_use_blk_mq_show() 127 * because nobody using md yet, no need to call explicit dm_get/put 129 int dm_sysfs_init(struct mapped_device *md) dm_sysfs_init() argument 131 return kobject_init_and_add(dm_kobject(md), &dm_ktype, dm_sysfs_init() 132 &disk_to_dev(dm_disk(md))->kobj, dm_sysfs_init() 139 void dm_sysfs_exit(struct mapped_device *md) dm_sysfs_exit() argument 141 struct kobject *kobj = dm_kobject(md); dm_sysfs_exit()
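Note on the dm-sysfs.c hits: dm_attr_show()/dm_attr_store() are generic sysfs entry points that recover the specific attribute via container_of() and forward to its ->show/->store callback. Below is a userspace sketch of that dispatch shape, assuming an invented struct demo_device and attribute names; it is not the real dm sysfs code.

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };

struct demo_device { const char *name; int suspended; };

struct demo_attr {
    struct attribute attr;
    ssize_t (*show)(struct demo_device *dev, char *buf);
};

static ssize_t name_show(struct demo_device *dev, char *buf)
{
    return sprintf(buf, "%s\n", dev->name);
}

static ssize_t suspended_show(struct demo_device *dev, char *buf)
{
    return sprintf(buf, "%d\n", dev->suspended);
}

static struct demo_attr attr_name      = { { "name" },      name_show };
static struct demo_attr attr_suspended = { { "suspended" }, suspended_show };

/* Generic entry point, analogous to dm_attr_show(). */
static ssize_t demo_attr_show(struct demo_device *dev, struct attribute *a, char *buf)
{
    struct demo_attr *da = container_of(a, struct demo_attr, attr);

    if (!da->show)
        return -1;
    return da->show(dev, buf);
}

int main(void)
{
    struct demo_device dev = { .name = "dm-0", .suspended = 0 };
    char buf[64];

    demo_attr_show(&dev, &attr_name.attr, buf);      printf("%s", buf);
    demo_attr_show(&dev, &attr_suspended.attr, buf); printf("%s", buf);
    return 0;
}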
|
H A D | dm-ioctl.c | 35 struct mapped_device *md; member in struct:hash_cell 118 dm_get(hc->md); __get_name_cell() 132 dm_get(hc->md); __get_uuid_cell() 141 struct mapped_device *md; __get_dev_cell() local 144 md = dm_get_md(huge_decode_dev(dev)); __get_dev_cell() 145 if (!md) __get_dev_cell() 148 hc = dm_get_mdptr(md); __get_dev_cell() 150 dm_put(md); __get_dev_cell() 161 struct mapped_device *md) alloc_cell() 189 hc->md = md; alloc_cell() 207 static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) dm_hash_insert() argument 214 cell = alloc_cell(name, uuid, md); dm_hash_insert() 224 dm_put(hc->md); dm_hash_insert() 234 dm_put(hc->md); dm_hash_insert() 239 dm_get(md); dm_hash_insert() 241 dm_set_mdptr(md, cell); dm_hash_insert() 262 dm_set_mdptr(hc->md, NULL); __hash_remove() 265 table = dm_get_live_table(hc->md, &srcu_idx); __hash_remove() 268 dm_put_live_table(hc->md, srcu_idx); __hash_remove() 273 dm_put(hc->md); __hash_remove() 283 struct mapped_device *md; dm_hash_remove_all() local 293 md = hc->md; dm_hash_remove_all() 294 dm_get(md); dm_hash_remove_all() 297 dm_lock_for_deletion(md, mark_deferred, only_deferred)) { dm_hash_remove_all() 298 dm_put(md); dm_hash_remove_all() 308 dm_sync_table(md); dm_hash_remove_all() 311 dm_put(md); dm_hash_remove_all() 313 dm_destroy(md); dm_hash_remove_all() 315 dm_destroy_immediate(md); dm_hash_remove_all() 374 struct mapped_device *md; dm_hash_rename() local 400 dm_put(hc->md); dm_hash_rename() 425 dm_put(hc->md); dm_hash_rename() 439 table = dm_get_live_table(hc->md, &srcu_idx); dm_hash_rename() 442 dm_put_live_table(hc->md, srcu_idx); dm_hash_rename() 444 if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr)) dm_hash_rename() 447 md = hc->md; dm_hash_rename() 451 return md; dm_hash_rename() 543 disk = dm_disk(hc->md); list_devices() 645 static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx) dm_get_inactive_table() argument 651 dm_get_live_table(md, srcu_idx); dm_get_inactive_table() 654 hc = dm_get_mdptr(md); dm_get_inactive_table() 655 if (!hc || hc->md != md) { dm_get_inactive_table() 668 static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, dm_get_live_or_inactive_table() argument 673 dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx); dm_get_live_or_inactive_table() 680 static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) __dev_status() argument 682 struct gendisk *disk = dm_disk(md); __dev_status() 689 if (dm_suspended_md(md)) __dev_status() 692 if (dm_suspended_internally_md(md)) __dev_status() 695 if (dm_test_deferred_remove_flag(md)) __dev_status() 705 param->open_count = dm_open_count(md); __dev_status() 707 param->event_nr = dm_get_event_nr(md); __dev_status() 710 table = dm_get_live_table(md, &srcu_idx); __dev_status() 720 dm_put_live_table(md, srcu_idx); __dev_status() 724 table = dm_get_inactive_table(md, &srcu_idx); __dev_status() 730 dm_put_live_table(md, srcu_idx); __dev_status() 737 struct mapped_device *md; dev_create() local 746 r = dm_create(m, &md); dev_create() 750 r = dm_hash_insert(param->name, *param->uuid ? 
param->uuid : NULL, md); dev_create() 752 dm_put(md); dev_create() 753 dm_destroy(md); dev_create() 759 __dev_status(md, param); dev_create() 761 dm_put(md); dev_create() 815 struct mapped_device *md = NULL; find_device() local 820 md = hc->md; find_device() 823 return md; find_device() 829 struct mapped_device *md; dev_remove() local 842 md = hc->md; dev_remove() 847 r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); dev_remove() 851 dm_put(md); dev_remove() 856 dm_put(md); dev_remove() 864 dm_sync_table(md); dev_remove() 870 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) dev_remove() 873 dm_put(md); dev_remove() 874 dm_destroy(md); dev_remove() 895 struct mapped_device *md; dev_rename() local 911 md = dm_hash_rename(param, new_data); dev_rename() 912 if (IS_ERR(md)) dev_rename() 913 return PTR_ERR(md); dev_rename() 915 __dev_status(md, param); dev_rename() 916 dm_put(md); dev_rename() 924 struct mapped_device *md; dev_set_geometry() local 930 md = find_device(param); dev_set_geometry() 931 if (!md) dev_set_geometry() 959 r = dm_set_geometry(md, &geometry); dev_set_geometry() 964 dm_put(md); dev_set_geometry() 972 struct mapped_device *md; do_suspend() local 974 md = find_device(param); do_suspend() 975 if (!md) do_suspend() 983 if (!dm_suspended_md(md)) { do_suspend() 984 r = dm_suspend(md, suspend_flags); do_suspend() 989 __dev_status(md, param); do_suspend() 992 dm_put(md); do_suspend() 1002 struct mapped_device *md; do_resume() local 1014 md = hc->md; do_resume() 1029 if (!dm_suspended_md(md)) do_resume() 1030 dm_suspend(md, suspend_flags); do_resume() 1032 old_map = dm_swap_table(md, new_map); do_resume() 1034 dm_sync_table(md); do_resume() 1036 dm_put(md); do_resume() 1041 set_disk_ro(dm_disk(md), 0); do_resume() 1043 set_disk_ro(dm_disk(md), 1); do_resume() 1046 if (dm_suspended_md(md)) { do_resume() 1047 r = dm_resume(md); do_resume() 1048 if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr)) do_resume() 1060 __dev_status(md, param); do_resume() 1062 dm_put(md); do_resume() 1084 struct mapped_device *md; dev_status() local 1086 md = find_device(param); dev_status() 1087 if (!md) dev_status() 1090 __dev_status(md, param); dev_status() 1091 dm_put(md); dev_status() 1176 struct mapped_device *md; dev_wait() local 1180 md = find_device(param); dev_wait() 1181 if (!md) dev_wait() 1187 if (dm_wait_event(md, param->event_nr)) { dev_wait() 1197 __dev_status(md, param); dev_wait() 1199 table = dm_get_live_or_inactive_table(md, param, &srcu_idx); dev_wait() 1202 dm_put_live_table(md, srcu_idx); dev_wait() 1205 dm_put(md); dev_wait() 1275 struct mapped_device *md; table_load() local 1278 md = find_device(param); table_load() 1279 if (!md) table_load() 1282 r = dm_table_create(&t, get_mode(param), param->target_count, md); table_load() 1286 /* Protect md->type and md->queue against concurrent table loads. 
*/ table_load() 1287 dm_lock_md_type(md); table_load() 1292 immutable_target_type = dm_get_immutable_target_type(md); table_load() 1301 if (dm_get_md_type(md) == DM_TYPE_NONE) { table_load() 1303 dm_set_md_type(md, dm_table_get_type(t)); table_load() 1305 /* setup md->queue to reflect md's type (may block) */ table_load() 1306 r = dm_setup_md_queue(md); table_load() 1311 } else if (dm_get_md_type(md) != dm_table_get_type(t)) { table_load() 1317 dm_unlock_md_type(md); table_load() 1321 hc = dm_get_mdptr(md); table_load() 1322 if (!hc || hc->md != md) { table_load() 1335 __dev_status(md, param); table_load() 1338 dm_sync_table(md); table_load() 1342 dm_put(md); table_load() 1347 dm_unlock_md_type(md); table_load() 1351 dm_put(md); table_load() 1359 struct mapped_device *md; table_clear() local 1378 __dev_status(hc->md, param); table_clear() 1379 md = hc->md; table_clear() 1382 dm_sync_table(md); table_clear() 1385 dm_put(md); table_clear() 1432 struct mapped_device *md; table_deps() local 1436 md = find_device(param); table_deps() 1437 if (!md) table_deps() 1440 __dev_status(md, param); table_deps() 1442 table = dm_get_live_or_inactive_table(md, param, &srcu_idx); table_deps() 1445 dm_put_live_table(md, srcu_idx); table_deps() 1447 dm_put(md); table_deps() 1458 struct mapped_device *md; table_status() local 1462 md = find_device(param); table_status() 1463 if (!md) table_status() 1466 __dev_status(md, param); table_status() 1468 table = dm_get_live_or_inactive_table(md, param, &srcu_idx); table_status() 1471 dm_put_live_table(md, srcu_idx); table_status() 1473 dm_put(md); table_status() 1484 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv, message_for_md() argument 1497 return dm_cancel_deferred_remove(md); message_for_md() 1500 r = dm_stats_message(md, argc, argv, result, maxlen); message_for_md() 1515 struct mapped_device *md; target_message() local 1523 md = find_device(param); target_message() 1524 if (!md) target_message() 1545 r = message_for_md(md, argc, argv, result, maxlen); target_message() 1549 table = dm_get_live_table(md, &srcu_idx); target_message() 1553 if (dm_deleting_md(md)) { target_message() 1570 dm_put_live_table(md, srcu_idx); target_message() 1575 __dev_status(md, param); target_message() 1586 dm_put(md); target_message() 1928 * @md: Pointer to mapped_device 1932 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) dm_copy_name_and_uuid() argument 1937 if (!md) dm_copy_name_and_uuid() 1941 hc = dm_get_mdptr(md); dm_copy_name_and_uuid() 1942 if (!hc || hc->md != md) { dm_copy_name_and_uuid() 160 alloc_cell(const char *name, const char *uuid, struct mapped_device *md) alloc_cell() argument
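Note on the dm-ioctl.c hits: every lookup that hands back a mapped_device (__get_name_cell, __get_dev_cell, find_device, dm_hash_rename, ...) takes a reference with dm_get() that the ioctl path must drop with dm_put() before returning. A minimal userspace sketch of that get/put discipline follows; the device table and field names are invented for illustration.

#include <stdio.h>
#include <string.h>

struct mapped_device {
    char name[16];
    int  refcount;
    int  registered;
};

static struct mapped_device devices[] = {
    { "dm-0", 0, 1 },
    { "dm-1", 0, 1 },
};

static void dm_get(struct mapped_device *md) { md->refcount++; }

static void dm_put(struct mapped_device *md)
{
    if (--md->refcount == 0 && !md->registered)
        printf("%s: last reference dropped, freeing\n", md->name);
}

/* Like __get_name_cell(): look up by name and return with a reference held. */
static struct mapped_device *find_device(const char *name)
{
    for (size_t i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
        if (!strcmp(devices[i].name, name)) {
            dm_get(&devices[i]);
            return &devices[i];
        }
    }
    return NULL;
}

int main(void)
{
    struct mapped_device *md = find_device("dm-1");

    if (!md)
        return 1;
    printf("found %s, refcount now %d\n", md->name, md->refcount);
    /* ... an ioctl body would use md here ... */
    dm_put(md);
    return 0;
}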
|
H A D | dm.h | 81 void dm_lock_md_type(struct mapped_device *md); 82 void dm_unlock_md_type(struct mapped_device *md); 83 void dm_set_md_type(struct mapped_device *md, unsigned type); 84 unsigned dm_get_md_type(struct mapped_device *md); 85 struct target_type *dm_get_immutable_target_type(struct mapped_device *md); 87 int dm_setup_md_queue(struct mapped_device *md); 126 int dm_deleting_md(struct mapped_device *md); 131 int dm_suspended_md(struct mapped_device *md); 136 int dm_suspended_internally_md(struct mapped_device *md); 137 void dm_internal_suspend_fast(struct mapped_device *md); 138 void dm_internal_resume_fast(struct mapped_device *md); 139 void dm_internal_suspend_noflush(struct mapped_device *md); 140 void dm_internal_resume(struct mapped_device *md); 145 int dm_test_deferred_remove_flag(struct mapped_device *md); 172 int dm_sysfs_init(struct mapped_device *md); 173 void dm_sysfs_exit(struct mapped_device *md); 174 struct kobject *dm_kobject(struct mapped_device *md); 194 void dm_destroy(struct mapped_device *md); 195 void dm_destroy_immediate(struct mapped_device *md); 196 int dm_open_count(struct mapped_device *md); 197 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred); 198 int dm_cancel_deferred_remove(struct mapped_device *md); 199 int dm_request_based(struct mapped_device *md); 200 sector_t dm_get_size(struct mapped_device *md); 201 struct request_queue *dm_get_md_queue(struct mapped_device *md); 202 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, 204 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d); 205 struct dm_stats *dm_get_stats(struct mapped_device *md); 207 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, 210 void dm_internal_suspend(struct mapped_device *md); 211 void dm_internal_resume(struct mapped_device *md); 213 bool dm_use_blk_mq(struct mapped_device *md); 224 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, 239 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf); 240 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
|
H A D | dm-raid.c | 11 #include "md.h" 69 struct mddev md; member in struct:raid_set 165 mddev_init(&rs->md); context_alloc() 169 rs->md.raid_disks = raid_devs; context_alloc() 170 rs->md.level = raid_type->level; context_alloc() 171 rs->md.new_level = rs->md.level; context_alloc() 172 rs->md.layout = raid_type->algorithm; context_alloc() 173 rs->md.new_layout = rs->md.layout; context_alloc() 174 rs->md.delta_disks = 0; context_alloc() 175 rs->md.recovery_cp = 0; context_alloc() 182 * rs->md.persistent context_alloc() 183 * rs->md.external context_alloc() 184 * rs->md.chunk_sectors context_alloc() 185 * rs->md.new_chunk_sectors context_alloc() 186 * rs->md.dev_sectors context_alloc() 196 for (i = 0; i < rs->md.raid_disks; i++) { context_free() 230 for (i = 0; i < rs->md.raid_disks; i++, argv += 2) { dev_parms() 241 rs->dev[i].rdev.mddev = &rs->md; dev_parms() 283 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); dev_parms() 289 rs->md.external = 0; dev_parms() 290 rs->md.persistent = 1; dev_parms() 291 rs->md.major_version = 2; dev_parms() 292 } else if (rebuild && !rs->md.recovery_cp) { dev_parms() 317 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). 360 if (region_size < rs->md.chunk_sectors) { validate_region_size() 369 rs->md.bitmap_info.chunksize = (region_size << 9); validate_region_size() 389 for (i = 0; i < rs->md.raid_disks; i++) validate_raid_redundancy() 396 if (rebuild_cnt >= rs->md.raid_disks) validate_raid_redundancy() 406 copies = raid10_md_layout_to_copies(rs->md.layout); validate_raid_redundancy() 424 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) { validate_raid_redundancy() 425 for (i = 0; i < rs->md.raid_disks * copies; i++) { validate_raid_redundancy() 428 d = i % rs->md.raid_disks; validate_raid_redundancy() 449 group_size = (rs->md.raid_disks / copies); validate_raid_redundancy() 450 last_group_start = (rs->md.raid_disks / group_size) - 1; validate_raid_redundancy() 452 for (i = 0; i < rs->md.raid_disks; i++) { validate_raid_redundancy() 525 rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; parse_raid_params() 546 for (i = 0; i < rs->md.raid_disks; i++) { parse_raid_params() 556 rs->md.recovery_cp = MaxSector; parse_raid_params() 561 rs->md.recovery_cp = 0; parse_raid_params() 598 if (value >= rs->md.raid_disks) { parse_raid_params() 610 if (value >= rs->md.raid_disks) { parse_raid_params() 631 rs->md.bitmap_info.max_write_behind = value; parse_raid_params() 638 rs->md.bitmap_info.daemon_sleep = value; parse_raid_params() 653 if (raid5_set_cache_size(&rs->md, (int)value)) { parse_raid_params() 663 rs->md.sync_speed_min = (int)value; parse_raid_params() 670 rs->md.sync_speed_max = (int)value; parse_raid_params() 692 if (rs->md.chunk_sectors) parse_raid_params() 693 max_io_len = rs->md.chunk_sectors; parse_raid_params() 701 if (raid10_copies > rs->md.raid_disks) { parse_raid_params() 717 sector_div(sectors_per_dev, rs->md.raid_disks); parse_raid_params() 719 rs->md.layout = raid10_format_to_md_layout(raid10_format, parse_raid_params() 721 rs->md.new_layout = rs->md.layout; parse_raid_params() 724 (rs->md.raid_disks - rs->raid_type->parity_devs))) { parse_raid_params() 728 rs->md.dev_sectors = sectors_per_dev; parse_raid_params() 731 rs->md.persistent = 0; parse_raid_params() 732 rs->md.external = 1; parse_raid_params() 739 struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); do_table_event() 748 return mddev_congested(&rs->md, bits); raid_is_congested() 752 * This structure is never routinely used 
by userspace, unlike md superblocks. 763 __le64 events; /* Incremented by md when superblock updated */ 812 struct raid_set *rs = container_of(mddev, struct raid_set, md); super_sync() 901 struct raid_set *rs = container_of(mddev, struct raid_set, md); super_init_validation() 1030 struct mddev *mddev = &rs->md; super_validate() 1073 struct mddev *mddev = &rs->md; analyse_superblocks() 1170 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); configure_discard_support() 1172 for (i = 0; i < rs->md.raid_disks; i++) { configure_discard_support() 1200 ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); configure_discard_support() 1275 rs->md.sync_super = super_sync; raid_ctr() 1280 INIT_WORK(&rs->md.event_work, do_table_event); raid_ctr() 1290 mddev_lock_nointr(&rs->md); raid_ctr() 1291 ret = md_run(&rs->md); raid_ctr() 1292 rs->md.in_sync = 0; /* Assume already marked dirty */ raid_ctr() 1293 mddev_unlock(&rs->md); raid_ctr() 1300 if (ti->len != rs->md.array_sectors) { raid_ctr() 1308 mddev_suspend(&rs->md); raid_ctr() 1312 md_stop(&rs->md); raid_ctr() 1324 md_stop(&rs->md); raid_dtr() 1331 struct mddev *mddev = &rs->md; raid_map() 1374 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks); raid_status() 1377 if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery)) raid_status() 1378 sync = rs->md.curr_resync_completed; raid_status() 1380 sync = rs->md.recovery_cp; raid_status() 1382 if (sync >= rs->md.resync_max_sectors) { raid_status() 1387 sync = rs->md.resync_max_sectors; raid_status() 1388 } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) { raid_status() 1402 for (i = 0; i < rs->md.raid_disks; i++) raid_status() 1409 sync = rs->md.resync_max_sectors; raid_status() 1418 for (i = 0; i < rs->md.raid_disks; i++) { raid_status() 1438 (unsigned long long) rs->md.resync_max_sectors); raid_status() 1445 DMEMIT(" %s", decipher_sync_action(&rs->md)); raid_status() 1453 (strcmp(rs->md.last_sync_action, "check")) ? 
0 : raid_status() 1455 atomic64_read(&rs->md.resync_mismatches)); raid_status() 1459 for (i = 0; i < rs->md.raid_disks; i++) { raid_status() 1474 raid_param_cnt, rs->md.chunk_sectors); raid_status() 1477 (rs->md.recovery_cp == MaxSector)) raid_status() 1482 for (i = 0; i < rs->md.raid_disks; i++) raid_status() 1490 rs->md.bitmap_info.daemon_sleep); raid_status() 1493 DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min); raid_status() 1496 DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); raid_status() 1498 for (i = 0; i < rs->md.raid_disks; i++) raid_status() 1505 rs->md.bitmap_info.max_write_behind); raid_status() 1508 struct r5conf *conf = rs->md.private; raid_status() 1517 rs->md.bitmap_info.chunksize >> 9); raid_status() 1521 raid10_md_layout_to_copies(rs->md.layout)); raid_status() 1525 raid10_md_layout_to_format(rs->md.layout)); raid_status() 1527 DMEMIT(" %d", rs->md.raid_disks); raid_status() 1528 for (i = 0; i < rs->md.raid_disks; i++) { raid_status() 1545 struct mddev *mddev = &rs->md; raid_message() 1603 for (i = 0; !ret && i < rs->md.raid_disks; i++) raid_iterate_devices() 1608 rs->md.dev_sectors, raid_iterate_devices() 1617 unsigned chunk_size = rs->md.chunk_sectors << 9; raid_io_hints() 1618 struct r5conf *conf = rs->md.private; raid_io_hints() 1628 md_stop_writes(&rs->md); raid_presuspend() 1635 mddev_suspend(&rs->md); raid_postsuspend() 1646 for (i = 0; i < rs->md.raid_disks; i++) { attempt_restore_of_faulty_devices() 1685 rdev_for_each(r, &rs->md) { attempt_restore_of_faulty_devices() 1699 set_bit(MD_CHANGE_DEVS, &rs->md.flags); raid_resume() 1702 bitmap_load(&rs->md); raid_resume() 1713 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); raid_resume() 1716 mddev_resume(&rs->md); raid_resume()
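Note on the dm-raid.c hits: struct raid_set embeds the core struct mddev as its 'md' member, and md-layer callbacks such as super_sync() and super_init_validation(), which only receive the mddev, recover the wrapper with container_of(mddev, struct raid_set, md). A toy version of that embedding, with made-up fields:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct mddev { int level; int raid_disks; };

struct raid_set {
    const char *table_name;   /* dm-level state, invented for the example */
    struct mddev md;          /* embedded md core object */
};

/* Callback that, like super_sync(), only gets the inner mddev. */
static void on_event(struct mddev *mddev)
{
    struct raid_set *rs = container_of(mddev, struct raid_set, md);
    printf("event for table %s (raid%d, %d disks)\n",
           rs->table_name, mddev->level, mddev->raid_disks);
}

int main(void)
{
    struct raid_set rs = { "my-raid", { .level = 5, .raid_disks = 4 } };
    on_event(&rs.md);
    return 0;
}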
|
H A D | dm-uevent.c | 44 struct mapped_device *md; member in struct:dm_uevent 57 static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md) dm_uevent_alloc() argument 66 event->md = md; dm_uevent_alloc() 71 static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md, dm_build_path_uevent() argument 80 event = dm_uevent_alloc(md); dm_build_path_uevent() 101 dm_next_uevent_seq(md))) { dm_build_path_uevent() 146 if (dm_copy_name_and_uuid(event->md, event->name, list_for_each_entry_safe() 186 struct mapped_device *md = dm_table_get_md(ti->table); dm_path_uevent() local 194 event = dm_build_path_uevent(md, ti, dm_path_uevent() 201 dm_uevent_add(md, &event->elist); dm_path_uevent()
|
H A D | raid0.c | 24 #include "md.h" 54 printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n", dump_zones() 58 printk(KERN_INFO "md: zone%d=[", j); dump_zones() 91 pr_debug("md/raid0:%s: looking at %s\n", rdev_for_each() 105 pr_debug("md/raid0:%s: comparing %s(%llu)" rdev_for_each() 113 pr_debug("md/raid0:%s: END\n", rdev_for_each() 122 pr_debug("md/raid0:%s: EQUAL\n", rdev_for_each() 127 pr_debug("md/raid0:%s: NOT EQUAL\n", rdev_for_each() 131 pr_debug("md/raid0:%s: ==> UNIQUE\n", 134 pr_debug("md/raid0:%s: %d zones\n", 138 pr_debug("md/raid0:%s: FINAL %d zones\n", 145 printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n", 190 "md/raid0:%s: remove inactive devices before converting to RAID0\n", rdev_for_each() 195 printk(KERN_ERR "md/raid0:%s: bad disk number %d - " rdev_for_each() 200 printk(KERN_ERR "md/raid0:%s: multiple devices for %d - " rdev_for_each() 211 printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - " 228 pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i); 236 pr_debug("md/raid0:%s: checking %s ... nope\n", 241 pr_debug("md/raid0:%s: checking %s ..." 249 pr_debug("md/raid0:%s: (%llu) is smallest!.\n", 257 pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n", 264 pr_debug("md/raid0:%s: current zone start: %llu\n", 269 pr_debug("md/raid0:%s: done.\n", mdname(mddev)); 360 printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n", raid0_run() 402 printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n", 512 printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n", raid0_takeover_raid45() 521 printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n", rdev_for_each() 552 printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n", raid0_takeover_raid10() 558 printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n", raid0_takeover_raid10() 563 printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n", raid0_takeover_raid10() 591 printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n", raid0_takeover_raid1() 634 printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n", raid0_takeover() 645 printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n", raid0_takeover() 694 MODULE_ALIAS("md-personality-2"); /* RAID0 */ 695 MODULE_ALIAS("md-raid0"); 696 MODULE_ALIAS("md-level-0");
|
H A D | dm-table.c | 32 struct mapped_device *md; member in struct:dm_table 184 unsigned num_targets, struct mapped_device *md) dm_table_create() 210 t->md = md; dm_table_create() 215 static void free_devices(struct list_head *devices, struct mapped_device *md) free_devices() argument 223 dm_device_name(md), dd->dm_dev->name); list_for_each_safe() 224 dm_put_table_device(md, dd->dm_dev); list_for_each_safe() 253 free_devices(&t->devices, t->md); dm_table_destroy() 298 dm_device_name(ti->table->md), bdevname(bdev, b), device_area_is_invalid() 311 dm_device_name(ti->table->md), bdevname(bdev, b), device_area_is_invalid() 324 dm_device_name(ti->table->md), device_area_is_invalid() 333 dm_device_name(ti->table->md), device_area_is_invalid() 349 struct mapped_device *md) upgrade_mode() 356 r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev, upgrade_mode() 362 dm_put_table_device(md, old_dev); upgrade_mode() 411 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { dm_get_device() 420 r = upgrade_mode(dd, mode, t->md); dm_get_device() 441 dm_device_name(ti->table->md), bdevname(bdev, b)); dm_set_device_limits() 449 dm_device_name(ti->table->md), bdevname(bdev, b), dm_set_device_limits() 475 dm_device_name(ti->table->md), d->name); 479 dm_put_table_device(ti->table->md, d); 660 dm_device_name(table->md), i, validate_hardware_logical_block_alignment() 679 dm_device_name(t->md), t->targets->type->name); dm_table_add_target() 689 DMERR("%s: zero-length target", dm_device_name(t->md)); dm_table_add_target() 695 DMERR("%s: %s: unknown target type", dm_device_name(t->md), dm_table_add_target() 703 dm_device_name(t->md), type); dm_table_add_target() 711 dm_device_name(t->md), type); dm_table_add_target() 718 dm_device_name(t->md), t->immutable_target_type->name); dm_table_add_target() 724 dm_device_name(t->md), tgt->type->name); dm_table_add_target() 759 dm_device_name(t->md), type); dm_table_add_target() 764 DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); dm_table_add_target() 841 unsigned live_md_type = dm_get_md_type(t->md); dm_table_set_type() 945 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) dm_table_alloc_md_mempools() argument 963 t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size); dm_table_alloc_md_mempools() 1059 dm_device_name(t->md), 1077 struct mapped_device *md = t->md; dm_table_register_integrity() local 1084 if (!integrity_profile_exists(dm_disk(md))) { dm_table_register_integrity() 1090 blk_integrity_register(dm_disk(md), dm_table_register_integrity() 1099 if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { dm_table_register_integrity() 1102 dm_device_name(t->md), dm_table_register_integrity() 1138 r = dm_table_alloc_md_mempools(t, t->md); dm_table_complete() 1289 dm_device_name(table->md), dm_calculate_queue_limits() 1313 blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) dm_table_verify_integrity() 1317 if (integrity_profile_exists(dm_disk(t->md))) { dm_table_verify_integrity() 1319 dm_device_name(t->md)); dm_table_verify_integrity() 1320 blk_integrity_unregister(dm_disk(t->md)); dm_table_verify_integrity() 1538 * md->deferred where queue settings are not needed yet. 
dm_table_set_restrictions() 1629 dm_device_name(t->md), ti->type->name, r); dm_table_resume_targets() 1665 dm_device_name(t->md), list_for_each_entry() 1678 return t->md; dm_table_get_md() 1684 struct mapped_device *md; dm_table_run_md_queue_async() local 1691 md = dm_table_get_md(t); dm_table_run_md_queue_async() 1692 queue = dm_get_md_queue(md); dm_table_run_md_queue_async() 183 dm_table_create(struct dm_table **result, fmode_t mode, unsigned num_targets, struct mapped_device *md) dm_table_create() argument 348 upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) upgrade_mode() argument
|
H A D | dm-stats.h | 29 int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
|
H A D | md-cluster.h | 6 #include "md.h"
|
H A D | md-cluster.c | 16 #include "md.h" 18 #include "md-cluster.h" 137 pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name); lockres_init() 144 pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name); lockres_init() 157 pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name); lockres_init() 254 pr_err("md-cluster: Cannot initialize bitmaps\n"); recover_bitmaps() 260 pr_err("md-cluster: Could not DLM lock %s: %d\n", recover_bitmaps() 266 pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); recover_bitmaps() 299 pr_warn("md-cluster: Could not create recovery thread\n"); __recover_slot() 311 pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n", recover_slot() 316 * cluster-md begins with 0 */ recover_slot() 507 pr_err("md/raid1:failed to get CR on MESSAGE\n"); recv_daemon() 549 pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n", lock_comm() 581 pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error); __sendmsg() 590 pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n", __sendmsg() 598 pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n", __sendmsg() 606 pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n", __sendmsg() 614 pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n", __sendmsg() 679 pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); gather_all_resync_info() 722 pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).", join() 731 pr_err("md-cluster: cannot allocate memory for recv_thread!\n"); join() 749 pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n", join() 753 pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret); join() 756 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number); join() 828 * DLM starts the slot numbers from 1, wheras cluster-md 864 pr_warn("md-cluster: No good device id found to send\n"); 973 pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev)); new_disk_ack() 1011 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); gather_bitmaps() 1041 pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n"); cluster_init()
|
H A D | linear.c | 24 #include "md.h" 104 printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n", rdev_for_each() 126 printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n", 266 "md/linear:%s: make_request: Sector %llu out of bounds on " linear_make_request() 315 MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/ 316 MODULE_ALIAS("md-linear"); 317 MODULE_ALIAS("md-level--1");
|
H A D | dm-stats.c | 243 struct mapped_device *md) dm_stats_create() 361 suspend_callback(md); 383 resume_callback(md); 389 resume_callback(md); 945 static int message_stats_create(struct mapped_device *md, message_stats_create() argument 979 len = dm_get_size(md); message_stats_create() 1050 id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags, message_stats_create() 1052 dm_internal_suspend_fast, dm_internal_resume_fast, md); message_stats_create() 1070 static int message_stats_delete(struct mapped_device *md, message_stats_delete() argument 1082 return dm_stats_delete(dm_get_stats(md), id); message_stats_delete() 1085 static int message_stats_clear(struct mapped_device *md, message_stats_clear() argument 1097 return dm_stats_clear(dm_get_stats(md), id); message_stats_clear() 1100 static int message_stats_list(struct mapped_device *md, message_stats_list() argument 1116 r = dm_stats_list(dm_get_stats(md), program, result, maxlen); message_stats_list() 1123 static int message_stats_print(struct mapped_device *md, message_stats_print() argument 1146 return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear, message_stats_print() 1150 static int message_stats_set_aux(struct mapped_device *md, message_stats_set_aux() argument 1162 return dm_stats_set_aux(dm_get_stats(md), id, argv[2]); message_stats_set_aux() 1165 int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, dm_stats_message() argument 1172 r = message_stats_create(md, argc, argv, result, maxlen); dm_stats_message() 1174 r = message_stats_delete(md, argc, argv); dm_stats_message() 1176 r = message_stats_clear(md, argc, argv); dm_stats_message() 1178 r = message_stats_list(md, argc, argv, result, maxlen); dm_stats_message() 1180 r = message_stats_print(md, argc, argv, false, result, maxlen); dm_stats_message() 1182 r = message_stats_print(md, argc, argv, true, result, maxlen); dm_stats_message() 1184 r = message_stats_set_aux(md, argc, argv); dm_stats_message() 236 dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end, sector_t step, unsigned stat_flags, unsigned n_histogram_entries, unsigned long long *histogram_boundaries, const char *program_id, const char *aux_data, void (*suspend_callback)(struct mapped_device *), void (*resume_callback)(struct mapped_device *), struct mapped_device *md) dm_stats_create() argument
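Note on the dm-stats.c hits: dm_stats_message() fans each message out to one handler per verb (message_stats_create, message_stats_delete, message_stats_list, ...). One compact way to express the same shape is a table of verb/handler pairs, as in the userspace sketch below; the "@stats_*" verb strings are an assumption from memory rather than something shown in the hits, and the handler bodies are stubs.

#include <stdio.h>
#include <string.h>
#include <strings.h>

typedef int (*stats_handler)(int argc, char **argv);

static int do_create(int argc, char **argv) { printf("create %s\n", argc > 1 ? argv[1] : "?"); return 0; }
static int do_delete(int argc, char **argv) { printf("delete %s\n", argc > 1 ? argv[1] : "?"); return 0; }
static int do_list(int argc, char **argv)   { (void)argc; (void)argv; printf("list\n"); return 0; }

static const struct { const char *verb; stats_handler fn; } handlers[] = {
    { "@stats_create", do_create },
    { "@stats_delete", do_delete },
    { "@stats_list",   do_list   },
};

static int stats_message(int argc, char **argv)
{
    if (argc < 1)
        return -22;                     /* stands in for -EINVAL */
    for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
        if (!strcasecmp(argv[0], handlers[i].verb))
            return handlers[i].fn(argc, argv);
    return -22;
}

int main(void)
{
    char *msg[] = { "@stats_create", "0+1024" };
    return stats_message(2, msg);
}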
|
H A D | faulty.c | 6 * fautly-device-simulator personality for md 69 #include "md.h" 369 MODULE_ALIAS("md-personality-10"); /* faulty */ 370 MODULE_ALIAS("md-faulty"); 371 MODULE_ALIAS("md-level--5");
|
H A D | bitmap.h | 125 __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ 128 __le64 sync_size; /* 40 the size of the md device's sync range(3) */ 136 __u8 cluster_name[64]; /* 72 cluster name to which this md belongs */ 141 * (1) This event counter is updated before the eventcounter in the md superblock 194 struct mddev *mddev; /* the md device that the bitmap is for */ 237 /* these are used only by md/bitmap */
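Note on the bitmap.h hits: the on-disk bitmap superblock stores multi-byte fields (events, sync_size, ...) little-endian (__le64), so any reader converts explicitly rather than casting raw memory. A cut-down userspace illustration follows; the struct is not the full on-disk layout, only enough to show the conversion.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bitmap_super_lite {
    uint8_t  uuid[16];       /* must match the md device uuid */
    uint64_t events_le;      /* event counter, little-endian on disk */
    uint64_t sync_size_le;   /* size of the sync range, little-endian on disk */
};

/* Assemble a u64 from little-endian bytes; works on any host endianness. */
static uint64_t le64_decode(const void *p)
{
    const uint8_t *b = p;
    uint64_t v = 0;
    for (int i = 7; i >= 0; i--)
        v = (v << 8) | b[i];
    return v;
}

int main(void)
{
    uint8_t raw[16 + 8 + 8] = { 0 };
    raw[16] = 0x2a;                       /* events = 42, stored little-endian */

    struct bitmap_super_lite sb;
    memcpy(&sb, raw, sizeof(sb));
    printf("events = %llu\n", (unsigned long long)le64_decode(&sb.events_le));
    return 0;
}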
|
H A D | md.c | 2 md.c : Multiple Devices driver for Linux 54 #include "md.h" 56 #include "md-cluster.h" 99 * or /sys/block/mdX/md/sync_speed_{min,max} 218 * Enables to iterate over all existing md arrays 363 * Generic flush handling for md 695 printk(KERN_ALERT "md: out of memory.\n"); alloc_disk_sb() 726 printk("md: super_written gets error=%d\n", bio->bi_error); super_written() 800 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", read_disk_sb() 823 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n"); sb_equal() 969 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", super_90_load() 987 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", super_90_load() 1009 printk(KERN_WARNING "md: %s has different UUID to %s\n", super_90_load() 1014 printk(KERN_WARNING "md: %s has same UUID" super_90_load() 1418 printk("md: invalid superblock checksum on %s\n", super_1_load() 1423 printk("md: data_size too small on %s\n", super_1_load() 1509 printk(KERN_WARNING "md: %s has strangely different" super_1_load() 1651 "md: journal device provided without journal feature, ignoring the device\n"); super_1_validate() 1922 .name = "md-1", 2007 * profiles, register the common profile for the md device. 2012 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); 2014 printk(KERN_ERR "md: failed to create integrity pool for %s\n", 2095 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", bind_rdev_to_array() 2103 printk(KERN_INFO "md: bind<%s>\n", b); bind_rdev_to_array() 2122 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", bind_rdev_to_array() 2140 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); unbind_rdev_from_array() 2170 printk(KERN_ERR "md: could not open %s.\n", lock_rdev() 2191 printk(KERN_INFO "md: export_rdev(%s)\n", export_rdev() 2400 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2414 pr_debug("md: (write) %s's sb offset: %llu\n", rdev_for_each() 2427 pr_debug("md: %s (skipping faulty)\n", rdev_for_each() 3211 printk(KERN_ERR "md: could not alloc mem for new device!\n"); md_import_device() 3231 "md: %s has zero or unknown size, marking faulty!\n", md_import_device() 3242 "md: %s does not have a valid v%d.%d " md_import_device() 3250 "md: could not read %s's sb, not importing!\n", md_import_device() 3287 "md: fatal superblock inconsistency in %s" rdev_for_each_safe() 3302 "md: %s: %s: only %d devices permitted\n", rdev_for_each_safe() 3311 printk(KERN_WARNING "md: kicking non-fresh %s" rdev_for_each_safe() 3383 pr_info("md: Safemode is disabled for clustered mode\n"); safe_delay_store() 3471 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", level_store() 3484 if (request_module("md-%s", clevel) != 0) level_store() 3485 request_module("md-level-%s", clevel); level_store() 3490 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); level_store() 3504 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", level_store() 3525 printk(KERN_WARNING "md: %s: %s would not accept array\n", level_store() 3570 "md: cannot register extra attributes for %s\n", level_store() 3600 printk(KERN_WARNING "md: cannot register rd%d" rdev_for_each() 5038 sprintf(disk->disk_name, "md%d", unit); md_alloc() 5056 &disk_to_dev(disk)->kobj, "%s", "md"); md_alloc() 5061 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", md_alloc() 5144 request_module("md-level-%d", mddev->level); md_run() 5146 request_module("md-%s", 
mddev->clevel); md_run() 5150 * the only valid external interface is through the md md_run() 5169 printk("md: %s: data overlaps metadata\n", rdev_for_each() 5176 printk("md: %s: metadata overlaps data\n", rdev_for_each() 5192 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 5195 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 5254 printk(KERN_ERR "md: pers->run() failed ...\n"); 5259 "md: invalid array_size %llu > default size %llu\n", 5294 "md: cannot register extra attributes for %s\n", 5397 printk(KERN_INFO "md: %s switched to read-write mode.\n", 5490 printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n", mddev_detach() 5567 printk("md: %s still in use.\n",mdname(mddev)); md_set_readonly() 5630 printk("md: %s still in use.\n",mdname(mddev)); do_md_stop() 5667 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); do_md_stop() 5700 printk(KERN_INFO "md: running: "); autorun_array() 5710 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5733 printk(KERN_INFO "md: autorun ...\n"); autorun_devices() 5741 printk(KERN_INFO "md: considering %s ...\n", autorun_devices() 5746 printk(KERN_INFO "md: adding %s ...\n", autorun_devices() 5764 printk(KERN_INFO "md: unit number in %s is bad: %d\n", autorun_devices() 5775 "md: cannot allocate memory for md drive.\n"); autorun_devices() 5779 printk(KERN_WARNING "md: %s locked, cannot run\n", autorun_devices() 5784 "md: %s already running, cannot run %s\n", autorun_devices() 5788 printk(KERN_INFO "md: created %s\n", mdname(mddev)); autorun_devices() 5807 printk(KERN_INFO "md: ... autorun DONE.\n"); autorun_devices() 5974 "md: md_import_device returned %ld\n", add_new_disk() 5986 "md: %s has different UUID to %s\n", add_new_disk() 6019 "md: md_import_device returned %ld\n", add_new_disk() 6105 "md: error, md_import_device() returned %ld\n", add_new_disk() 6123 printk(KERN_INFO "md: nonpersistent superblock ...\n"); add_new_disk() 6174 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", hot_remove_disk() 6204 "md: error, md_import_device() returned %ld\n", hot_add_disk() 6218 "md: can not hot-add faulty %s disk to %s!\n", hot_add_disk() 6356 "md: superblock version %d not known\n", set_array_info() 6379 /* don't set md_minor, it is determined by which /dev/md* was set_array_info() 6552 * we don't need to do anything at the md level, the update_array_info() 6793 "md: ioctl lock interrupted, reason %d, cmd %d\n", md_ioctl() 6809 printk(KERN_WARNING "md: couldn't update" md_ioctl() 6817 "md: array %s already has disks!\n", md_ioctl() 6824 "md: array %s already initialised!\n", md_ioctl() 6831 printk(KERN_WARNING "md: couldn't set" md_ioctl() 7126 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); md_wakeup_thread() 7529 printk(KERN_INFO "md: %s personality registered for level %d\n", register_md_personality() 7540 printk(KERN_INFO "md: %s personality unregistered\n", p->name); unregister_md_personality() 7577 err = request_module("md-cluster"); md_setup_cluster() 7579 pr_err("md-cluster module not found.\n"); md_setup_cluster() 7842 printk(KERN_INFO "md: delaying %s of %s" for_each_mddev() 7901 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7902 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7904 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7923 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7931 "md: resuming %s of %s from checkpoint.\n", 8059 printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), 
desc, 8087 "md: checkpointing %s of %s.\n", 8276 printk(KERN_INFO "md: %s in immediate safe mode\n", md_check_recovery() 9033 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); md_geninit() 9042 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); md_init() 9050 if ((ret = register_blkdev(MD_MAJOR, "md")) < 0) md_init() 9069 unregister_blkdev(MD_MAJOR, "md"); md_init() 9095 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); rdev_for_each() 9225 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" md_autodetect_dev() 9240 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); autostart_arrays() 9261 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", autostart_arrays() 9278 unregister_blkdev(MD_MAJOR,"md"); md_exit() 9321 MODULE_ALIAS("md");
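Note on the md.c hits: md_run() resolves the RAID "personality" at run time, optionally calling request_module("md-level-%d", ...) and then looking the personality up in a registry filled by register_md_personality(); the MODULE_ALIAS("md-level-*") lines visible in raid0.c, linear.c, raid1.c and friends are what let that request_module() call find the right module. Below is a userspace sketch of the registry part only (no module loading); the names and fields are illustrative.

#include <stdio.h>
#include <string.h>

struct md_personality {
    const char *name;
    int level;
    int (*run)(void);
};

#define MAX_PERS 8
static struct md_personality *pers_list[MAX_PERS];

static int register_md_personality(struct md_personality *p)
{
    for (int i = 0; i < MAX_PERS; i++) {
        if (!pers_list[i]) {
            pers_list[i] = p;
            printf("md: %s personality registered for level %d\n", p->name, p->level);
            return 0;
        }
    }
    return -1;
}

static struct md_personality *find_pers(int level)
{
    for (int i = 0; i < MAX_PERS; i++)
        if (pers_list[i] && pers_list[i]->level == level)
            return pers_list[i];
    return NULL;
}

static int raid0_run(void) { printf("raid0: running\n"); return 0; }

static struct md_personality raid0_pers = { "raid0", 0, raid0_run };

int main(void)
{
    register_md_personality(&raid0_pers);

    struct md_personality *p = find_pers(0);
    if (!p) {
        /* the kernel would try request_module("md-level-0") here */
        fprintf(stderr, "md: personality for level 0 is not loaded!\n");
        return 1;
    }
    return p->run();
}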
|
H A D | multipath.c | 27 #include "md.h" 524 MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ 525 MODULE_ALIAS("md-multipath"); 526 MODULE_ALIAS("md-level--4");
|
H A D | raid10.c | 6 * RAID-10 support for md. 28 #include "md.h" 408 "md/raid10:%s: %s: rescheduling sector %llu\n", raid10_end_read_request() 1598 "md/raid10:%s: Disk failure on %s, disabling device.\n" error() 1599 "md/raid10:%s: Operation continuing on %d devices.\n", error() 2113 "md/raid10:%s: recovery aborted" fix_recovery_read_error() 2264 "md/raid10:%s: %s: Raid device exceeded " fix_read_error() 2269 "md/raid10:%s: %s: Failing raid device\n", fix_read_error() 2358 "md/raid10:%s: read correction " fix_read_error() 2367 printk(KERN_NOTICE "md/raid10:%s: %s: failing " fix_read_error() 2398 "md/raid10:%s: unable to read back " fix_read_error() 2406 printk(KERN_NOTICE "md/raid10:%s: %s: failing " fix_read_error() 2413 "md/raid10:%s: read error corrected" fix_read_error() 2526 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O" handle_read_error() 2538 "md/raid10:%s: %s: redirecting " handle_read_error() 3131 printk(KERN_INFO "md/raid10:%s: insufficient " sync_request() 3450 printk(KERN_ERR "md/raid10:%s: chunk size must be " setup_conf() 3457 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n", setup_conf() 3518 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", setup_conf() 3616 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n", 3658 printk(KERN_NOTICE "md/raid10:%s: not clean" 3662 "md/raid10:%s: active with %d out of %d devices\n", 3699 printk("md/raid10: offset difference not enough to continue reshape\n"); 3804 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n", raid10_takeover_raid0() 3845 printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0" raid10_takeover() 4036 printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n", 4656 MODULE_ALIAS("md-personality-9"); /* RAID10 */ 4657 MODULE_ALIAS("md-raid10"); 4658 MODULE_ALIAS("md-level-10");
|
H A D | raid1.c | 40 #include "md.h" 356 KERN_ERR "md/raid1:%s: %s: " raid1_end_read_request() 1480 "md/raid1:%s: Disk failure on %s, disabling device.\n" error() 1481 "md/raid1:%s: Operation continuing on %d devices.\n", error() 1846 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" fix_sync_read_error() 2134 "md/raid1:%s: read error corrected " fix_read_error() 2319 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" handle_read_error() 2338 "md/raid1:%s: redirecting sector %llu" handle_read_error() 2877 "md/raid1:%s: couldn't allocate thread\n", 2905 printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n", run() 2910 printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n", run() 2950 printk(KERN_NOTICE "md/raid1:%s: not clean" 2954 "md/raid1:%s: active with %d out of %d mirrors\n", 3108 "md/raid1:%s: cannot register rd%d\n", raid1_reshape() 3207 MODULE_ALIAS("md-personality-3"); /* RAID1 */ 3208 MODULE_ALIAS("md-raid1"); 3209 MODULE_ALIAS("md-level-1");
|
H A D | raid5.c | 60 #include "md.h" 193 /* md starts just after Q block */ raid6_d0() 2330 "md/raid:%s: read error corrected" raid5_end_read_request() 2353 "md/raid:%s: read error on replacement device " raid5_end_read_request() 2362 "md/raid:%s: read error not correctable " raid5_end_read_request() 2372 "md/raid:%s: read error NOT corrected!! " raid5_end_read_request() 2380 "md/raid:%s: Too many read errors, failing device %s.\n", raid5_end_read_request() 2522 "md/raid:%s: Disk failure on %s, disabling device.\n" error() 2523 "md/raid:%s: Operation continuing on %d devices.\n", error() 2849 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", raid5_compute_blocknr() 6475 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", setup_conf() 6483 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", setup_conf() 6488 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", setup_conf() 6496 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", setup_conf() 6588 printk(KERN_INFO "md/raid:%s: device %s operational as raid" rdev_for_each() 6623 "md/raid:%s: couldn't allocate %dkB for buffers\n", 6627 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 6645 "md/raid:%s: couldn't allocate thread.\n", 6699 printk(KERN_NOTICE "md/raid:%s: not clean" run() 6744 printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n", 6750 printk(KERN_ERR "md/raid:%s: unsupported reshape " 6767 printk(KERN_ERR "md/raid:%s: reshape_position not " 6789 printk(KERN_ERR "md/raid:%s: in-place reshape " 6801 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 6806 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 6825 printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n", 6851 printk(KERN_ERR "md: cannot handle concurrent " 6894 printk(KERN_ERR "md/raid:%s: not enough operational devices" 6908 "md/raid:%s: starting dirty degraded array" 6913 "md/raid:%s: cannot start dirty degraded array.\n", 6920 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 6925 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 7017 pr_info("md/raid456: discard support disabled due to uncertainty.\n"); rdev_for_each() 7037 printk(KERN_INFO"md/raid:%s: using device %s as journal\n", 7048 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 7325 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", check_stripe_cache() 7409 printk(KERN_ERR "md/raid:%s: array size must be reduced " 7626 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", raid45_takeover_raid0() 7948 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 7949 MODULE_ALIAS("md-raid5"); 7950 MODULE_ALIAS("md-raid4"); 7951 MODULE_ALIAS("md-level-5"); 7952 MODULE_ALIAS("md-level-4"); 7953 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 7954 MODULE_ALIAS("md-raid6"); 7955 MODULE_ALIAS("md-level-6");
|
H A D | md.h | 2 md.h : kernel internal structure of the Linux MD driver 27 #include "md-cluster.h" 328 /* resync even though the same disks are shared among md-devices */ 383 int degraded; /* whether md should consider
|
H A D | raid5.h | 58 * Buffers for the md device that arrive via make_request are attached 572 /* DDF RAID6 layouts differ from md/raid6 layouts in two ways. 574 * different between the 'LEFT_*' modes of md and the "_N_*" modes 578 * Consequently we have different layouts for DDF/raid6 than md/raid6.
|
H A D | dm-verity.c | 203 struct mapped_device *md = dm_table_get_md(v->ti->table); verity_handle_err() local 233 kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp); verity_handle_err()
|
H A D | bitmap.c | 30 #include "md.h" 92 pr_debug("md/bitmap: map page allocation failed, hijacking\n"); 410 printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n", read_page() 897 * bitmap_file_set_bit -- called before performing a write to the md device 972 /* this gets called when the md device is ready to unplug its underlying 1715 mddev->bitmap = NULL; /* disconnect from the md device */ bitmap_destroy()
|
H A D | dm-mpath.c | 1257 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests do_end_io()
|
H A D | raid5-cache.c | 21 #include "md.h"
|
/linux-4.4.14/arch/ia64/kernel/ |
H A D | efi.c | 254 is_memory_available (efi_memory_desc_t *md) is_memory_available() argument 256 if (!(md->attribute & EFI_MEMORY_WB)) is_memory_available() 259 switch (md->type) { is_memory_available() 278 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) 287 efi_md_end(efi_memory_desc_t *md) efi_md_end() argument 289 return (md->phys_addr + efi_md_size(md)); efi_md_end() 293 efi_wb(efi_memory_desc_t *md) efi_wb() argument 295 return (md->attribute & EFI_MEMORY_WB); efi_wb() 299 efi_uc(efi_memory_desc_t *md) efi_uc() argument 301 return (md->attribute & EFI_MEMORY_UC); efi_uc() 351 efi_memory_desc_t *md; efi_get_pal_addr() local 361 md = p; efi_get_pal_addr() 362 if (md->type != EFI_PAL_CODE) efi_get_pal_addr() 367 "dropped @ %llx\n", md->phys_addr); efi_get_pal_addr() 375 vaddr = PAGE_OFFSET + md->phys_addr; efi_get_pal_addr() 396 if (efi_md_size(md) > IA64_GRANULE_SIZE) efi_get_pal_addr() 404 smp_processor_id(), md->phys_addr, efi_get_pal_addr() 405 md->phys_addr + efi_md_size(md), efi_get_pal_addr() 408 return __va(md->phys_addr); efi_get_pal_addr() 562 efi_memory_desc_t *md; efi_init() local 572 md = p; efi_init() 573 size = md->num_pages << EFI_PAGE_SHIFT; efi_init() 591 i, efi_md_typeattr_format(buf, sizeof(buf), md), efi_init() 592 md->phys_addr, efi_init() 593 md->phys_addr + efi_md_size(md), size, unit); efi_init() 606 efi_memory_desc_t *md; efi_enter_virtual_mode() local 615 md = p; efi_enter_virtual_mode() 616 if (md->attribute & EFI_MEMORY_RUNTIME) { efi_enter_virtual_mode() 621 if (md->attribute & EFI_MEMORY_WB) { efi_enter_virtual_mode() 622 md->virt_addr = (u64) __va(md->phys_addr); efi_enter_virtual_mode() 623 } else if (md->attribute & EFI_MEMORY_UC) { efi_enter_virtual_mode() 624 md->virt_addr = (u64) ioremap(md->phys_addr, 0); efi_enter_virtual_mode() 625 } else if (md->attribute & EFI_MEMORY_WC) { efi_enter_virtual_mode() 627 md->virt_addr = ia64_remap(md->phys_addr, efi_enter_virtual_mode() 636 md->virt_addr = (u64) ioremap(md->phys_addr, 0); efi_enter_virtual_mode() 638 } else if (md->attribute & EFI_MEMORY_WT) { efi_enter_virtual_mode() 640 md->virt_addr = ia64_remap(md->phys_addr, efi_enter_virtual_mode() 649 md->virt_addr = (u64) ioremap(md->phys_addr, 0); efi_enter_virtual_mode() 691 efi_memory_desc_t *md; efi_get_iobase() local 699 md = p; efi_get_iobase() 700 if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) { efi_get_iobase() 701 if (md->attribute & EFI_MEMORY_UC) efi_get_iobase() 702 return md->phys_addr; efi_get_iobase() 711 struct kern_memdesc *md; kern_memory_descriptor() local 713 for (md = kern_memmap; md->start != ~0UL; md++) { kern_memory_descriptor() 714 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) kern_memory_descriptor() 715 return md; kern_memory_descriptor() 724 efi_memory_desc_t *md; efi_memory_descriptor() local 732 md = p; efi_memory_descriptor() 734 if (phys_addr - md->phys_addr < efi_md_size(md)) efi_memory_descriptor() 735 return md; efi_memory_descriptor() 744 efi_memory_desc_t *md; efi_memmap_intersects() local 755 md = p; efi_memmap_intersects() 756 if (md->phys_addr < end && efi_md_end(md) > phys_addr) efi_memmap_intersects() 765 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_type() local 767 if (md) efi_mem_type() 768 return md->type; efi_mem_type() 775 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); efi_mem_attributes() local 777 if (md) efi_mem_attributes() 778 return md->attribute; efi_mem_attributes() 787 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); 
efi_mem_attribute() local 790 if (!md) efi_mem_attribute() 797 attr = md->attribute & ~EFI_MEMORY_RUNTIME; efi_mem_attribute() 799 unsigned long md_end = efi_md_end(md); efi_mem_attribute() 804 md = efi_memory_descriptor(md_end); efi_mem_attribute() 805 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) efi_mem_attribute() 807 } while (md); efi_mem_attribute() 815 struct kern_memdesc *md; kern_mem_attribute() local 829 md = kern_memory_descriptor(phys_addr); kern_mem_attribute() 830 if (!md) kern_mem_attribute() 833 attr = md->attribute; kern_mem_attribute() 835 unsigned long md_end = kmd_end(md); kern_mem_attribute() 840 md = kern_memory_descriptor(md_end); kern_mem_attribute() 841 if (!md || md->attribute != attr) kern_mem_attribute() 843 } while (md); kern_mem_attribute() 977 efi_memory_desc_t *md, *pmd = NULL, *check_md; find_memmap_space() local 993 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { find_memmap_space() 994 md = p; find_memmap_space() 995 if (!efi_wb(md)) { find_memmap_space() 999 efi_md_end(pmd) != md->phys_addr) { find_memmap_space() 1000 contig_low = GRANULEROUNDUP(md->phys_addr); find_memmap_space() 1001 contig_high = efi_md_end(md); find_memmap_space() 1013 if (!is_memory_available(md) || md->type == EFI_LOADER_DATA) find_memmap_space() 1017 as = max(contig_low, md->phys_addr); find_memmap_space() 1018 ae = min(contig_high, efi_md_end(md)); find_memmap_space() 1054 efi_memory_desc_t *md, *pmd = NULL, *check_md; efi_memmap_init() local 1064 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { efi_memmap_init() 1065 md = p; efi_memmap_init() 1066 if (!efi_wb(md)) { efi_memmap_init() 1067 if (efi_uc(md) && efi_memmap_init() 1068 (md->type == EFI_CONVENTIONAL_MEMORY || efi_memmap_init() 1069 md->type == EFI_BOOT_SERVICES_DATA)) { efi_memmap_init() 1071 k->start = md->phys_addr; efi_memmap_init() 1072 k->num_pages = md->num_pages; efi_memmap_init() 1078 efi_md_end(pmd) != md->phys_addr) { efi_memmap_init() 1079 contig_low = GRANULEROUNDUP(md->phys_addr); efi_memmap_init() 1080 contig_high = efi_md_end(md); efi_memmap_init() 1092 if (!is_memory_available(md)) efi_memmap_init() 1099 if (md->phys_addr < contig_low) { efi_memmap_init() 1100 lim = min(efi_md_end(md), contig_low); efi_memmap_init() 1101 if (efi_uc(md)) { efi_memmap_init() 1104 kmd_end(k-1) == md->phys_addr) { efi_memmap_init() 1106 (lim - md->phys_addr) efi_memmap_init() 1110 k->start = md->phys_addr; efi_memmap_init() 1111 k->num_pages = (lim - md->phys_addr) efi_memmap_init() 1118 as = md->phys_addr; efi_memmap_init() 1120 if (efi_md_end(md) > contig_high) { efi_memmap_init() 1121 lim = max(md->phys_addr, contig_high); efi_memmap_init() 1122 if (efi_uc(md)) { efi_memmap_init() 1123 if (lim == md->phys_addr && k > kern_memmap && efi_memmap_init() 1125 kmd_end(k-1) == md->phys_addr) { efi_memmap_init() 1126 (k-1)->num_pages += md->num_pages; efi_memmap_init() 1130 k->num_pages = (efi_md_end(md) - lim) efi_memmap_init() 1137 ae = efi_md_end(md); efi_memmap_init() 1151 if (prev && kmd_end(prev) == md->phys_addr) { efi_memmap_init() 1178 efi_memory_desc_t *md; efi_initialize_iomem_resources() local 1190 md = p; efi_initialize_iomem_resources() 1192 if (md->num_pages == 0) /* should not happen */ efi_initialize_iomem_resources() 1196 switch (md->type) { efi_initialize_iomem_resources() 1207 if (md->attribute & EFI_MEMORY_WP) { efi_initialize_iomem_resources() 1210 } else if (md->attribute == EFI_MEMORY_UC) efi_initialize_iomem_resources() 1246 res->start = 
md->phys_addr; efi_initialize_iomem_resources() 1247 res->end = md->phys_addr + efi_md_size(md) - 1; efi_initialize_iomem_resources() 1282 efi_memory_desc_t *md; kdump_find_rsvd_region() local 1290 md = p; kdump_find_rsvd_region() 1291 if (!efi_wb(md)) kdump_find_rsvd_region() 1293 start = ALIGN(md->phys_addr, alignment); kdump_find_rsvd_region() 1294 end = efi_md_end(md); kdump_find_rsvd_region() 1323 efi_memory_desc_t *md; vmcore_find_descriptor_size() local 1332 md = p; vmcore_find_descriptor_size() 1333 if (efi_wb(md) && md->type == EFI_LOADER_DATA vmcore_find_descriptor_size() 1334 && md->phys_addr == address) { vmcore_find_descriptor_size() 1335 ret = efi_md_size(md); vmcore_find_descriptor_size()
|
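The ia64 walk above leans on two tiny accessors: a descriptor spans md->num_pages EFI pages (always 4 KiB, so EFI_PAGE_SHIFT is 12), its byte size is num_pages << EFI_PAGE_SHIFT and its end is phys_addr plus that size, while efi_wb()/efi_uc() merely test cacheability attribute bits. A minimal, self-contained sketch of that arithmetic follows; the struct is a simplified stand-in for the real efi_memory_desc_t, and the attribute values follow the UEFI spec.

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT   12                    /* EFI pages are always 4 KiB */
    #define EFI_MEMORY_UC    0x0000000000000001ULL /* uncached */
    #define EFI_MEMORY_WB    0x0000000000000008ULL /* write-back cacheable */

    struct efi_md {                /* simplified stand-in for efi_memory_desc_t */
            uint32_t type;
            uint64_t phys_addr;
            uint64_t num_pages;
            uint64_t attribute;
    };

    static uint64_t md_size(const struct efi_md *md)
    {
            return md->num_pages << EFI_PAGE_SHIFT;
    }

    static uint64_t md_end(const struct efi_md *md)
    {
            return md->phys_addr + md_size(md);
    }

    int main(void)
    {
            struct efi_md md = { .phys_addr = 0x100000, .num_pages = 256,
                                 .attribute = EFI_MEMORY_WB };

            /* 256 EFI pages == 1 MiB, so this range ends at 2 MiB */
            printf("WB=%d range [%#llx-%#llx)\n",
                   !!(md.attribute & EFI_MEMORY_WB),
                   (unsigned long long)md.phys_addr,
                   (unsigned long long)md_end(&md));
            return 0;
    }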
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/ |
H A D | Makefile | 4 lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \
|
H A D | lib-md.c | 36 * lnet/lnet/lib-md.c 47 lnet_md_unlink(lnet_libmd_t *md) lnet_md_unlink() argument 49 if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) { lnet_md_unlink() 51 lnet_me_t *me = md->md_me; lnet_md_unlink() 53 md->md_flags |= LNET_MD_FLAG_ZOMBIE; lnet_md_unlink() 60 lnet_ptl_detach_md(me, md); lnet_md_unlink() 66 lnet_res_lh_invalidate(&md->md_lh); lnet_md_unlink() 69 if (md->md_refcount != 0) { lnet_md_unlink() 70 CDEBUG(D_NET, "Queueing unlink of md %p\n", md); lnet_md_unlink() 74 CDEBUG(D_NET, "Unlinking md %p\n", md); lnet_md_unlink() 76 if (md->md_eq != NULL) { lnet_md_unlink() 77 int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); lnet_md_unlink() 79 LASSERT(*md->md_eq->eq_refs[cpt] > 0); lnet_md_unlink() 80 (*md->md_eq->eq_refs[cpt])--; lnet_md_unlink() 83 LASSERT(!list_empty(&md->md_list)); lnet_md_unlink() 84 list_del_init(&md->md_list); lnet_md_unlink() 85 lnet_md_free(md); lnet_md_unlink() 168 lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt) lnet_md_link() argument 172 /* NB we are passed an allocated, but inactive md. lnet_md_link() 185 md->md_eq = lnet_handle2eq(&eq_handle); lnet_md_link() 187 if (md->md_eq == NULL) lnet_md_link() 190 (*md->md_eq->eq_refs[cpt])++; lnet_md_link() 193 lnet_res_lh_initialize(container, &md->md_lh); lnet_md_link() 195 LASSERT(list_empty(&md->md_list)); lnet_md_link() 196 list_add(&md->md_list, &container->rec_active); lnet_md_link() 272 struct lnet_libmd *md; LNetMDAttach() local 287 md = lnet_md_alloc(&umd); LNetMDAttach() 288 if (md == NULL) LNetMDAttach() 291 rc = lnet_md_build(md, &umd, unlink); LNetMDAttach() 304 rc = lnet_md_link(md, umd.eq_handle, cpt); LNetMDAttach() 311 lnet_ptl_attach_md(me, md, &matches, &drops); LNetMDAttach() 313 lnet_md2handle(handle, md); LNetMDAttach() 323 lnet_md_free(md); LNetMDAttach() 349 lnet_libmd_t *md; LNetMDBind() local 364 md = lnet_md_alloc(&umd); LNetMDBind() 365 if (md == NULL) LNetMDBind() 368 rc = lnet_md_build(md, &umd, unlink); LNetMDBind() 374 rc = lnet_md_link(md, umd.eq_handle, cpt); LNetMDBind() 378 lnet_md2handle(handle, md); LNetMDBind() 384 lnet_md_free(md); LNetMDBind() 425 lnet_libmd_t *md; LNetMDUnlink() local 434 md = lnet_handle2md(&mdh); LNetMDUnlink() 435 if (md == NULL) { LNetMDUnlink() 440 md->md_flags |= LNET_MD_FLAG_ABORTED; LNetMDUnlink() 444 if (md->md_eq != NULL && md->md_refcount == 0) { LNetMDUnlink() 445 lnet_build_unlink_event(md, &ev); LNetMDUnlink() 446 lnet_eq_enqueue_event(md->md_eq, &ev); LNetMDUnlink() 449 lnet_md_unlink(md); LNetMDUnlink()
|
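lnet_md_unlink() above illustrates a deferred-teardown idiom: the first caller flags the MD as a zombie and detaches it, but the memory is only freed once md_refcount drops to zero, so in-flight messages keep a valid descriptor. A stripped-down, user-space sketch of that pattern (the type and names here are hypothetical, not the LNet ones):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mdesc {                 /* hypothetical stand-in for lnet_libmd_t */
            int  refcount;         /* messages currently using this MD */
            bool zombie;           /* unlink requested, free deferred */
    };

    /* First call marks the MD dead; it is only freed once no user remains. */
    static void md_unlink(struct mdesc *md)
    {
            if (!md->zombie)
                    md->zombie = true;

            if (md->refcount != 0) {
                    printf("queueing unlink, %d user(s) left\n", md->refcount);
                    return;
            }
            printf("freeing md\n");
            free(md);
    }

    /* A message finishing with the MD drops its reference and retries unlink. */
    static void md_put(struct mdesc *md)
    {
            md->refcount--;
            if (md->zombie)
                    md_unlink(md);
    }

    int main(void)
    {
            struct mdesc *md = calloc(1, sizeof(*md));

            md->refcount = 1;      /* one message in flight */
            md_unlink(md);         /* deferred: reference still held */
            md_put(md);            /* last user gone -> freed now */
            return 0;
    }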
H A D | lib-me.c | 232 lnet_libmd_t *md; LNetMEUnlink() local 248 md = me->me_md; LNetMEUnlink() 249 if (md != NULL) { LNetMEUnlink() 250 md->md_flags |= LNET_MD_FLAG_ABORTED; LNetMEUnlink() 251 if (md->md_eq != NULL && md->md_refcount == 0) { LNetMEUnlink() 252 lnet_build_unlink_event(md, &ev); LNetMEUnlink() 253 lnet_eq_enqueue_event(md->md_eq, &ev); LNetMEUnlink() 271 lnet_libmd_t *md = me->me_md; lnet_me_unlink() local 274 lnet_ptl_detach_md(me, md); lnet_me_unlink() 275 lnet_md_unlink(md); lnet_me_unlink() 292 CWARN("\tMD\t= %p\n", me->md);
|
H A D | lib-move.c | 604 lnet_libmd_t *md = msg->msg_md; lnet_setpayloadbuffer() local 608 LASSERT(md != NULL); lnet_setpayloadbuffer() 613 msg->msg_niov = md->md_niov; lnet_setpayloadbuffer() 614 if ((md->md_options & LNET_MD_KIOV) != 0) lnet_setpayloadbuffer() 615 msg->msg_kiov = md->md_iov.kiov; lnet_setpayloadbuffer() 617 msg->msg_iov = md->md_iov.iov; lnet_setpayloadbuffer() 1500 lnet_libmd_t *md; lnet_parse_reply() local 1512 md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd); lnet_parse_reply() 1513 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { lnet_parse_reply() 1516 (md == NULL) ? "invalid" : "inactive", lnet_parse_reply() 1519 if (md != NULL && md->md_me != NULL) lnet_parse_reply() 1521 md->md_me->me_portal); lnet_parse_reply() 1527 LASSERT(md->md_offset == 0); lnet_parse_reply() 1530 mlength = min_t(uint, rlength, md->md_length); lnet_parse_reply() 1533 (md->md_options & LNET_MD_TRUNCATE) == 0) { lnet_parse_reply() 1542 CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n", lnet_parse_reply() 1546 lnet_msg_attach_md(msg, md, 0, mlength); lnet_parse_reply() 1564 lnet_libmd_t *md; lnet_parse_ack() local 1578 md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd); lnet_parse_ack() 1579 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { lnet_parse_ack() 1584 (md == NULL) ? "invalid" : "inactive", lnet_parse_ack() 1587 if (md != NULL && md->md_me != NULL) lnet_parse_ack() 1589 md->md_me->me_portal); lnet_parse_ack() 1595 CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n", lnet_parse_ack() 1599 lnet_msg_attach_md(msg, md, 0, 0); lnet_parse_ack() 1672 CWARN(" Ptl index %d, ack md %#llx.%#llx, match bits %llu\n", lnet_print_hdr() 1683 CWARN(" Ptl index %d, return md %#llx.%#llx, match bits %llu\n", lnet_print_hdr() 1694 CWARN(" dst md %#llx.%#llx, manipulated length %d\n", lnet_print_hdr() 1701 CWARN(" dst md %#llx.%#llx, length %d\n", lnet_print_hdr() 1992 /* md won't disappear under me, since each msg lnet_recv_delayed_msg_list() 2064 struct lnet_libmd *md; LNetPut() local 2089 md = lnet_handle2md(&mdh); LNetPut() 2090 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { LNetPut() 2093 md == NULL ? -1 : md->md_threshold); LNetPut() 2094 if (md != NULL && md->md_me != NULL) LNetPut() 2096 md->md_me->me_portal); LNetPut() 2105 lnet_msg_attach_md(msg, md, 0, 0); LNetPut() 2107 lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length); LNetPut() 2119 md->md_lh.lh_cookie; LNetPut() 2146 /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This lnet_create_reply_msg() 2182 CDEBUG(D_NET, "%s: Reply from %s md %p\n", lnet_create_reply_msg() 2264 struct lnet_libmd *md; LNetGet() local 2288 md = lnet_handle2md(&mdh); LNetGet() 2289 if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { LNetGet() 2292 md == NULL ? -1 : md->md_threshold); LNetGet() 2293 if (md != NULL && md->md_me != NULL) LNetGet() 2295 md->md_me->me_portal); LNetGet() 2305 lnet_msg_attach_md(msg, md, 0, 0); LNetGet() 2312 msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length); LNetGet() 2318 md->md_lh.lh_cookie; LNetGet()
|
H A D | lib-ptl.c | 139 lnet_try_match_md(lnet_libmd_t *md, lnet_try_match_md() argument 146 lnet_me_t *me = md->md_me; lnet_try_match_md() 149 if (lnet_md_exhausted(md)) lnet_try_match_md() 153 if ((md->md_options & info->mi_opc) == 0) lnet_try_match_md() 171 if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0) lnet_try_match_md() 172 offset = md->md_offset; lnet_try_match_md() 176 if ((md->md_options & LNET_MD_MAX_SIZE) != 0) { lnet_try_match_md() 177 mlength = md->md_max_size; lnet_try_match_md() 178 LASSERT(md->md_offset + mlength <= md->md_length); lnet_try_match_md() 180 mlength = md->md_length - offset; lnet_try_match_md() 185 } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) { lnet_try_match_md() 189 info->mi_rlength, md->md_length - offset, mlength); lnet_try_match_md() 195 CDEBUG(D_NET, "Incoming %s index %x from %s of length %d/%d into md %#llx [%d] + %d\n", lnet_try_match_md() 198 info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset); lnet_try_match_md() 200 lnet_msg_attach_md(msg, md, offset, mlength); lnet_try_match_md() 201 md->md_offset = offset + mlength; lnet_try_match_md() 203 if (!lnet_md_exhausted(md)) lnet_try_match_md() 207 * We bumped md->md_refcount above so the MD just gets flagged lnet_try_match_md() 209 if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0) lnet_try_match_md() 210 lnet_md_unlink(md); lnet_try_match_md() 612 lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md) lnet_ptl_detach_md() argument 614 LASSERT(me->me_md == md && md->md_me == me); lnet_ptl_detach_md() 617 md->md_me = NULL; lnet_ptl_detach_md() 622 lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, lnet_ptl_attach_md() argument 633 LASSERT(md->md_refcount == 0); /* a brand new MD */ lnet_ptl_attach_md() 635 me->me_md = md; lnet_ptl_attach_md() 636 md->md_me = me; lnet_ptl_attach_md() 638 cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); lnet_ptl_attach_md() 665 rc = lnet_try_match_md(md, &info, msg); list_for_each_entry_safe()
|
H A D | lib-msg.c | 46 lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev) lnet_build_unlink_event() argument 53 lnet_md_deconstruct(md, &ev->md); lnet_build_unlink_event() 54 lnet_md2handle(&ev->md_handle, md); lnet_build_unlink_event() 308 lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, lnet_msg_attach_md() argument 318 msg->msg_md = md; lnet_msg_attach_md() 324 md->md_refcount++; lnet_msg_attach_md() 325 if (md->md_threshold != LNET_MD_THRESH_INF) { lnet_msg_attach_md() 326 LASSERT(md->md_threshold > 0); lnet_msg_attach_md() 327 md->md_threshold--; lnet_msg_attach_md() 331 lnet_md2handle(&msg->msg_ev.md_handle, md); lnet_msg_attach_md() 332 lnet_md_deconstruct(md, &msg->msg_ev.md); lnet_msg_attach_md() 338 lnet_libmd_t *md = msg->msg_md; lnet_msg_detach_md() local 342 md->md_refcount--; lnet_msg_detach_md() 343 LASSERT(md->md_refcount >= 0); lnet_msg_detach_md() 345 unlink = lnet_md_unlinkable(md); lnet_msg_detach_md() 346 if (md->md_eq != NULL) { lnet_msg_detach_md() 349 lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); lnet_msg_detach_md() 353 lnet_md_unlink(md); lnet_msg_detach_md()
|
H A D | api-ni.c | 1582 lnet_md_t md = { NULL }; lnet_ping_target_init() local 1614 /* initialize md content */ lnet_ping_target_init() 1617 md.start = the_lnet.ln_ping_info; lnet_ping_target_init() 1618 md.length = infosz; lnet_ping_target_init() 1619 md.threshold = LNET_MD_THRESH_INF; lnet_ping_target_init() 1620 md.max_size = 0; lnet_ping_target_init() 1621 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | lnet_ping_target_init() 1623 md.user_ptr = NULL; lnet_ping_target_init() 1624 md.eq_handle = the_lnet.ln_ping_target_eq; lnet_ping_target_init() 1626 rc = LNetMDAttach(meh, md, lnet_ping_target_init() 1657 /* NB md could be busy; this just starts the unlink */ lnet_ping_target_fini() 1690 lnet_md_t md = { NULL }; lnet_ping() local 1724 /* initialize md content */ lnet_ping() 1725 md.start = info; lnet_ping() 1726 md.length = infosz; lnet_ping() 1727 md.threshold = 2; /*GET/REPLY*/ lnet_ping() 1728 md.max_size = 0; lnet_ping() 1729 md.options = LNET_MD_TRUNCATE; lnet_ping() 1730 md.user_ptr = NULL; lnet_ping() 1731 md.eq_handle = eqh; lnet_ping() 1733 rc = LNetMDBind(md, LNET_UNLINK, &mdh); lnet_ping()
|
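lnet_ping_target_init() above shows the usual way an lnet_md_t is prepared before LNetMDAttach(): every field (buffer, length, threshold, options, event queue) is filled explicitly rather than left to defaults. The fragment below mirrors those assignments against a mock structure; the field names match the listing, but the struct and the option/threshold constants are simplified placeholders, not the real LNet definitions.

    #include <stddef.h>

    struct mock_md {               /* simplified stand-in for lnet_md_t */
            void         *start;
            unsigned int  length;
            int           threshold;
            unsigned int  max_size;
            unsigned int  options;
            void         *user_ptr;
            /* eq_handle omitted: opaque handle type in the real API */
    };

    #define MOCK_THRESH_INF  (-1)  /* placeholder for LNET_MD_THRESH_INF */
    #define MOCK_OP_GET      0x1   /* placeholder option bits */
    #define MOCK_TRUNCATE    0x2

    static void fill_ping_md(struct mock_md *md, void *info, unsigned int infosz)
    {
            md->start     = info;            /* reply buffer */
            md->length    = infosz;
            md->threshold = MOCK_THRESH_INF; /* never auto-unlink */
            md->max_size  = 0;
            md->options   = MOCK_OP_GET | MOCK_TRUNCATE;
            md->user_ptr  = NULL;
    }

    int main(void)
    {
            static char buf[256];
            struct mock_md md;

            fill_ping_md(&md, buf, sizeof(buf));
            return md.length == sizeof(buf) ? 0 : 1;
    }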
H A D | router.c | 690 lnet_rc_data_t *rcd = event->md.user_ptr; lnet_router_checker_event()
|
/linux-4.4.14/arch/mips/pci/ |
H A D | msi-xlp.c | 134 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); xlp_msi_enable() local 139 spin_lock_irqsave(&md->msi_lock, flags); xlp_msi_enable() 140 md->msi_enabled_mask |= 1u << vec; xlp_msi_enable() 142 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, xlp_msi_enable() 143 md->msi_enabled_mask); xlp_msi_enable() 145 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); xlp_msi_enable() 146 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_msi_enable() 151 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); xlp_msi_disable() local 156 spin_lock_irqsave(&md->msi_lock, flags); xlp_msi_disable() 157 md->msi_enabled_mask &= ~(1u << vec); xlp_msi_disable() 159 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, xlp_msi_disable() 160 md->msi_enabled_mask); xlp_msi_disable() 162 nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); xlp_msi_disable() 163 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_msi_disable() 168 struct xlp_msi_data *md = irq_data_get_irq_chip_data(d); xlp_msi_mask_ack() local 177 nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec); xlp_msi_mask_ack() 179 nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); xlp_msi_mask_ack() 207 struct xlp_msi_data *md; xlp_msix_mask_ack() local 214 md = irq_data_get_irq_chip_data(d); xlp_msix_mask_ack() 224 nlm_write_reg(md->lnkbase, status_reg, 1u << bit); xlp_msix_mask_ack() 227 nlm_pic_ack(md->node->picbase, xlp_msix_mask_ack() 296 struct xlp_msi_data *md; xlp_setup_msi() local 305 md = irq_get_chip_data(xirq); xlp_setup_msi() 308 spin_lock_irqsave(&md->msi_lock, flags); xlp_setup_msi() 309 if (md->msi_alloc_mask == 0) { xlp_setup_msi() 322 msivec = fls(md->msi_alloc_mask); xlp_setup_msi() 324 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_setup_msi() 327 md->msi_alloc_mask |= (1u << msivec); xlp_setup_msi() 328 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_setup_msi() 403 struct xlp_msi_data *md; xlp_setup_msix() local 412 md = irq_get_chip_data(xirq); xlp_setup_msix() 415 spin_lock_irqsave(&md->msi_lock, flags); xlp_setup_msix() 417 if (md->msix_alloc_mask == 0) xlp_setup_msix() 421 t = fls(md->msix_alloc_mask); xlp_setup_msix() 423 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_setup_msix() 426 md->msix_alloc_mask |= (1u << t); xlp_setup_msix() 427 spin_unlock_irqrestore(&md->msi_lock, flags); xlp_setup_msix() 469 struct xlp_msi_data *md; xlp_init_node_msi_irqs() local 476 md = kzalloc(sizeof(*md), GFP_KERNEL); xlp_init_node_msi_irqs() 477 spin_lock_init(&md->msi_lock); xlp_init_node_msi_irqs() 478 md->msi_enabled_mask = 0; xlp_init_node_msi_irqs() 479 md->msi_alloc_mask = 0; xlp_init_node_msi_irqs() 480 md->msix_alloc_mask = 0; xlp_init_node_msi_irqs() 481 md->node = nodep; xlp_init_node_msi_irqs() 482 md->lnkbase = nlm_get_pcie_base(node, link); xlp_init_node_msi_irqs() 488 irq_set_chip_data(i, md); xlp_init_node_msi_irqs() 495 nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i + xlp_init_node_msi_irqs() 511 irq_set_chip_data(irq, md); xlp_init_node_msi_irqs() 517 struct xlp_msi_data *md; nlm_dispatch_msi() local 523 md = irq_get_chip_data(irqbase); nlm_dispatch_msi() 525 status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) & nlm_dispatch_msi() 526 md->msi_enabled_mask; nlm_dispatch_msi() 528 status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & nlm_dispatch_msi() 529 md->msi_enabled_mask; nlm_dispatch_msi() 539 nlm_pic_ack(md->node->picbase, nlm_dispatch_msi() 542 nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); nlm_dispatch_msi() 547 struct xlp_msi_data *md; 
nlm_dispatch_msix() local 553 md = irq_get_chip_data(irqbase); nlm_dispatch_msix() 555 status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link)); nlm_dispatch_msix() 557 status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); nlm_dispatch_msix()
|
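xlp_setup_msi() above hands out MSI vectors with a small bitmask allocator: fls() of the allocation mask gives the next free index (0 when the mask is empty), and the chosen bit is then set under the per-link spinlock. A user-space sketch of the same allocator, with GCC's builtin standing in for the kernel fls() and the locking omitted for brevity:

    #include <stdint.h>
    #include <stdio.h>

    /* fls(): index of the highest set bit, 1-based; 0 when no bit is set. */
    static int fls32(uint32_t v)
    {
            return v ? 32 - __builtin_clz(v) : 0;
    }

    /* Allocate the next MSI vector from a 32-entry bitmask, or -1 if full. */
    static int alloc_vec(uint32_t *alloc_mask)
    {
            int vec = fls32(*alloc_mask);   /* vectors are handed out in order */

            if (vec >= 32)
                    return -1;
            *alloc_mask |= 1u << vec;
            return vec;
    }

    int main(void)
    {
            uint32_t mask = 0;
            int a = alloc_vec(&mask);
            int b = alloc_vec(&mask);
            int c = alloc_vec(&mask);

            printf("%d %d %d\n", a, b, c);  /* prints: 0 1 2 */
            return 0;
    }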
/linux-4.4.14/arch/arm64/kernel/ |
H A D | efi.c | 53 static int __init is_normal_ram(efi_memory_desc_t *md) is_normal_ram() argument 55 if (md->attribute & EFI_MEMORY_WB) is_normal_ram() 67 efi_memory_desc_t *md; efi_to_phys() local 69 for_each_efi_memory_desc(&memmap, md) { efi_to_phys() 70 if (!(md->attribute & EFI_MEMORY_RUNTIME)) efi_to_phys() 72 if (md->virt_addr == 0) efi_to_phys() 75 if (md->virt_addr <= addr && efi_to_phys() 76 (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) efi_to_phys() 77 return md->phys_addr + addr - md->virt_addr; efi_to_phys() 147 static __init int is_reserve_region(efi_memory_desc_t *md) is_reserve_region() argument 149 switch (md->type) { is_reserve_region() 160 return is_normal_ram(md); is_reserve_region() 165 efi_memory_desc_t *md; reserve_regions() local 171 for_each_efi_memory_desc(&memmap, md) { reserve_regions() 172 paddr = md->phys_addr; reserve_regions() 173 npages = md->num_pages; reserve_regions() 180 efi_md_typeattr_format(buf, sizeof(buf), md)); reserve_regions() 186 if (is_normal_ram(md)) reserve_regions() 189 if (is_reserve_region(md)) { reserve_regions() 237 efi_memory_desc_t *md; efi_virtmap_init() local 241 for_each_efi_memory_desc(&memmap, md) { efi_virtmap_init() 244 if (!(md->attribute & EFI_MEMORY_RUNTIME)) efi_virtmap_init() 246 if (md->virt_addr == 0) efi_virtmap_init() 250 md->phys_addr, (void *)md->virt_addr); efi_virtmap_init() 257 if (!is_normal_ram(md)) efi_virtmap_init() 259 else if (md->type == EFI_RUNTIME_SERVICES_CODE || efi_virtmap_init() 260 !PAGE_ALIGNED(md->phys_addr)) efi_virtmap_init() 265 create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr, efi_virtmap_init() 266 md->num_pages << EFI_PAGE_SHIFT, efi_virtmap_init()
|
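efi_to_phys() above does the reverse mapping for runtime services: it scans the descriptors for one whose virtual window contains the address and applies that region's constant phys/virt offset. A self-contained sketch of the lookup, again with a simplified descriptor struct and 4 KiB EFI pages:

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12

    struct desc {                  /* simplified stand-in for efi_memory_desc_t */
            uint64_t phys_addr;
            uint64_t virt_addr;    /* 0 if the region was never mapped */
            uint64_t num_pages;
    };

    /* Translate a runtime virtual address back to physical, or ~0 if unmapped. */
    static uint64_t virt_to_phys_efi(const struct desc *map, int n, uint64_t addr)
    {
            for (int i = 0; i < n; i++) {
                    const struct desc *md = &map[i];
                    uint64_t size = md->num_pages << EFI_PAGE_SHIFT;

                    if (md->virt_addr == 0)
                            continue;
                    if (addr >= md->virt_addr && addr - md->virt_addr < size)
                            return md->phys_addr + (addr - md->virt_addr);
            }
            return ~0ULL;
    }

    int main(void)
    {
            struct desc map[] = {
                    { .phys_addr = 0x80000000, .virt_addr = 0xffff000000200000,
                      .num_pages = 16 },     /* 64 KiB runtime region */
            };

            printf("%#llx\n", (unsigned long long)
                   virt_to_phys_efi(map, 1, 0xffff000000201000)); /* 0x80001000 */
            return 0;
    }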
/linux-4.4.14/drivers/firmware/efi/ |
H A D | fake_mem.c | 61 efi_memory_desc_t *md; efi_fake_memmap() local 72 md = old; efi_fake_memmap() 73 start = md->phys_addr; efi_fake_memmap() 74 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; efi_fake_memmap() 117 md = new; efi_fake_memmap() 118 start = md->phys_addr; efi_fake_memmap() 119 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; efi_fake_memmap() 128 md->attribute |= m_attr; efi_fake_memmap() 133 md->attribute |= m_attr; efi_fake_memmap() 134 md->num_pages = (m_end - md->phys_addr + 1) >> efi_fake_memmap() 139 md = new; efi_fake_memmap() 140 md->phys_addr = m_end + 1; efi_fake_memmap() 141 md->num_pages = (end - md->phys_addr + 1) >> efi_fake_memmap() 147 md->num_pages = (m_start - md->phys_addr) >> efi_fake_memmap() 152 md = new; efi_fake_memmap() 153 md->attribute |= m_attr; efi_fake_memmap() 154 md->phys_addr = m_start; efi_fake_memmap() 155 md->num_pages = (m_end - m_start + 1) >> efi_fake_memmap() 160 md = new; efi_fake_memmap() 161 md->phys_addr = m_end + 1; efi_fake_memmap() 162 md->num_pages = (end - m_end) >> efi_fake_memmap() 169 md->num_pages = (m_start - md->phys_addr) >> efi_fake_memmap() 174 md = new; efi_fake_memmap() 175 md->phys_addr = m_start; efi_fake_memmap() 176 md->num_pages = (end - md->phys_addr + 1) >> efi_fake_memmap() 178 md->attribute |= m_attr; efi_fake_memmap()
|
H A D | efi.c | 280 efi_memory_desc_t *md; efi_mem_desc_lookup() local 290 md = early_memremap(p, sizeof (*md)); efi_mem_desc_lookup() 291 if (!md) { efi_mem_desc_lookup() 293 &p, sizeof (*md)); efi_mem_desc_lookup() 297 if (!(md->attribute & EFI_MEMORY_RUNTIME) && efi_mem_desc_lookup() 298 md->type != EFI_BOOT_SERVICES_DATA && efi_mem_desc_lookup() 299 md->type != EFI_RUNTIME_SERVICES_DATA) { efi_mem_desc_lookup() 300 early_memunmap(md, sizeof (*md)); efi_mem_desc_lookup() 304 size = md->num_pages << EFI_PAGE_SHIFT; efi_mem_desc_lookup() 305 end = md->phys_addr + size; efi_mem_desc_lookup() 306 if (phys_addr >= md->phys_addr && phys_addr < end) { efi_mem_desc_lookup() 307 memcpy(out_md, md, sizeof(*out_md)); efi_mem_desc_lookup() 308 early_memunmap(md, sizeof (*md)); efi_mem_desc_lookup() 312 early_memunmap(md, sizeof (*md)); efi_mem_desc_lookup() 321 u64 __init efi_mem_desc_end(efi_memory_desc_t *md) efi_mem_desc_end() argument 323 u64 size = md->num_pages << EFI_PAGE_SHIFT; efi_mem_desc_end() 324 u64 end = md->phys_addr + size; efi_mem_desc_end() 343 efi_memory_desc_t *md = p; efi_lookup_mapped_addr() local 344 u64 size = md->num_pages << EFI_PAGE_SHIFT; efi_lookup_mapped_addr() 345 u64 end = md->phys_addr + size; efi_lookup_mapped_addr() 346 if (!(md->attribute & EFI_MEMORY_RUNTIME) && efi_lookup_mapped_addr() 347 md->type != EFI_BOOT_SERVICES_CODE && efi_lookup_mapped_addr() 348 md->type != EFI_BOOT_SERVICES_DATA) efi_lookup_mapped_addr() 350 if (!md->virt_addr) efi_lookup_mapped_addr() 352 if (phys_addr >= md->phys_addr && phys_addr < end) { efi_lookup_mapped_addr() 353 phys_addr += md->virt_addr - md->phys_addr; efi_lookup_mapped_addr() 592 const efi_memory_desc_t *md) efi_md_typeattr_format() 599 if (md->type >= ARRAY_SIZE(memory_type_name)) efi_md_typeattr_format() 600 type_len = snprintf(pos, size, "[type=%u", md->type); efi_md_typeattr_format() 604 memory_type_name[md->type]); efi_md_typeattr_format() 611 attr = md->attribute; efi_md_typeattr_format() 650 efi_memory_desc_t *md; efi_mem_attributes() local 658 md = p; efi_mem_attributes() 659 if ((md->phys_addr <= phys_addr) && efi_mem_attributes() 660 (phys_addr < (md->phys_addr + efi_mem_attributes() 661 (md->num_pages << EFI_PAGE_SHIFT)))) efi_mem_attributes() 662 return md->attribute; efi_mem_attributes() 591 efi_md_typeattr_format(char *buf, size_t size, const efi_memory_desc_t *md) efi_md_typeattr_format() argument
|
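efi_md_typeattr_format() in the listing above renders a descriptor's type and attribute word as text for the memory-map dump. A reduced sketch of the attribute half, decoding a few UEFI-defined bits into a "RUN|WB"-style string (bit values per the UEFI spec; only a subset is handled here, and the output format is illustrative rather than the kernel's exact one):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A few attribute bits as defined by the UEFI spec. */
    #define EFI_MEMORY_UC       0x0000000000000001ULL
    #define EFI_MEMORY_WC       0x0000000000000002ULL
    #define EFI_MEMORY_WT       0x0000000000000004ULL
    #define EFI_MEMORY_WB       0x0000000000000008ULL
    #define EFI_MEMORY_RUNTIME  0x8000000000000000ULL

    /* Render a subset of attribute bits as pipe-separated names. */
    static void attr_to_str(uint64_t attr, char *buf, size_t len)
    {
            static const struct { uint64_t bit; const char *name; } bits[] = {
                    { EFI_MEMORY_RUNTIME, "RUN" }, { EFI_MEMORY_WB, "WB" },
                    { EFI_MEMORY_WT,      "WT"  }, { EFI_MEMORY_WC, "WC" },
                    { EFI_MEMORY_UC,      "UC"  },
            };

            buf[0] = '\0';
            for (size_t i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
                    if (!(attr & bits[i].bit))
                            continue;
                    if (buf[0])
                            strncat(buf, "|", len - strlen(buf) - 1);
                    strncat(buf, bits[i].name, len - strlen(buf) - 1);
            }
    }

    int main(void)
    {
            char buf[64];

            attr_to_str(EFI_MEMORY_RUNTIME | EFI_MEMORY_WB, buf, sizeof(buf));
            printf("%s\n", buf);   /* RUN|WB */
            return 0;
    }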
H A D | runtime-map.c | 22 efi_memory_desc_t md; member in struct:efi_runtime_map_entry 40 return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); type_show() 43 #define EFI_RUNTIME_FIELD(var) entry->md.var 127 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, add_sysfs_runtime_map_entry()
|
H A D | esrt.c | 249 efi_memory_desc_t md; efi_esrt_init() local 257 rc = efi_mem_desc_lookup(efi.esrt, &md); efi_esrt_init() 263 max = efi_mem_desc_end(&md); efi_esrt_init()
|
/linux-4.4.14/init/ |
H A D | do_mounts_md.c | 18 * When md (and any require personalities) are compiled into the kernel 22 * with md=..... 51 * the MD devices (by specifying multiple "md=" lines) 54 * md=n,0,factor,fault,device-list uses RAID0 for device n 55 * md=n,-1,factor,fault,device-list uses LINEAR for device n 56 * md=n,device-list reads a RAID superblock from the devices 75 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); md_setup() 82 printk(KERN_WARNING "md: md=%s%d, Specified more than once. " md_setup() 87 printk(KERN_WARNING "md: md=%s%d - too many md initialisations\n", partitioned?"d":"", minor); md_setup() 97 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); md_setup() 117 printk(KERN_INFO "md: Will configure md%d (%s) from %s, below.\n", md_setup() 143 sprintf(name, "/dev/md%s%d", partitioned?"_d":"", minor); md_setup_drive() 166 printk(KERN_WARNING "md: Unknown device name: %s\n", devname); md_setup_drive() 179 printk(KERN_INFO "md: Loading md%s%d: %s\n", md_setup_drive() 185 printk(KERN_ERR "md: open failed - cannot start " md_setup_drive() 191 "md: Ignoring md=%d, already autodetected. (Use raid=noautodetect)\n", md_setup_drive() 238 printk(KERN_WARNING "md: starting md%d failed\n", minor); md_setup_drive() 242 * boot a kernel with devfs compiled in from partitioned md md_setup_drive() 281 __setup("md=", md_setup); 291 printk(KERN_INFO "md: Waiting for all devices to be available before autodetect\n"); autodetect_raid() 292 printk(KERN_INFO "md: If you don't use raid, use raid=noautodetect\n"); autodetect_raid() 308 printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=autodetect will force)\n"); md_run_setup()
|
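The comment block in do_mounts_md.c spells out the md= boot syntax parsed by md_setup(): either an explicit geometry (md=n,level,factor,fault,device-list, with level 0 meaning RAID0 and -1 meaning LINEAR) or a plain device list whose RAID superblocks describe the array. Two illustrative kernel command-line fragments, with device names and the chunk-size factor chosen purely as examples:

    # assemble /dev/md0 from the RAID superblocks found on the listed partitions
    md=0,/dev/sda1,/dev/sdb1

    # build /dev/md1 as RAID0 with an explicit chunk-size factor and no fault injection
    md=1,0,4,0,/dev/sdc1,/dev/sdd1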
H A D | do_mounts.c | 264 * after revalidating the disk, like partitioned md devices name_to_dev_t()
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | pers.c | 45 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, ptlrpc_fill_bulk_md() argument 52 LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | ptlrpc_fill_bulk_md() 55 md->options |= LNET_MD_KIOV; ptlrpc_fill_bulk_md() 56 md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV); ptlrpc_fill_bulk_md() 57 md->length = min_t(unsigned int, LNET_MAX_IOV, md->length); ptlrpc_fill_bulk_md() 59 md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md() 61 md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md()
|
H A D | niobuf.c | 56 lnet_md_t md; ptl_send_buf() local 61 md.start = base; ptl_send_buf() 62 md.length = len; ptl_send_buf() 63 md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1; ptl_send_buf() 64 md.options = PTLRPC_MD_OPTIONS; ptl_send_buf() 65 md.user_ptr = cbid; ptl_send_buf() 66 md.eq_handle = ptlrpc_eq_h; ptl_send_buf() 75 rc = LNetMDBind(md, LNET_UNLINK, mdh); ptl_send_buf() 123 lnet_md_t md; ptlrpc_register_bulk() local 168 md.user_ptr = &desc->bd_cbid; ptlrpc_register_bulk() 169 md.eq_handle = ptlrpc_eq_h; ptlrpc_register_bulk() 170 md.threshold = 1; /* PUT or GET */ ptlrpc_register_bulk() 173 md.options = PTLRPC_MD_OPTIONS | ptlrpc_register_bulk() 176 ptlrpc_fill_bulk_md(&md, desc, posted_md); ptlrpc_register_bulk() 188 rc = LNetMDAttach(me_h, md, LNET_UNLINK, ptlrpc_register_bulk() 684 lnet_md_t md; ptlrpc_register_rqbd() local 708 md.start = rqbd->rqbd_buffer; ptlrpc_register_rqbd() 709 md.length = service->srv_buf_size; ptlrpc_register_rqbd() 710 md.max_size = service->srv_max_req_size; ptlrpc_register_rqbd() 711 md.threshold = LNET_MD_THRESH_INF; ptlrpc_register_rqbd() 712 md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE; ptlrpc_register_rqbd() 713 md.user_ptr = &rqbd->rqbd_cbid; ptlrpc_register_rqbd() 714 md.eq_handle = ptlrpc_eq_h; ptlrpc_register_rqbd() 716 rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h); ptlrpc_register_rqbd()
|
H A D | events.c | 56 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; request_out_callback() 89 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; reply_in_callback() 95 LASSERT(ev->md.start == req->rq_repbuf); reply_in_callback() 99 LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); reply_in_callback() 175 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; client_bulk_callback() 284 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; request_in_callback() 292 LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer); request_in_callback() 293 LASSERT((char *)ev->md.start + ev->offset + ev->mlength <= request_in_callback() 327 req->rq_reqbuf = ev->md.start + ev->offset; request_in_callback() 383 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; reply_out_callback() 420 struct ptlrpc_cb_id *cbid = ev->md.user_ptr; ptlrpc_master_callback()
|
H A D | ptlrpc_internal.h | 229 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
|
/linux-4.4.14/drivers/mmc/card/ |
H A D | block.c | 141 struct mmc_blk_data *md); 159 struct mmc_blk_data *md; mmc_blk_get() local 162 md = disk->private_data; mmc_blk_get() 163 if (md && md->usage == 0) mmc_blk_get() 164 md = NULL; mmc_blk_get() 165 if (md) mmc_blk_get() 166 md->usage++; mmc_blk_get() 169 return md; mmc_blk_get() 182 static void mmc_blk_put(struct mmc_blk_data *md) mmc_blk_put() argument 185 md->usage--; mmc_blk_put() 186 if (md->usage == 0) { mmc_blk_put() 187 int devidx = mmc_get_devidx(md->disk); mmc_blk_put() 188 blk_cleanup_queue(md->queue.queue); mmc_blk_put() 192 put_disk(md->disk); mmc_blk_put() 193 kfree(md); mmc_blk_put() 202 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); power_ro_lock_show() local 203 struct mmc_card *card = md->queue.card; power_ro_lock_show() 213 mmc_blk_put(md); power_ro_lock_show() 222 struct mmc_blk_data *md, *part_md; power_ro_lock_store() local 232 md = mmc_blk_get(dev_to_disk(dev)); power_ro_lock_store() 233 card = md->queue.card; power_ro_lock_store() 242 pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); power_ro_lock_store() 250 md->disk->disk_name); power_ro_lock_store() 251 set_disk_ro(md->disk, 1); power_ro_lock_store() 253 list_for_each_entry(part_md, &md->part, part) power_ro_lock_store() 260 mmc_blk_put(md); power_ro_lock_store() 268 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); force_ro_show() local 272 md->read_only); force_ro_show() 273 mmc_blk_put(md); force_ro_show() 282 struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); force_ro_store() local 289 set_disk_ro(dev_to_disk(dev), set || md->read_only); force_ro_store() 292 mmc_blk_put(md); force_ro_store() 298 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); mmc_blk_open() local 302 if (md) { mmc_blk_open() 303 if (md->usage == 2) mmc_blk_open() 307 if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_open() 308 mmc_blk_put(md); mmc_blk_open() 319 struct mmc_blk_data *md = disk->private_data; mmc_blk_release() local 322 mmc_blk_put(md); mmc_blk_release() 467 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, __mmc_blk_ioctl_cmd() argument 478 if (!card || !md || !idata) __mmc_blk_ioctl_cmd() 481 if (md->area_type & MMC_BLK_DATA_AREA_RPMB) __mmc_blk_ioctl_cmd() 526 err = mmc_blk_part_switch(card, md); __mmc_blk_ioctl_cmd() 595 struct mmc_blk_data *md; mmc_blk_ioctl_cmd() local 611 md = mmc_blk_get(bdev->bd_disk); mmc_blk_ioctl_cmd() 612 if (!md) { mmc_blk_ioctl_cmd() 617 card = md->queue.card; mmc_blk_ioctl_cmd() 625 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata); mmc_blk_ioctl_cmd() 632 mmc_blk_put(md); mmc_blk_ioctl_cmd() 645 struct mmc_blk_data *md; mmc_blk_ioctl_multi_cmd() local 677 md = mmc_blk_get(bdev->bd_disk); mmc_blk_ioctl_multi_cmd() 678 if (!md) mmc_blk_ioctl_multi_cmd() 681 card = md->queue.card; mmc_blk_ioctl_multi_cmd() 690 ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]); mmc_blk_ioctl_multi_cmd() 699 mmc_blk_put(md); mmc_blk_ioctl_multi_cmd() 744 struct mmc_blk_data *md) mmc_blk_part_switch() 749 if (main_md->part_curr == md->part_type) mmc_blk_part_switch() 756 part_config |= md->part_type; mmc_blk_part_switch() 767 main_md->part_curr = md->part_type; mmc_blk_part_switch() 1100 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, mmc_blk_reset() argument 1105 if (md->reset_done & type) mmc_blk_reset() 1108 md->reset_done |= type; mmc_blk_reset() 1117 part_err = mmc_blk_part_switch(host->card, md); mmc_blk_reset() 1129 static inline void 
mmc_blk_reset_success(struct mmc_blk_data *md, int type) mmc_blk_reset_success() argument 1131 md->reset_done &= ~type; mmc_blk_reset_success() 1136 struct mmc_blk_data *md = mq->data; mmc_access_rpmb() local 1140 if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) mmc_access_rpmb() 1148 struct mmc_blk_data *md = mq->data; mmc_blk_issue_discard_rq() local 1149 struct mmc_card *card = md->queue.card; mmc_blk_issue_discard_rq() 1180 if (err == -EIO && !mmc_blk_reset(md, card->host, type)) mmc_blk_issue_discard_rq() 1183 mmc_blk_reset_success(md, type); mmc_blk_issue_discard_rq() 1192 struct mmc_blk_data *md = mq->data; mmc_blk_issue_secdiscard_rq() local 1193 struct mmc_card *card = md->queue.card; mmc_blk_issue_secdiscard_rq() 1246 if (err && !mmc_blk_reset(md, card->host, type)) mmc_blk_issue_secdiscard_rq() 1249 mmc_blk_reset_success(md, type); mmc_blk_issue_secdiscard_rq() 1258 struct mmc_blk_data *md = mq->data; mmc_blk_issue_flush() local 1259 struct mmc_card *card = md->queue.card; mmc_blk_issue_flush() 1472 struct mmc_blk_data *md = mq->data; mmc_blk_rw_rq_prep() local 1481 (md->flags & MMC_BLK_REL_WR); mmc_blk_rw_rq_prep() 1583 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && mmc_blk_rw_rq_prep() 1645 struct mmc_blk_data *md = mq->data; mmc_blk_prep_packed_list() local 1654 if (!(md->flags & MMC_BLK_PACKED_CMD)) mmc_blk_prep_packed_list() 1665 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) mmc_blk_prep_packed_list() 1714 (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) mmc_blk_prep_packed_list() 1755 struct mmc_blk_data *md = mq->data; mmc_blk_packed_hdr_wrq_prep() local 1778 do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); mmc_blk_packed_hdr_wrq_prep() 1832 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, mmc_blk_cmd_err() argument 1935 struct mmc_blk_data *md = mq->data; mmc_blk_issue_rw_rq() local 1936 struct mmc_card *card = md->queue.card; mmc_blk_issue_rw_rq() 1993 mmc_blk_reset_success(md, type); mmc_blk_issue_rw_rq() 2017 ret = mmc_blk_cmd_err(md, card, brq, req, ret); mmc_blk_issue_rw_rq() 2018 if (mmc_blk_reset(md, card->host, type)) mmc_blk_issue_rw_rq() 2029 if (!mmc_blk_reset(md, card->host, type)) mmc_blk_issue_rw_rq() 2035 err = mmc_blk_reset(md, card->host, type); mmc_blk_issue_rw_rq() 2128 struct mmc_blk_data *md = mq->data; mmc_blk_issue_rq() local 2129 struct mmc_card *card = md->queue.card; mmc_blk_issue_rq() 2138 ret = mmc_blk_part_switch(card, md); mmc_blk_issue_rq() 2196 struct mmc_blk_data *md; mmc_blk_alloc_req() local 2204 md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); mmc_blk_alloc_req() 2205 if (!md) { mmc_blk_alloc_req() 2217 md->name_idx = find_first_zero_bit(name_use, max_devices); mmc_blk_alloc_req() 2218 __set_bit(md->name_idx, name_use); mmc_blk_alloc_req() 2220 md->name_idx = ((struct mmc_blk_data *) mmc_blk_alloc_req() 2223 md->area_type = area_type; mmc_blk_alloc_req() 2229 md->read_only = mmc_blk_readonly(card); mmc_blk_alloc_req() 2231 md->disk = alloc_disk(perdev_minors); mmc_blk_alloc_req() 2232 if (md->disk == NULL) { mmc_blk_alloc_req() 2237 spin_lock_init(&md->lock); mmc_blk_alloc_req() 2238 INIT_LIST_HEAD(&md->part); mmc_blk_alloc_req() 2239 md->usage = 1; mmc_blk_alloc_req() 2241 ret = mmc_init_queue(&md->queue, card, &md->lock, subname); mmc_blk_alloc_req() 2245 md->queue.issue_fn = mmc_blk_issue_rq; mmc_blk_alloc_req() 2246 md->queue.data = md; mmc_blk_alloc_req() 2248 md->disk->major = MMC_BLOCK_MAJOR; mmc_blk_alloc_req() 2249 md->disk->first_minor = devidx * 
perdev_minors; mmc_blk_alloc_req() 2250 md->disk->fops = &mmc_bdops; mmc_blk_alloc_req() 2251 md->disk->private_data = md; mmc_blk_alloc_req() 2252 md->disk->queue = md->queue.queue; mmc_blk_alloc_req() 2253 md->disk->driverfs_dev = parent; mmc_blk_alloc_req() 2254 set_disk_ro(md->disk, md->read_only || default_ro); mmc_blk_alloc_req() 2256 md->disk->flags |= GENHD_FL_NO_PART_SCAN; mmc_blk_alloc_req() 2270 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), mmc_blk_alloc_req() 2271 "mmcblk%u%s", md->name_idx, subname ? subname : ""); mmc_blk_alloc_req() 2274 blk_queue_logical_block_size(md->queue.queue, mmc_blk_alloc_req() 2277 blk_queue_logical_block_size(md->queue.queue, 512); mmc_blk_alloc_req() 2279 set_capacity(md->disk, size); mmc_blk_alloc_req() 2285 md->flags |= MMC_BLK_CMD23; mmc_blk_alloc_req() 2289 md->flags & MMC_BLK_CMD23 && mmc_blk_alloc_req() 2292 md->flags |= MMC_BLK_REL_WR; mmc_blk_alloc_req() 2293 blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); mmc_blk_alloc_req() 2298 (md->flags & MMC_BLK_CMD23) && mmc_blk_alloc_req() 2300 if (!mmc_packed_init(&md->queue, card)) mmc_blk_alloc_req() 2301 md->flags |= MMC_BLK_PACKED_CMD; mmc_blk_alloc_req() 2304 return md; mmc_blk_alloc_req() 2307 put_disk(md->disk); mmc_blk_alloc_req() 2309 kfree(md); mmc_blk_alloc_req() 2338 struct mmc_blk_data *md, mmc_blk_alloc_part() 2348 part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, mmc_blk_alloc_part() 2353 list_add(&part_md->part, &md->part); mmc_blk_alloc_part() 2369 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) mmc_blk_alloc_parts() argument 2378 ret = mmc_blk_alloc_part(card, md, mmc_blk_alloc_parts() 2392 static void mmc_blk_remove_req(struct mmc_blk_data *md) mmc_blk_remove_req() argument 2396 if (md) { mmc_blk_remove_req() 2402 card = md->queue.card; mmc_blk_remove_req() 2403 mmc_cleanup_queue(&md->queue); mmc_blk_remove_req() 2404 if (md->flags & MMC_BLK_PACKED_CMD) mmc_blk_remove_req() 2405 mmc_packed_clean(&md->queue); mmc_blk_remove_req() 2406 if (md->disk->flags & GENHD_FL_UP) { mmc_blk_remove_req() 2407 device_remove_file(disk_to_dev(md->disk), &md->force_ro); mmc_blk_remove_req() 2408 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && mmc_blk_remove_req() 2410 device_remove_file(disk_to_dev(md->disk), mmc_blk_remove_req() 2411 &md->power_ro_lock); mmc_blk_remove_req() 2413 del_gendisk(md->disk); mmc_blk_remove_req() 2415 mmc_blk_put(md); mmc_blk_remove_req() 2420 struct mmc_blk_data *md) mmc_blk_remove_parts() 2425 __clear_bit(md->name_idx, name_use); mmc_blk_remove_parts() 2426 list_for_each_safe(pos, q, &md->part) { mmc_blk_remove_parts() 2433 static int mmc_add_disk(struct mmc_blk_data *md) mmc_add_disk() argument 2436 struct mmc_card *card = md->queue.card; mmc_add_disk() 2438 add_disk(md->disk); mmc_add_disk() 2439 md->force_ro.show = force_ro_show; mmc_add_disk() 2440 md->force_ro.store = force_ro_store; mmc_add_disk() 2441 sysfs_attr_init(&md->force_ro.attr); mmc_add_disk() 2442 md->force_ro.attr.name = "force_ro"; mmc_add_disk() 2443 md->force_ro.attr.mode = S_IRUGO | S_IWUSR; mmc_add_disk() 2444 ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); mmc_add_disk() 2448 if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && mmc_add_disk() 2457 md->power_ro_lock.show = power_ro_lock_show; mmc_add_disk() 2458 md->power_ro_lock.store = power_ro_lock_store; mmc_add_disk() 2459 sysfs_attr_init(&md->power_ro_lock.attr); mmc_add_disk() 2460 md->power_ro_lock.attr.mode = mode; mmc_add_disk() 2461 
md->power_ro_lock.attr.name = mmc_add_disk() 2463 ret = device_create_file(disk_to_dev(md->disk), mmc_add_disk() 2464 &md->power_ro_lock); mmc_add_disk() 2471 device_remove_file(disk_to_dev(md->disk), &md->force_ro); mmc_add_disk() 2473 del_gendisk(md->disk); mmc_add_disk() 2560 struct mmc_blk_data *md, *part_md; mmc_blk_probe() local 2571 md = mmc_blk_alloc(card); mmc_blk_probe() 2572 if (IS_ERR(md)) mmc_blk_probe() 2573 return PTR_ERR(md); mmc_blk_probe() 2575 string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, mmc_blk_probe() 2578 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), mmc_blk_probe() 2579 cap_str, md->read_only ? "(ro)" : ""); mmc_blk_probe() 2581 if (mmc_blk_alloc_parts(card, md)) mmc_blk_probe() 2584 dev_set_drvdata(&card->dev, md); mmc_blk_probe() 2586 if (mmc_add_disk(md)) mmc_blk_probe() 2589 list_for_each_entry(part_md, &md->part, part) { mmc_blk_probe() 2609 mmc_blk_remove_parts(card, md); mmc_blk_probe() 2610 mmc_blk_remove_req(md); mmc_blk_probe() 2616 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); mmc_blk_remove() local 2618 mmc_blk_remove_parts(card, md); mmc_blk_remove() 2621 mmc_blk_part_switch(card, md); mmc_blk_remove() 2626 mmc_blk_remove_req(md); mmc_blk_remove() 2633 struct mmc_blk_data *md = dev_get_drvdata(&card->dev); _mmc_blk_suspend() local 2635 if (md) { _mmc_blk_suspend() 2636 mmc_queue_suspend(&md->queue); _mmc_blk_suspend() 2637 list_for_each_entry(part_md, &md->part, part) { _mmc_blk_suspend() 2660 struct mmc_blk_data *md = dev_get_drvdata(dev); mmc_blk_resume() local 2662 if (md) { mmc_blk_resume() 2667 md->part_curr = md->part_type; mmc_blk_resume() 2668 mmc_queue_resume(&md->queue); mmc_blk_resume() 2669 list_for_each_entry(part_md, &md->part, part) { mmc_blk_resume() 743 mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) mmc_blk_part_switch() argument 2337 mmc_blk_alloc_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_type, sector_t size, bool default_ro, const char *subname, int area_type) mmc_blk_alloc_part() argument 2419 mmc_blk_remove_parts(struct mmc_card *card, struct mmc_blk_data *md) mmc_blk_remove_parts() argument
|
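mmc_blk_get()/mmc_blk_put() above protect the per-disk private data with a plain usage counter taken under a lock: open paths bump it only while it is still non-zero, and the final put tears the structure down. A compact user-space rendering of that idiom, with a pthread mutex standing in for the kernel's open lock and hypothetical names throughout:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;

    struct blk_data {              /* hypothetical per-disk private data */
            int usage;             /* references handed out via blk_get() */
    };

    /* Take a reference, but only while the object is still live (usage > 0). */
    static struct blk_data *blk_get(struct blk_data *bd)
    {
            pthread_mutex_lock(&open_lock);
            if (bd && bd->usage == 0)
                    bd = NULL;     /* already torn down: refuse new users */
            if (bd)
                    bd->usage++;
            pthread_mutex_unlock(&open_lock);
            return bd;
    }

    /* Drop a reference; the last one frees the object. */
    static void blk_put(struct blk_data *bd)
    {
            pthread_mutex_lock(&open_lock);
            if (--bd->usage == 0) {
                    pthread_mutex_unlock(&open_lock);
                    free(bd);
                    return;
            }
            pthread_mutex_unlock(&open_lock);
    }

    int main(void)
    {
            struct blk_data *bd = calloc(1, sizeof(*bd));

            bd->usage = 1;          /* creation reference */
            if (blk_get(bd))
                    printf("usage now %d\n", bd->usage);  /* 2 */
            blk_put(bd);
            blk_put(bd);            /* last put frees the object */
            return 0;
    }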
/linux-4.4.14/drivers/video/fbdev/matrox/ |
H A D | matroxfb_maven.c | 134 static int* get_ctrl_ptr(struct maven_data* md, int idx) { get_ctrl_ptr() argument 135 return (int*)((char*)(md->primary_head) + maven_controls[idx].control); get_ctrl_ptr() 339 static unsigned char maven_compute_deflicker (const struct maven_data* md) { maven_compute_deflicker() argument 342 df = (md->version == MGATVO_B?0x40:0x00); maven_compute_deflicker() 343 switch (md->primary_head->altout.tvo_params.deflicker) { maven_compute_deflicker() 357 static void maven_compute_bwlevel (const struct maven_data* md, maven_compute_bwlevel() argument 359 const int b = md->primary_head->altout.tvo_params.brightness + BLMIN; maven_compute_bwlevel() 360 const int c = md->primary_head->altout.tvo_params.contrast; maven_compute_bwlevel() 366 static const struct maven_gamma* maven_compute_gamma (const struct maven_data* md) { maven_compute_gamma() argument 367 return maven_gamma + md->primary_head->altout.tvo_params.gamma; maven_compute_gamma() 371 static void maven_init_TVdata(const struct maven_data* md, struct mavenregs* data) { maven_init_TVdata() argument 476 struct matrox_fb_info *minfo = md->primary_head; maven_init_TVdata() 484 data->regs[0x93] = maven_compute_deflicker(md); maven_init_TVdata() 489 g = maven_compute_gamma(md); maven_init_TVdata() 504 maven_compute_bwlevel (md, &bl, &wl); maven_init_TVdata() 754 static inline int maven_compute_timming(struct maven_data* md, maven_compute_timming() argument 759 struct matrox_fb_info *minfo = md->primary_head; maven_compute_timming() 769 maven_init_TVdata(md, m); maven_compute_timming() 806 if (md->version == MGATVO_B) { maven_compute_timming() 988 static int maven_program_timming(struct maven_data* md, maven_program_timming() argument 990 struct i2c_client *c = md->client; maven_program_timming() 1026 static inline int maven_resync(struct maven_data* md) { maven_resync() argument 1027 struct i2c_client *c = md->client; maven_resync() 1032 static int maven_get_queryctrl (struct maven_data* md, maven_get_queryctrl() argument 1054 static int maven_set_control (struct maven_data* md, maven_set_control() argument 1064 if (p->value == *get_ctrl_ptr(md, i)) return 0; maven_set_control() 1075 *get_ctrl_ptr(md, i) = p->value; maven_set_control() 1082 maven_compute_bwlevel(md, &blacklevel, &whitelevel); maven_set_control() 1085 maven_set_reg_pair(md->client, 0x0e, blacklevel); maven_set_control() 1086 maven_set_reg_pair(md->client, 0x1e, whitelevel); maven_set_control() 1091 maven_set_reg(md->client, 0x20, p->value); maven_set_control() 1092 maven_set_reg(md->client, 0x22, p->value); maven_set_control() 1097 maven_set_reg(md->client, 0x25, p->value); maven_set_control() 1103 g = maven_compute_gamma(md); maven_set_control() 1104 maven_set_reg(md->client, 0x83, g->reg83); maven_set_control() 1105 maven_set_reg(md->client, 0x84, g->reg84); maven_set_control() 1106 maven_set_reg(md->client, 0x85, g->reg85); maven_set_control() 1107 maven_set_reg(md->client, 0x86, g->reg86); maven_set_control() 1108 maven_set_reg(md->client, 0x87, g->reg87); maven_set_control() 1109 maven_set_reg(md->client, 0x88, g->reg88); maven_set_control() 1110 maven_set_reg(md->client, 0x89, g->reg89); maven_set_control() 1111 maven_set_reg(md->client, 0x8a, g->reg8a); maven_set_control() 1112 maven_set_reg(md->client, 0x8b, g->reg8b); maven_set_control() 1118 = maven_get_reg(md->client, 0x8d); maven_set_control() 1121 maven_set_reg(md->client, 0x8d, val); maven_set_control() 1126 maven_set_reg(md->client, 0x93, maven_compute_deflicker(md)); 
maven_set_control() 1135 static int maven_get_control (struct maven_data* md, maven_get_control() argument 1141 p->value = *get_ctrl_ptr(md, i); maven_get_control() 1147 static int maven_out_compute(void* md, struct my_timming* mt) { maven_out_compute() argument 1148 #define mdinfo ((struct maven_data*)md) maven_out_compute() 1150 return maven_compute_timming(md, mt, &minfo->hw.maven); maven_out_compute() 1155 static int maven_out_program(void* md) { maven_out_program() argument 1156 #define mdinfo ((struct maven_data*)md) maven_out_program() 1158 return maven_program_timming(md, &minfo->hw.maven); maven_out_program() 1163 static int maven_out_start(void* md) { maven_out_start() argument 1164 return maven_resync(md); maven_out_start() 1167 static int maven_out_verify_mode(void* md, u_int32_t arg) { maven_out_verify_mode() argument 1177 static int maven_out_get_queryctrl(void* md, struct v4l2_queryctrl* p) { maven_out_get_queryctrl() argument 1178 return maven_get_queryctrl(md, p); maven_out_get_queryctrl() 1181 static int maven_out_get_ctrl(void* md, struct v4l2_control* p) { maven_out_get_ctrl() argument 1182 return maven_get_control(md, p); maven_out_get_ctrl() 1185 static int maven_out_set_ctrl(void* md, struct v4l2_control* p) { maven_out_set_ctrl() argument 1186 return maven_set_control(md, p); maven_out_set_ctrl() 1201 struct maven_data* md = i2c_get_clientdata(clnt); maven_init_client() local 1206 md->primary_head = minfo; maven_init_client() 1207 md->client = clnt; maven_init_client() 1211 minfo->outputs[1].data = md; maven_init_client() 1215 md->version = MGATVO_B; maven_init_client() 1218 md->version = MGATVO_C; maven_init_client() 1227 *get_ctrl_ptr(md, i) = maven_controls[i].desc.default_value; maven_init_client() 1235 struct maven_data* md = i2c_get_clientdata(clnt); maven_shutdown_client() local 1237 if (md->primary_head) { maven_shutdown_client() 1238 struct matrox_fb_info *minfo = md->primary_head; maven_shutdown_client() 1246 md->primary_head = NULL; maven_shutdown_client()
|
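get_ctrl_ptr() above resolves a V4L2 control to its storage by adding a per-control byte offset (kept in the maven_controls[] table) to the base of the primary head structure. The same table-of-offsets trick in portable C uses offsetof(); a small sketch with hypothetical control names:

    #include <stddef.h>
    #include <stdio.h>

    struct tvo_params {            /* hypothetical control storage */
            int brightness;
            int contrast;
            int gamma;
    };

    /* Per-control entry: an id plus the byte offset of its backing field. */
    static const struct {
            int    id;
            size_t offset;
    } controls[] = {
            { 1, offsetof(struct tvo_params, brightness) },
            { 2, offsetof(struct tvo_params, contrast)   },
            { 3, offsetof(struct tvo_params, gamma)      },
    };

    static int *ctrl_ptr(struct tvo_params *p, int idx)
    {
            return (int *)((char *)p + controls[idx].offset);
    }

    int main(void)
    {
            struct tvo_params p = { .brightness = 50, .contrast = 80, .gamma = 3 };

            *ctrl_ptr(&p, 1) = 90;          /* write the contrast control */
            printf("%d %d %d\n", p.brightness, p.contrast, p.gamma); /* 50 90 3 */
            return 0;
    }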
H A D | matroxfb_g450.c | 141 static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) { g450_query_ctrl() argument 162 static int g450_set_ctrl(void* md, struct v4l2_control *p) { g450_set_ctrl() argument 164 struct matrox_fb_info *minfo = md; g450_set_ctrl() 216 static int g450_get_ctrl(void* md, struct v4l2_control *p) { g450_get_ctrl() argument 218 struct matrox_fb_info *minfo = md; g450_get_ctrl() 520 static int matroxfb_g450_compute(void* md, struct my_timming* mt) { matroxfb_g450_compute() argument 521 struct matrox_fb_info *minfo = md; matroxfb_g450_compute() 558 static int matroxfb_g450_program(void* md) { matroxfb_g450_program() argument 559 struct matrox_fb_info *minfo = md; matroxfb_g450_program() 567 static int matroxfb_g450_verify_mode(void* md, u_int32_t arg) { matroxfb_g450_verify_mode() argument 577 static int g450_dvi_compute(void* md, struct my_timming* mt) { g450_dvi_compute() argument 578 struct matrox_fb_info *minfo = md; g450_dvi_compute()
|
/linux-4.4.14/arch/x86/platform/efi/ |
H A D | efi.c | 126 efi_memory_desc_t *md = p; efi_find_mirror() local 127 unsigned long long start = md->phys_addr; efi_find_mirror() 128 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; efi_find_mirror() 131 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) { efi_find_mirror() 152 efi_memory_desc_t *md = p; do_add_efi_memmap() local 153 unsigned long long start = md->phys_addr; do_add_efi_memmap() 154 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; do_add_efi_memmap() 157 switch (md->type) { do_add_efi_memmap() 163 if (md->attribute & EFI_MEMORY_WB) do_add_efi_memmap() 228 efi_memory_desc_t *md; efi_print_memmap() local 237 md = p; efi_print_memmap() 239 i, efi_md_typeattr_format(buf, sizeof(buf), md), efi_print_memmap() 240 md->phys_addr, efi_print_memmap() 241 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), efi_print_memmap() 242 (md->num_pages >> (20 - EFI_PAGE_SHIFT))); efi_print_memmap() 537 void __init efi_set_executable(efi_memory_desc_t *md, bool executable) efi_set_executable() argument 541 addr = md->virt_addr; efi_set_executable() 542 npages = md->num_pages; efi_set_executable() 554 efi_memory_desc_t *md; runtime_code_page_mkexec() local 559 md = p; runtime_code_page_mkexec() 561 if (md->type != EFI_RUNTIME_SERVICES_CODE) runtime_code_page_mkexec() 564 efi_set_executable(md, true); runtime_code_page_mkexec() 578 void __init old_map_region(efi_memory_desc_t *md) old_map_region() argument 584 start_pfn = PFN_DOWN(md->phys_addr); old_map_region() 585 size = md->num_pages << PAGE_SHIFT; old_map_region() 586 end = md->phys_addr + size; old_map_region() 590 va = __va(md->phys_addr); old_map_region() 592 if (!(md->attribute & EFI_MEMORY_WB)) old_map_region() 595 va = efi_ioremap(md->phys_addr, size, old_map_region() 596 md->type, md->attribute); old_map_region() 598 md->virt_addr = (u64) (unsigned long) va; old_map_region() 601 (unsigned long long)md->phys_addr); old_map_region() 608 efi_memory_desc_t *md, *prev_md = NULL; efi_merge_regions() local 612 md = p; efi_merge_regions() 615 prev_md = md; efi_merge_regions() 619 if (prev_md->type != md->type || efi_merge_regions() 620 prev_md->attribute != md->attribute) { efi_merge_regions() 621 prev_md = md; efi_merge_regions() 627 if (md->phys_addr == (prev_md->phys_addr + prev_size)) { efi_merge_regions() 628 prev_md->num_pages += md->num_pages; efi_merge_regions() 629 md->type = EFI_RESERVED_TYPE; efi_merge_regions() 630 md->attribute = 0; efi_merge_regions() 633 prev_md = md; efi_merge_regions() 637 static void __init get_systab_virt_addr(efi_memory_desc_t *md) get_systab_virt_addr() argument 642 size = md->num_pages << EFI_PAGE_SHIFT; get_systab_virt_addr() 643 end = md->phys_addr + size; get_systab_virt_addr() 645 if (md->phys_addr <= systab && systab < end) { get_systab_virt_addr() 646 systab += md->virt_addr - md->phys_addr; get_systab_virt_addr() 654 efi_memory_desc_t *md; save_runtime_map() local 662 md = p; save_runtime_map() 664 if (!(md->attribute & EFI_MEMORY_RUNTIME) || save_runtime_map() 665 (md->type == EFI_BOOT_SERVICES_CODE) || save_runtime_map() 666 (md->type == EFI_BOOT_SERVICES_DATA)) save_runtime_map() 673 memcpy(q + count * memmap.desc_size, md, memmap.desc_size); save_runtime_map() 779 efi_memory_desc_t *md; efi_map_regions() local 783 md = p; efi_map_regions() 784 if (!(md->attribute & EFI_MEMORY_RUNTIME)) { efi_map_regions() 786 if (md->type != EFI_BOOT_SERVICES_CODE && efi_map_regions() 787 md->type != EFI_BOOT_SERVICES_DATA) efi_map_regions() 792 efi_map_region(md); efi_map_regions() 793 
get_systab_virt_addr(md); efi_map_regions() 804 memcpy(new_memmap + (*count * memmap.desc_size), md, efi_map_regions() 817 efi_memory_desc_t *md; kexec_enter_virtual_mode() local 837 md = p; kexec_enter_virtual_mode() 838 efi_map_region_fixed(md); /* FIXME: add error handling */ kexec_enter_virtual_mode() 839 get_systab_virt_addr(md); kexec_enter_virtual_mode() 1004 efi_memory_desc_t *md; efi_mem_type() local 1011 md = p; efi_mem_type() 1012 if ((md->phys_addr <= phys_addr) && efi_mem_type() 1013 (phys_addr < (md->phys_addr + efi_mem_type() 1014 (md->num_pages << EFI_PAGE_SHIFT)))) efi_mem_type() 1015 return md->type; efi_mem_type()
|
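efi_merge_regions() above coalesces a descriptor with its predecessor when the two share type and attributes and are physically contiguous (previous end equals next start), neutralising the absorbed entry. A standalone sketch of that merge pass over a simplified array, done in place and returning the new count:

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12

    struct desc {                  /* simplified stand-in for efi_memory_desc_t */
            uint32_t type;
            uint64_t attribute;
            uint64_t phys_addr;
            uint64_t num_pages;
    };

    /* Merge physically contiguous neighbours with equal type and attributes. */
    static int merge_regions(struct desc *map, int n)
    {
            int out = 0;

            for (int i = 0; i < n; i++) {
                    struct desc *prev = out ? &map[out - 1] : NULL;

                    if (prev) {
                            uint64_t prev_end = prev->phys_addr +
                                    (prev->num_pages << EFI_PAGE_SHIFT);

                            if (prev->type == map[i].type &&
                                prev->attribute == map[i].attribute &&
                                prev_end == map[i].phys_addr) {
                                    prev->num_pages += map[i].num_pages;
                                    continue;      /* absorbed into prev */
                            }
                    }
                    map[out++] = map[i];
            }
            return out;
    }

    int main(void)
    {
            struct desc map[] = {
                    { 7, 0xf, 0x000000, 16 },      /* [0, 64K)                */
                    { 7, 0xf, 0x010000, 16 },      /* [64K, 128K): contiguous */
                    { 7, 0x8, 0x020000, 16 },      /* different attribute     */
            };
            int n = merge_regions(map, 3);

            printf("%d regions, first has %llu pages\n", n,
                   (unsigned long long)map[0].num_pages);   /* 2 regions, 32 */
            return 0;
    }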
H A D | efi_64.c | 63 efi_memory_desc_t *md; early_code_mapping_set_exec() local 71 md = p; early_code_mapping_set_exec() 72 if (md->type == EFI_RUNTIME_SERVICES_CODE || early_code_mapping_set_exec() 73 md->type == EFI_BOOT_SERVICES_CODE) early_code_mapping_set_exec() 74 efi_set_executable(md, executable); early_code_mapping_set_exec() 204 static void __init __map_region(efi_memory_desc_t *md, u64 va) __map_region() argument 209 if (!(md->attribute & EFI_MEMORY_WB)) __map_region() 212 if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) __map_region() 214 md->phys_addr, va); __map_region() 217 void __init efi_map_region(efi_memory_desc_t *md) efi_map_region() argument 219 unsigned long size = md->num_pages << PAGE_SHIFT; efi_map_region() 220 u64 pa = md->phys_addr; efi_map_region() 223 return old_map_region(md); efi_map_region() 230 __map_region(md, md->phys_addr); efi_map_region() 238 md->virt_addr = md->phys_addr; efi_map_region() 264 __map_region(md, efi_va); efi_map_region() 265 md->virt_addr = efi_va; efi_map_region() 270 * md->virt_addr is the original virtual address which had been mapped in kexec 273 void __init efi_map_region_fixed(efi_memory_desc_t *md) efi_map_region_fixed() argument 275 __map_region(md, md->virt_addr); efi_map_region_fixed()
|
H A D | quirks.c | 146 efi_memory_desc_t *md = p; efi_reserve_boot_services() local 147 u64 start = md->phys_addr; efi_reserve_boot_services() 148 u64 size = md->num_pages << EFI_PAGE_SHIFT; efi_reserve_boot_services() 150 if (md->type != EFI_BOOT_SERVICES_CODE && efi_reserve_boot_services() 151 md->type != EFI_BOOT_SERVICES_DATA) efi_reserve_boot_services() 164 md->num_pages = 0; efi_reserve_boot_services() 177 efi_memory_desc_t *md = p; efi_free_boot_services() local 178 unsigned long long start = md->phys_addr; efi_free_boot_services() 179 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; efi_free_boot_services() 181 if (md->type != EFI_BOOT_SERVICES_CODE && efi_free_boot_services() 182 md->type != EFI_BOOT_SERVICES_DATA) efi_free_boot_services()
|
H A D | efi_32.c | 51 void __init efi_map_region(efi_memory_desc_t *md) efi_map_region() argument 53 old_map_region(md); efi_map_region() 56 void __init efi_map_region_fixed(efi_memory_desc_t *md) {} parse_efi_setup() argument
|
/linux-4.4.14/drivers/input/touchscreen/ |
H A D | cyttsp4_core.c | 719 static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md, cyttsp4_report_slot_liftoff() argument 724 if (md->num_prv_tch == 0) cyttsp4_report_slot_liftoff() 728 input_mt_slot(md->input, t); cyttsp4_report_slot_liftoff() 729 input_mt_report_slot_state(md->input, cyttsp4_report_slot_liftoff() 734 static void cyttsp4_lift_all(struct cyttsp4_mt_data *md) cyttsp4_lift_all() argument 736 if (!md->si) cyttsp4_lift_all() 739 if (md->num_prv_tch != 0) { cyttsp4_lift_all() 740 cyttsp4_report_slot_liftoff(md, cyttsp4_lift_all() 741 md->si->si_ofs.tch_abs[CY_TCH_T].max); cyttsp4_lift_all() 742 input_sync(md->input); cyttsp4_lift_all() 743 md->num_prv_tch = 0; cyttsp4_lift_all() 747 static void cyttsp4_get_touch_axis(struct cyttsp4_mt_data *md, cyttsp4_get_touch_axis() argument 754 dev_vdbg(&md->input->dev, cyttsp4_get_touch_axis() 765 dev_vdbg(&md->input->dev, cyttsp4_get_touch_axis() 772 static void cyttsp4_get_touch(struct cyttsp4_mt_data *md, cyttsp4_get_touch() argument 775 struct device *dev = &md->input->dev; cyttsp4_get_touch() 776 struct cyttsp4_sysinfo *si = md->si; cyttsp4_get_touch() 781 cyttsp4_get_touch_axis(md, &touch->abs[abs], cyttsp4_get_touch() 791 if (md->pdata->flags & CY_FLAG_FLIP) { cyttsp4_get_touch() 797 if (md->pdata->flags & CY_FLAG_INV_X) { cyttsp4_get_touch() 799 touch->abs[CY_TCH_X] = md->si->si_ofs.max_y - cyttsp4_get_touch() 802 touch->abs[CY_TCH_X] = md->si->si_ofs.max_x - cyttsp4_get_touch() 805 if (md->pdata->flags & CY_FLAG_INV_Y) { cyttsp4_get_touch() 807 touch->abs[CY_TCH_Y] = md->si->si_ofs.max_x - cyttsp4_get_touch() 810 touch->abs[CY_TCH_Y] = md->si->si_ofs.max_y - cyttsp4_get_touch() 816 md->pdata->flags & CY_FLAG_INV_X ? "true" : "false", cyttsp4_get_touch() 817 md->pdata->flags & CY_FLAG_INV_Y ? 
"true" : "false", cyttsp4_get_touch() 836 static void cyttsp4_get_mt_touches(struct cyttsp4_mt_data *md, int num_cur_tch) cyttsp4_get_mt_touches() argument 838 struct device *dev = &md->input->dev; cyttsp4_get_mt_touches() 839 struct cyttsp4_sysinfo *si = md->si; cyttsp4_get_mt_touches() 847 cyttsp4_get_touch(md, &tch, si->xy_data + cyttsp4_get_mt_touches() 849 if ((tch.abs[CY_TCH_T] < md->pdata->frmwrk->abs cyttsp4_get_mt_touches() 851 (tch.abs[CY_TCH_T] > md->pdata->frmwrk->abs cyttsp4_get_mt_touches() 855 md->pdata->frmwrk->abs[(CY_ABS_ID_OST * cyttsp4_get_mt_touches() 861 sig = md->pdata->frmwrk->abs cyttsp4_get_mt_touches() 864 t = tch.abs[CY_TCH_T] - md->pdata->frmwrk->abs cyttsp4_get_mt_touches() 871 input_mt_slot(md->input, t); cyttsp4_get_mt_touches() 872 input_mt_report_slot_state(md->input, MT_TOOL_FINGER, cyttsp4_get_mt_touches() 879 sig = md->pdata->frmwrk->abs[((CY_ABS_X_OST + j) * cyttsp4_get_mt_touches() 882 input_report_abs(md->input, sig, cyttsp4_get_mt_touches() 897 sig = md->pdata->frmwrk->abs cyttsp4_get_mt_touches() 901 input_report_abs(md->input, sig, cyttsp4_get_mt_touches() 928 cyttsp4_final_sync(md->input, si->si_ofs.tch_abs[CY_TCH_T].max, ids); cyttsp4_get_mt_touches() 930 md->num_prv_tch = num_cur_tch; cyttsp4_get_mt_touches() 938 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_xy_worker() local 939 struct device *dev = &md->input->dev; cyttsp4_xy_worker() 940 struct cyttsp4_sysinfo *si = md->si; cyttsp4_xy_worker() 1012 cyttsp4_get_mt_touches(md, num_cur_tch); cyttsp4_xy_worker() 1014 cyttsp4_lift_all(md); cyttsp4_xy_worker() 1025 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_mt_attention() local 1028 if (!md->si) cyttsp4_mt_attention() 1031 mutex_lock(&md->report_lock); cyttsp4_mt_attention() 1032 if (!md->is_suspended) { cyttsp4_mt_attention() 1039 mutex_unlock(&md->report_lock); cyttsp4_mt_attention() 1633 cyttsp4_lift_all(&cd->md); cyttsp4_startup_() 1820 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_core_suspend() local 1823 md->is_suspended = true; cyttsp4_core_suspend() 1836 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_core_resume() local 1839 md->is_suspended = false; cyttsp4_core_resume() 1865 struct cyttsp4_mt_data *md = input_get_drvdata(input); cyttsp4_mt_close() local 1866 mutex_lock(&md->report_lock); cyttsp4_mt_close() 1867 if (!md->is_suspended) cyttsp4_mt_close() 1869 mutex_unlock(&md->report_lock); cyttsp4_mt_close() 1876 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_setup_input_device() local 1884 __set_bit(EV_ABS, md->input->evbit); cyttsp4_setup_input_device() 1885 __set_bit(EV_REL, md->input->evbit); cyttsp4_setup_input_device() 1886 __set_bit(EV_KEY, md->input->evbit); cyttsp4_setup_input_device() 1888 max_x_tmp = md->si->si_ofs.max_x; cyttsp4_setup_input_device() 1889 max_y_tmp = md->si->si_ofs.max_y; cyttsp4_setup_input_device() 1892 if (md->pdata->flags & CY_FLAG_FLIP) { cyttsp4_setup_input_device() 1899 max_p = md->si->si_ofs.max_p; cyttsp4_setup_input_device() 1902 for (i = 0; i < (md->pdata->frmwrk->size / CY_NUM_ABS_SET); i++) { cyttsp4_setup_input_device() 1903 signal = md->pdata->frmwrk->abs cyttsp4_setup_input_device() 1906 __set_bit(signal, md->input->absbit); cyttsp4_setup_input_device() 1907 min = md->pdata->frmwrk->abs cyttsp4_setup_input_device() 1909 max = md->pdata->frmwrk->abs cyttsp4_setup_input_device() 1921 input_set_abs_params(md->input, signal, min, max, cyttsp4_setup_input_device() 1922 md->pdata->frmwrk->abs cyttsp4_setup_input_device() 1924 md->pdata->frmwrk->abs cyttsp4_setup_input_device() 1929 
(md->si->si_ofs.tch_rec_size < cyttsp4_setup_input_device() 1935 input_mt_init_slots(md->input, md->si->si_ofs.tch_abs[CY_TCH_T].max, cyttsp4_setup_input_device() 1937 rc = input_register_device(md->input); cyttsp4_setup_input_device() 1947 struct cyttsp4_mt_data *md = &cd->md; cyttsp4_mt_probe() local 1951 mutex_init(&md->report_lock); cyttsp4_mt_probe() 1952 md->pdata = pdata; cyttsp4_mt_probe() 1956 md->input = input_allocate_device(); cyttsp4_mt_probe() 1957 if (md->input == NULL) { cyttsp4_mt_probe() 1964 md->input->name = pdata->inp_dev_name; cyttsp4_mt_probe() 1965 scnprintf(md->phys, sizeof(md->phys)-1, "%s", dev_name(dev)); cyttsp4_mt_probe() 1966 md->input->phys = md->phys; cyttsp4_mt_probe() 1967 md->input->id.bustype = cd->bus_ops->bustype; cyttsp4_mt_probe() 1968 md->input->dev.parent = dev; cyttsp4_mt_probe() 1969 md->input->open = cyttsp4_mt_open; cyttsp4_mt_probe() 1970 md->input->close = cyttsp4_mt_close; cyttsp4_mt_probe() 1971 input_set_drvdata(md->input, md); cyttsp4_mt_probe() 1974 md->si = &cd->sysinfo; cyttsp4_mt_probe() 1975 if (!md->si) { cyttsp4_mt_probe() 1977 __func__, md->si); cyttsp4_mt_probe() 1988 input_free_device(md->input); cyttsp4_mt_probe() 1990 input_set_drvdata(md->input, NULL); cyttsp4_mt_probe() 2123 static void cyttsp4_mt_release(struct cyttsp4_mt_data *md) cyttsp4_mt_release() argument 2125 input_unregister_device(md->input); cyttsp4_mt_release() 2126 input_set_drvdata(md->input, NULL); cyttsp4_mt_release() 2133 cyttsp4_mt_release(&cd->md); cyttsp4_remove()
|
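The cyttsp4_get_touch() excerpt applies optional axis flip and per-axis inversion to each reported touch; below is a small userspace sketch of that idea. The struct and flag arguments are invented for illustration, and the real driver measures the inverted X axis against max_y when the axes are flipped, which this simpler version does not model.

/* Sketch: swap the axes if "flip" is set, then mirror each axis
 * against its maximum if the corresponding invert flag is set. */
#include <stdbool.h>
#include <stdio.h>

struct touch_pt { int x, y; };

static void transform(struct touch_pt *t, int max_x, int max_y,
		      bool flip, bool inv_x, bool inv_y)
{
	if (flip) {                    /* swap X and Y */
		int tmp = t->x;
		t->x = t->y;
		t->y = tmp;
	}
	if (inv_x)
		t->x = max_x - t->x;   /* mirror horizontally */
	if (inv_y)
		t->y = max_y - t->y;   /* mirror vertically */
}

int main(void)
{
	struct touch_pt p = { .x = 10, .y = 20 };

	transform(&p, 799, 479, true, false, true);
	printf("reported touch at (%d, %d)\n", p.x, p.y);
	return 0;
}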
H A D | cyttsp4_core.h | 360 struct cyttsp4_mt_data md; member in struct:cyttsp4
|
/linux-4.4.14/drivers/w1/ |
H A D | w1.c | 88 struct w1_master *md = dev_to_w1_master(dev); w1_master_release() local 90 dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name); w1_master_release() 91 memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); w1_master_release() 92 kfree(md); w1_master_release() 227 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_name() local 230 mutex_lock(&md->mutex); w1_master_attribute_show_name() 231 count = sprintf(buf, "%s\n", md->name); w1_master_attribute_show_name() 232 mutex_unlock(&md->mutex); w1_master_attribute_show_name() 242 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_store_search() local 249 mutex_lock(&md->mutex); w1_master_attribute_store_search() 250 md->search_count = tmp; w1_master_attribute_store_search() 251 mutex_unlock(&md->mutex); w1_master_attribute_store_search() 254 wake_up_process(md->thread); w1_master_attribute_store_search() 263 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_search() local 266 mutex_lock(&md->mutex); w1_master_attribute_show_search() 267 count = sprintf(buf, "%d\n", md->search_count); w1_master_attribute_show_search() 268 mutex_unlock(&md->mutex); w1_master_attribute_show_search() 278 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_store_pullup() local 285 mutex_lock(&md->mutex); w1_master_attribute_store_pullup() 286 md->enable_pullup = tmp; w1_master_attribute_store_pullup() 287 mutex_unlock(&md->mutex); w1_master_attribute_store_pullup() 296 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_pullup() local 299 mutex_lock(&md->mutex); w1_master_attribute_show_pullup() 300 count = sprintf(buf, "%d\n", md->enable_pullup); w1_master_attribute_show_pullup() 301 mutex_unlock(&md->mutex); w1_master_attribute_show_pullup() 308 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_pointer() local 311 mutex_lock(&md->mutex); w1_master_attribute_show_pointer() 312 count = sprintf(buf, "0x%p\n", md->bus_master); w1_master_attribute_show_pointer() 313 mutex_unlock(&md->mutex); w1_master_attribute_show_pointer() 336 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_store_max_slave_count() local 341 mutex_lock(&md->mutex); w1_master_attribute_store_max_slave_count() 342 md->max_slave_count = tmp; w1_master_attribute_store_max_slave_count() 344 clear_bit(W1_WARN_MAX_COUNT, &md->flags); w1_master_attribute_store_max_slave_count() 345 mutex_unlock(&md->mutex); w1_master_attribute_store_max_slave_count() 352 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_max_slave_count() local 355 mutex_lock(&md->mutex); w1_master_attribute_show_max_slave_count() 356 count = sprintf(buf, "%d\n", md->max_slave_count); w1_master_attribute_show_max_slave_count() 357 mutex_unlock(&md->mutex); w1_master_attribute_show_max_slave_count() 363 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_attempts() local 366 mutex_lock(&md->mutex); w1_master_attribute_show_attempts() 367 count = sprintf(buf, "%lu\n", md->attempts); w1_master_attribute_show_attempts() 368 mutex_unlock(&md->mutex); w1_master_attribute_show_attempts() 374 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_show_slave_count() local 377 mutex_lock(&md->mutex); w1_master_attribute_show_slave_count() 378 count = sprintf(buf, "%d\n", md->slave_count); w1_master_attribute_show_slave_count() 379 mutex_unlock(&md->mutex); w1_master_attribute_show_slave_count() 386 struct w1_master *md = 
dev_to_w1_master(dev); w1_master_attribute_show_slaves() local 391 mutex_lock(&md->list_mutex); w1_master_attribute_show_slaves() 393 list_for_each_safe(ent, n, &md->slist) { w1_master_attribute_show_slaves() 401 mutex_unlock(&md->list_mutex); w1_master_attribute_show_slaves() 478 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_store_add() local 486 mutex_lock(&md->mutex); w1_master_attribute_store_add() 487 sl = w1_slave_search_device(md, &rn); w1_master_attribute_store_add() 496 w1_attach_slave_device(md, &rn); w1_master_attribute_store_add() 498 mutex_unlock(&md->mutex); w1_master_attribute_store_add() 516 struct w1_master *md = dev_to_w1_master(dev); w1_master_attribute_store_remove() local 524 mutex_lock(&md->mutex); w1_master_attribute_store_remove() 525 sl = w1_slave_search_device(md, &rn); w1_master_attribute_store_remove() 536 mutex_unlock(&md->mutex); w1_master_attribute_store_remove() 597 struct w1_master *md = NULL; w1_uevent() local 603 md = container_of(dev, struct w1_master, dev); w1_uevent() 605 name = md->name; w1_uevent()
|
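Every show/store helper in the w1.c excerpt follows the same shape: take the master's mutex, format or update the field, release the mutex. A userspace sketch of that pattern, with pthread primitives and a plain struct standing in for struct w1_master and the sysfs buffer:

/* Sketch of the locked attribute access pattern from w1.c. */
#include <pthread.h>
#include <stdio.h>

struct fake_master {
	pthread_mutex_t mutex;
	int search_count;
};

static int show_search(struct fake_master *md, char *buf, size_t len)
{
	int count;

	pthread_mutex_lock(&md->mutex);
	count = snprintf(buf, len, "%d\n", md->search_count);
	pthread_mutex_unlock(&md->mutex);
	return count;
}

static void store_search(struct fake_master *md, int val)
{
	pthread_mutex_lock(&md->mutex);
	md->search_count = val;        /* the kernel code then wakes the search thread */
	pthread_mutex_unlock(&md->mutex);
}

int main(void)
{
	struct fake_master m = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	char buf[32];

	store_search(&m, 5);           /* example value */
	show_search(&m, buf, sizeof(buf));
	printf("%s", buf);
	return 0;
}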
/linux-4.4.14/drivers/net/ethernet/tile/ |
H A D | tilegx.c | 361 struct mpipe_data *md = &mpipe_data[instance]; tile_net_provide_buffer() local 383 gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind, tile_net_provide_buffer() 411 struct mpipe_data *md = &mpipe_data[instance]; tile_net_pop_all_buffers() local 415 (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context, tile_net_pop_all_buffers() 463 struct mpipe_data *md = &mpipe_data[instance]; tile_tx_timestamp() local 468 gxio_mpipe_get_timestamp(&md->context, &ts); tile_tx_timestamp() 589 struct mpipe_data *md = &mpipe_data[instance]; tile_net_handle_packet() local 590 struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel]; tile_net_handle_packet() 658 struct mpipe_data *md; tile_net_poll() local 683 md = &mpipe_data[instance]; tile_net_poll() 686 &md->context, info->mpipe[instance].iqueue.ring); tile_net_poll() 823 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); ptp_mpipe_adjfreq() local 824 mutex_lock(&md->ptp_lock); ptp_mpipe_adjfreq() 825 if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb)) ptp_mpipe_adjfreq() 827 mutex_unlock(&md->ptp_lock); ptp_mpipe_adjfreq() 834 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); ptp_mpipe_adjtime() local 835 mutex_lock(&md->ptp_lock); ptp_mpipe_adjtime() 836 if (gxio_mpipe_adjust_timestamp(&md->context, delta)) ptp_mpipe_adjtime() 838 mutex_unlock(&md->ptp_lock); ptp_mpipe_adjtime() 846 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); ptp_mpipe_gettime() local 847 mutex_lock(&md->ptp_lock); ptp_mpipe_gettime() 848 if (gxio_mpipe_get_timestamp(&md->context, ts)) ptp_mpipe_gettime() 850 mutex_unlock(&md->ptp_lock); ptp_mpipe_gettime() 858 struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps); ptp_mpipe_settime() local 859 mutex_lock(&md->ptp_lock); ptp_mpipe_settime() 860 if (gxio_mpipe_set_timestamp(&md->context, ts)) ptp_mpipe_settime() 862 mutex_unlock(&md->ptp_lock); ptp_mpipe_settime() 887 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md) register_ptp_clock() argument 892 gxio_mpipe_set_timestamp(&md->context, &ts); register_ptp_clock() 894 mutex_init(&md->ptp_lock); register_ptp_clock() 895 md->caps = ptp_mpipe_caps; register_ptp_clock() 896 md->ptp_clock = ptp_clock_register(&md->caps, NULL); register_ptp_clock() 897 if (IS_ERR(md->ptp_clock)) register_ptp_clock() 899 PTR_ERR(md->ptp_clock)); register_ptp_clock() 928 struct mpipe_data *md = &mpipe_data[instance]; tile_net_update() local 934 gxio_mpipe_rules_init(&rules, &md->context); tile_net_update() 937 if (md->tile_net_devs_for_channel[channel] == NULL) tile_net_update() 941 gxio_mpipe_rules_begin(&rules, md->first_bucket, tile_net_update() 942 md->num_buckets, NULL); tile_net_update() 963 (void *)(long)(md->ingress_irq), 1); for_each_online_cpu() 989 (void *)(long)(md->ingress_irq), 1); 1004 struct mpipe_data *md = &mpipe_data[instance]; create_buffer_stack() local 1006 int stack_idx = md->first_buffer_stack + kind; create_buffer_stack() 1013 md->buffer_stack_bytes[kind] = create_buffer_stack() 1016 va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL); create_buffer_stack() 1020 md->buffer_stack_bytes[kind], kind); create_buffer_stack() 1025 rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx, create_buffer_stack() 1027 md->buffer_stack_bytes[kind], 0); create_buffer_stack() 1031 free_pages_exact(va, md->buffer_stack_bytes[kind]); create_buffer_stack() 1035 md->buffer_stack_vas[kind] = va; create_buffer_stack() 1037 rc = 
gxio_mpipe_register_client_memory(&md->context, stack_idx, create_buffer_stack() 1068 struct mpipe_data *md = &mpipe_data[instance]; init_buffer_stacks() local 1071 rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0); init_buffer_stacks() 1078 md->first_buffer_stack = rc; init_buffer_stacks() 1108 struct mpipe_data *md = &mpipe_data[instance]; alloc_percpu_mpipe_resources() local 1138 &md->context, ring++, addr, alloc_percpu_mpipe_resources() 1159 struct mpipe_data *md = &mpipe_data[instance]; init_notif_group_and_buckets() local 1162 rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0); init_notif_group_and_buckets() 1172 md->num_buckets = 256; init_notif_group_and_buckets() 1174 md->num_buckets = 16; init_notif_group_and_buckets() 1177 rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0); init_notif_group_and_buckets() 1183 md->first_bucket = rc; init_notif_group_and_buckets() 1187 &md->context, group, ring, network_cpus_count, init_notif_group_and_buckets() 1188 md->first_bucket, md->num_buckets, init_notif_group_and_buckets() 1208 struct mpipe_data *md = &mpipe_data[instance]; tile_net_setup_interrupts() local 1210 irq = md->ingress_irq; tile_net_setup_interrupts() 1230 md->ingress_irq = irq; tile_net_setup_interrupts() 1236 gxio_mpipe_request_notif_ring_interrupt(&md->context, for_each_online_cpu() 1249 struct mpipe_data *md = &mpipe_data[instance]; tile_net_init_mpipe_fail() local 1253 if (md->buffer_stack_vas[kind] != NULL) { tile_net_init_mpipe_fail() 1255 md->first_buffer_stack + tile_net_init_mpipe_fail() 1261 gxio_mpipe_destroy(&md->context); tile_net_init_mpipe_fail() 1276 if (md->buffer_stack_vas[kind] != NULL) { 1277 free_pages_exact(md->buffer_stack_vas[kind], 1278 md->buffer_stack_bytes[kind]); 1279 md->buffer_stack_vas[kind] = NULL; 1283 md->first_buffer_stack = -1; 1284 md->first_bucket = -1; 1302 struct mpipe_data *md = &mpipe_data[instance]; tile_net_init_mpipe() local 1310 rc = gxio_mpipe_init(&md->context, instance); tile_net_init_mpipe() 1323 rc = gxio_mpipe_alloc_notif_rings(&md->context, tile_net_init_mpipe() 1352 register_ptp_clock(dev, md); 1376 struct mpipe_data *md = &mpipe_data[instance]; tile_net_init_egress() local 1379 if (md->egress_for_echannel[echannel].equeue != NULL) tile_net_init_egress() 1418 rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0); tile_net_init_egress() 1428 rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel, tile_net_init_egress() 1449 md->egress_for_echannel[echannel].equeue = equeue; tile_net_init_egress() 1450 md->egress_for_echannel[echannel].headers = headers; tile_net_init_egress() 1471 struct mpipe_data *md = &mpipe_data[instance]; tile_net_link_open() local 1472 int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0); tile_net_link_open() 1600 struct mpipe_data *md = &mpipe_data[instance]; tile_net_stop() local 1612 md->tile_net_devs_for_channel[priv->channel] = NULL; 1835 struct mpipe_data *md = &mpipe_data[instance]; tso_egress() local 1858 edesc_head.stack_idx = md->first_buffer_stack; tso_egress() 1859 edesc_body.stack_idx = md->first_buffer_stack; tso_egress() 1935 struct mpipe_data *md = &mpipe_data[instance]; tile_net_tx_tso() local 1936 struct tile_net_egress *egress = &md->egress_for_echannel[channel]; tile_net_tx_tso() 2003 struct mpipe_data *md = &mpipe_data[instance]; tile_net_tx() local 2005 &md->egress_for_echannel[priv->echannel]; tile_net_tx() 2025 edesc.stack_idx = md->first_buffer_stack; tile_net_tx() 2143 struct mpipe_data *md = 
&mpipe_data[instance]; tile_net_netpoll() local 2145 disable_percpu_irq(md->ingress_irq); tile_net_netpoll() 2147 enable_percpu_irq(md->ingress_irq, 0); tile_net_netpoll()
|
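The tilegx PTP callbacks above recover their driver state with container_of(ptp, struct mpipe_data, caps): given a pointer to an embedded member, step back to the enclosing structure. A self-contained userspace sketch of the same idiom, with stand-in types:

/* Sketch: container_of() built from offsetof(), used the way the
 * ptp_mpipe_* callbacks in the excerpt use it. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct caps { int max_adj; };

struct fake_mpipe_data {
	int instance;
	struct caps caps;              /* embedded member handed to callbacks */
};

static void callback(struct caps *c)
{
	struct fake_mpipe_data *md = container_of(c, struct fake_mpipe_data, caps);

	printf("callback ran for instance %d\n", md->instance);
}

int main(void)
{
	struct fake_mpipe_data md = { .instance = 3 };

	callback(&md.caps);
	return 0;
}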
/linux-4.4.14/drivers/staging/lustre/include/linux/lnet/ |
H A D | lib-lnet.h | 73 static inline int lnet_md_exhausted(lnet_libmd_t *md) lnet_md_exhausted() argument 75 return (md->md_threshold == 0 || lnet_md_exhausted() 76 ((md->md_options & LNET_MD_MAX_SIZE) != 0 && lnet_md_exhausted() 77 md->md_offset + md->md_max_size > md->md_length)); lnet_md_exhausted() 80 static inline int lnet_md_unlinkable(lnet_libmd_t *md) lnet_md_unlinkable() argument 82 /* Should unlink md when its refcount is 0 and either: lnet_md_unlinkable() 83 * - md has been flagged for deletion (by auto unlink or lnet_md_unlinkable() 84 * LNetM[DE]Unlink, in the latter case md may not be exhausted). lnet_md_unlinkable() 85 * - auto unlink is on and md is exhausted. lnet_md_unlinkable() 87 if (md->md_refcount != 0) lnet_md_unlinkable() 90 if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0) lnet_md_unlinkable() 93 return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 && lnet_md_unlinkable() 94 lnet_md_exhausted(md)); lnet_md_unlinkable() 182 lnet_libmd_t *md; lnet_md_alloc() local 195 LIBCFS_ALLOC(md, size); lnet_md_alloc() 197 if (md != NULL) { lnet_md_alloc() 199 md->md_options = umd->options; lnet_md_alloc() 200 md->md_niov = niov; lnet_md_alloc() 201 INIT_LIST_HEAD(&md->md_list); lnet_md_alloc() 204 return md; lnet_md_alloc() 208 lnet_md_free(lnet_libmd_t *md) lnet_md_free() argument 212 if ((md->md_options & LNET_MD_KIOV) != 0) lnet_md_free() 213 size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]); lnet_md_free() 215 size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]); lnet_md_free() 217 LIBCFS_FREE(md, size); lnet_md_free() 288 lnet_md2handle(lnet_handle_md_t *handle, lnet_libmd_t *md) lnet_md2handle() argument 290 handle->cookie = md->md_lh.lh_cookie; lnet_md2handle() 455 void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, 458 void lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev); 513 void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, 515 void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md); 619 void lnet_md_unlink(lnet_libmd_t *md);
|
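The lib-lnet.h excerpt carries its own reasoning in comments: an MD is exhausted when its threshold reaches zero or the next max-size chunk would run past its length, and it may be unlinked once its refcount is zero and it is either flagged a zombie or is auto-unlink and exhausted. A stand-alone sketch of those two predicates, with simplified names and flag values:

/* Sketch of lnet_md_exhausted()/lnet_md_unlinkable() logic. */
#include <stdbool.h>
#include <stdio.h>

#define MD_MAX_SIZE     0x1
#define MD_FLAG_ZOMBIE  0x2
#define MD_FLAG_AUTO    0x4

struct fake_md {
	int threshold, refcount;
	unsigned options, flags;
	unsigned offset, max_size, length;
};

static bool md_exhausted(const struct fake_md *md)
{
	return md->threshold == 0 ||
	       ((md->options & MD_MAX_SIZE) &&
		md->offset + md->max_size > md->length);
}

static bool md_unlinkable(const struct fake_md *md)
{
	if (md->refcount != 0)
		return false;                  /* still in use */
	if (md->flags & MD_FLAG_ZOMBIE)
		return true;                   /* explicitly marked for deletion */
	return (md->flags & MD_FLAG_AUTO) && md_exhausted(md);
}

int main(void)
{
	struct fake_md md = { .threshold = 0, .flags = MD_FLAG_AUTO };

	printf("unlinkable: %d\n", md_unlinkable(&md));
	return 0;
}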
H A D | types.h | 264 * object type codes ('eq' for event queue, 'md' for memory descriptor, and 593 * been processed. In particular, the threshold field in md will 596 lnet_md_t md; member in struct:__anon10412
|
/linux-4.4.14/scripts/mod/ |
H A D | sumversion.c | 225 static inline void add_char(unsigned char c, struct md4_ctx *md) add_char() argument 227 md4_update(md, &c, 1); add_char() 231 struct md4_ctx *md) parse_string() 235 add_char(file[0], md); parse_string() 237 add_char(file[i], md); parse_string() 256 static int parse_file(const char *fname, struct md4_ctx *md) parse_file() argument 278 i += parse_string(file+i, len - i, md); parse_file() 288 add_char(file[i], md); parse_file() 305 static int parse_source_files(const char *objfile, struct md4_ctx *md) parse_source_files() argument 351 if (!parse_file(p, md)) { parse_source_files() 379 if (!parse_file(line, md)) { parse_source_files() 404 struct md4_ctx md; get_src_version() local 441 md4_init(&md); get_src_version() 446 !parse_source_files(fname, &md)) get_src_version() 450 md4_final_ascii(&md, sum, sumlen); get_src_version() 230 parse_string(const char *file, unsigned long len, struct md4_ctx *md) parse_string() argument
|
/linux-4.4.14/block/partitions/ |
H A D | mac.c | 41 struct mac_driver_desc *md; mac_partition() local 44 md = read_part_sector(state, 0, &sect); mac_partition() 45 if (!md) mac_partition() 47 if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) { mac_partition() 51 secsize = be16_to_cpu(md->block_size); mac_partition()
|
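The mac.c excerpt reads the driver descriptor's fields as big-endian values before comparing against the magic. A userspace sketch of that endian-safe check, using the conventional "ER" signature and a small helper in place of the kernel's be16_to_cpu():

/* Sketch: decode big-endian on-disk fields before testing them. */
#include <stdint.h>
#include <stdio.h>

#define MAC_DRIVER_MAGIC 0x4552                 /* 'E' 'R' */

static uint16_t load_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);    /* big-endian bytes -> host value */
}

int main(void)
{
	uint8_t sector0[4] = { 0x45, 0x52, 0x02, 0x00 };  /* signature, block_size */

	if (load_be16(sector0) != MAC_DRIVER_MAGIC) {
		puts("not a Mac partition map");
		return 1;
	}
	printf("block size: %u\n", load_be16(sector0 + 2));
	return 0;
}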
/linux-4.4.14/drivers/clk/qcom/ |
H A D | clk-rcg.c | 114 static u32 md_to_m(struct mn *mn, u32 md) md_to_m() argument 116 md >>= mn->m_val_shift; md_to_m() 117 md &= BIT(mn->width) - 1; md_to_m() 118 return md; md_to_m() 140 static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md) mn_to_md() argument 146 md &= ~mask; mn_to_md() 150 md |= m; mn_to_md() 151 md |= ~n & mask_w; mn_to_md() 154 return md; mn_to_md() 208 u32 ns, md, reg; configure_bank() local 241 ret = regmap_read(rcg->clkr.regmap, md_reg, &md); configure_bank() 244 md = mn_to_md(mn, f->m, f->n, md); configure_bank() 245 ret = regmap_write(rcg->clkr.regmap, md_reg, md); configure_bank() 302 u32 ns, md, reg; clk_dyn_rcg_set_parent() local 314 regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md); clk_dyn_rcg_set_parent() 315 f.m = md_to_m(&rcg->mn[bank], md); clk_dyn_rcg_set_parent() 353 u32 pre_div, m = 0, n = 0, ns, md, mode = 0; clk_rcg_recalc_rate() local 360 regmap_read(rcg->clkr.regmap, rcg->md_reg, &md); clk_rcg_recalc_rate() 361 m = md_to_m(mn, md); clk_rcg_recalc_rate() 378 u32 m, n, pre_div, ns, md, mode, reg; clk_dyn_rcg_recalc_rate() local 392 regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md); clk_dyn_rcg_recalc_rate() 393 m = md_to_m(mn, md); clk_dyn_rcg_recalc_rate() 484 u32 ns, md, ctl; __clk_rcg_set_rate() local 498 regmap_read(rcg->clkr.regmap, rcg->md_reg, &md); __clk_rcg_set_rate() 499 md = mn_to_md(mn, f->m, f->n, md); __clk_rcg_set_rate() 500 regmap_write(rcg->clkr.regmap, rcg->md_reg, md); __clk_rcg_set_rate()
|
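In the clk-rcg.c excerpt the M value lives inside the MD register at mn->m_val_shift with mn->width bits, so reading it is shift-then-mask and writing it is clear-then-or. A small sketch of that field handling; note the real mn_to_md() additionally folds the complement of N into the register, which is omitted here:

/* Sketch of md_to_m() plus a simplified inverse. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct mn { unsigned width, m_val_shift; };

static uint32_t md_to_m(const struct mn *mn, uint32_t md)
{
	md >>= mn->m_val_shift;
	md &= BIT(mn->width) - 1;
	return md;
}

static uint32_t m_to_md(const struct mn *mn, uint32_t md, uint32_t m)
{
	uint32_t mask = (BIT(mn->width) - 1) << mn->m_val_shift;

	md &= ~mask;                               /* clear the old M field */
	md |= (m << mn->m_val_shift) & mask;       /* install the new one */
	return md;
}

int main(void)
{
	struct mn mn = { .width = 8, .m_val_shift = 16 };
	uint32_t md = m_to_md(&mn, 0, 5);

	printf("md=0x%08x m=%u\n", md, md_to_m(&mn, md));
	return 0;
}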
/linux-4.4.14/arch/unicore32/mm/ |
H A D | mmu.c | 204 * page tables for the mapping specified by `md'. We 208 static void __init create_mapping(struct map_desc *md) create_mapping() argument 214 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { create_mapping() 217 __pfn_to_phys((u64)md->pfn), md->virtual); create_mapping() 221 if ((md->type == MT_DEVICE || md->type == MT_ROM) && create_mapping() 222 md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { create_mapping() 225 __pfn_to_phys((u64)md->pfn), md->virtual); create_mapping() 228 type = &mem_types[md->type]; create_mapping() 230 addr = md->virtual & PAGE_MASK; create_mapping() 231 phys = (unsigned long)__pfn_to_phys(md->pfn); create_mapping() 232 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); create_mapping() 237 __pfn_to_phys(md->pfn), addr); create_mapping()
|
/linux-4.4.14/drivers/mtd/nand/ |
H A D | nand_bbt.c | 383 * @md: descriptor for the bad block table mirror 389 struct nand_bbt_descr *td, struct nand_bbt_descr *md) read_abs_bbts() 403 if (md && (md->options & NAND_BBT_VERSION)) { read_abs_bbts() 404 scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift, read_abs_bbts() 405 mtd->writesize, md); read_abs_bbts() 406 md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; read_abs_bbts() 408 md->pages[0], md->version[0]); read_abs_bbts() 591 * @md: descriptor for the bad block table mirror 597 struct nand_bbt_descr *md) search_read_bbts() 603 if (md) search_read_bbts() 604 search_bbt(mtd, buf, md); search_read_bbts() 612 * @md: descriptor for the bad block table mirror 618 struct nand_bbt_descr *td, struct nand_bbt_descr *md, write_bbt() 689 if (!md || md->pages[chip] != page) write_bbt() 843 struct nand_bbt_descr *md = this->bbt_md; check_create() local 861 if (md) { check_create() 862 if (td->pages[i] == -1 && md->pages[i] == -1) { check_create() 866 rd = md; check_create() 868 } else if (md->pages[i] == -1) { check_create() 871 } else if (td->version[i] == md->version[i]) { check_create() 874 rd2 = md; check_create() 875 } else if (((int8_t)(td->version[i] - md->version[i])) > 0) { check_create() 879 rd = md; check_create() 901 if (md) check_create() 902 md->version[i] = 1; check_create() 933 if (md) { check_create() 934 td->version[i] = max(td->version[i], md->version[i]); check_create() 935 md->version[i] = td->version[i]; check_create() 940 res = write_bbt(mtd, buf, td, md, chipsel); check_create() 946 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { check_create() 947 res = write_bbt(mtd, buf, md, td, chipsel); check_create() 1081 struct nand_bbt_descr *md = this->bbt_md; nand_scan_bbt() local 1104 verify_bbt_descr(mtd, md); nand_scan_bbt() 1117 read_abs_bbts(mtd, buf, td, md); nand_scan_bbt() 1120 search_read_bbts(mtd, buf, td, md); nand_scan_bbt() 1129 if (md) nand_scan_bbt() 1130 mark_bbt_region(mtd, md); nand_scan_bbt() 1155 struct nand_bbt_descr *md = this->bbt_md; nand_update_bbt() local 1177 if (md) nand_update_bbt() 1178 md->version[chip]++; nand_update_bbt() 1182 res = write_bbt(mtd, buf, td, md, chipsel); nand_update_bbt() 1187 if (md && (md->options & NAND_BBT_WRITE)) { nand_update_bbt() 1188 res = write_bbt(mtd, buf, md, td, chipsel); nand_update_bbt() 388 read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) read_abs_bbts() argument 595 search_read_bbts(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) search_read_bbts() argument 617 write_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel) write_bbt() argument
|
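The check_create() lines above decide which bad-block table is newer with ((int8_t)(td->version[i] - md->version[i])) > 0, a comparison that still works after the 8-bit version counter wraps. A stand-alone demonstration of that trick:

/* Sketch: wrap-safe "is a newer than b" for an 8-bit version counter. */
#include <stdint.h>
#include <stdio.h>

static int is_newer(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b) > 0;    /* modular comparison, survives wraparound */
}

int main(void)
{
	printf("%d\n", is_newer(5, 3));      /* 1: 5 is newer than 3 */
	printf("%d\n", is_newer(1, 255));    /* 1: 1 is newer after the wrap */
	printf("%d\n", is_newer(3, 5));      /* 0 */
	return 0;
}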
/linux-4.4.14/include/linux/ |
H A D | device-mapper.h | 369 int dm_create(int minor, struct mapped_device **md); 372 * Reference counting for md. 375 void dm_get(struct mapped_device *md); 376 int dm_hold(struct mapped_device *md); 377 void dm_put(struct mapped_device *md); 382 void dm_set_mdptr(struct mapped_device *md, void *ptr); 383 void *dm_get_mdptr(struct mapped_device *md); 388 int dm_suspend(struct mapped_device *md, unsigned suspend_flags); 389 int dm_resume(struct mapped_device *md); 394 uint32_t dm_get_event_nr(struct mapped_device *md); 395 int dm_wait_event(struct mapped_device *md, int event_nr); 396 uint32_t dm_next_uevent_seq(struct mapped_device *md); 397 void dm_uevent_add(struct mapped_device *md, struct list_head *elist); 402 const char *dm_device_name(struct mapped_device *md); 403 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); 404 struct gendisk *dm_disk(struct mapped_device *md); 410 struct queue_limits *dm_get_queue_limits(struct mapped_device *md); 415 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); 416 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); 426 unsigned num_targets, struct mapped_device *md); 452 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx); 453 void dm_put_live_table(struct mapped_device *md, int srcu_idx); 454 void dm_sync_table(struct mapped_device *md); 478 struct dm_table *dm_swap_table(struct mapped_device *md,
|
H A D | efi.h | 912 extern u64 efi_mem_desc_end(efi_memory_desc_t *md); efi_esrt_init() 932 #define for_each_efi_memory_desc(m, md) \ efi_fake_memmap() 933 for ((md) = (m)->map; \ efi_fake_memmap() 934 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ efi_fake_memmap() 935 (md) = (void *)(md) + (m)->desc_size) efi_fake_memmap() 942 const efi_memory_desc_t *md); efi_fake_memmap()
|
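The for_each_efi_memory_desc() macro quoted above advances by desc_size bytes rather than by sizeof(struct), because firmware may hand back descriptors larger than the C structure. A userspace model of the same byte-stride walk, with invented sizes and contents:

/* Sketch: iterate variable-stride records with pointer arithmetic,
 * the way the efi.h macro steps through the memory map. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_md { uint32_t type; uint64_t num_pages; };

int main(void)
{
	const size_t desc_size = 32;          /* pretend stride > sizeof(struct fake_md) */
	unsigned char map[3 * 32];
	memset(map, 0, sizeof(map));

	for (size_t i = 0; i < 3; i++) {
		struct fake_md md = { .type = (uint32_t)i, .num_pages = 4 * (i + 1) };
		memcpy(map + i * desc_size, &md, sizeof(md));
	}

	for (unsigned char *p = map; p <= map + sizeof(map) - desc_size; p += desc_size) {
		struct fake_md md;
		memcpy(&md, p, sizeof(md));
		printf("type %u: %llu pages\n", (unsigned)md.type,
		       (unsigned long long)md.num_pages);
	}
	return 0;
}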
H A D | backing-dev-defs.h | 140 congested_fn *congested_fn; /* Function pointer if device is md/dm */
|
/linux-4.4.14/drivers/media/pci/cx23885/ |
H A D | cx23885-av.h | 6 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-video.h | 4 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23888-ir.h | 6 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-input.h | 6 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-ioctl.h | 6 * Copyright (c) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-ir.h | 6 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-av.c | 6 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-ioctl.c | 6 * Copyright (c) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-ir.c | 6 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23885-input.c | 8 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx23888-ir.c | 6 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
/linux-4.4.14/drivers/clk/shmobile/ |
H A D | clk-r8a7779.c | 55 #define CPG_CLK_CONFIG_INDEX(md) (((md) & (BIT(2)|BIT(1))) >> 1) 83 #define CPG_PLLA_MULT_INDEX(md) (((md) & (BIT(12)|BIT(11))) >> 11)
|
H A D | clk-rcar-gen2.c | 267 #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \ 268 (((md) & BIT(13)) >> 12) | \ 269 (((md) & BIT(19)) >> 19))
|
/linux-4.4.14/fs/proc/ |
H A D | task_mmu.c | 1385 struct numa_maps md; member in struct:numa_maps_private 1388 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, gather_stats() argument 1393 md->pages += nr_pages; gather_stats() 1395 md->dirty += nr_pages; gather_stats() 1398 md->swapcache += nr_pages; gather_stats() 1401 md->active += nr_pages; gather_stats() 1404 md->writeback += nr_pages; gather_stats() 1407 md->anon += nr_pages; gather_stats() 1409 if (count > md->mapcount_max) gather_stats() 1410 md->mapcount_max = count; gather_stats() 1412 md->node[page_to_nid(page)] += nr_pages; gather_stats() 1467 struct numa_maps *md = walk->private; gather_pte_stats() local 1479 gather_stats(page, md, pmd_dirty(*pmd), gather_pte_stats() 1493 gather_stats(page, md, pte_dirty(*pte), 1); gather_pte_stats() 1504 struct numa_maps *md; gather_hugetlb_stats() local 1514 md = walk->private; gather_hugetlb_stats() 1515 gather_stats(page, md, pte_dirty(huge_pte), 1); gather_hugetlb_stats() 1535 struct numa_maps *md = &numa_priv->md; show_numa_map() local 1541 .private = md, show_numa_map() 1552 memset(md, 0, sizeof(*md)); show_numa_map() 1590 if (!md->pages) show_numa_map() 1593 if (md->anon) show_numa_map() 1594 seq_printf(m, " anon=%lu", md->anon); show_numa_map() 1596 if (md->dirty) show_numa_map() 1597 seq_printf(m, " dirty=%lu", md->dirty); show_numa_map() 1599 if (md->pages != md->anon && md->pages != md->dirty) show_numa_map() 1600 seq_printf(m, " mapped=%lu", md->pages); show_numa_map() 1602 if (md->mapcount_max > 1) show_numa_map() 1603 seq_printf(m, " mapmax=%lu", md->mapcount_max); show_numa_map() 1605 if (md->swapcache) show_numa_map() 1606 seq_printf(m, " swapcache=%lu", md->swapcache); show_numa_map() 1608 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) show_numa_map() 1609 seq_printf(m, " active=%lu", md->active); show_numa_map() 1611 if (md->writeback) show_numa_map() 1612 seq_printf(m, " writeback=%lu", md->writeback); show_numa_map() 1615 if (md->node[nid]) show_numa_map() 1616 seq_printf(m, " N%d=%lu", nid, md->node[nid]); show_numa_map()
|
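The task_mmu.c excerpt accumulates per-VMA NUMA statistics: each page bumps totals in a numa_maps structure, including a per-node counter indexed by the page's node, and show_numa_map() only prints the fields that ended up non-zero. A small sketch of that accumulate-then-report shape, with made-up node ids and page data:

/* Sketch of the gather_stats()/show_numa_map() pattern. */
#include <stdio.h>

#define MAX_NODES 4

struct fake_numa_maps {
	unsigned long pages, anon, dirty, mapcount_max;
	unsigned long node[MAX_NODES];
};

static void gather(struct fake_numa_maps *md, int nid, int dirty, int anon,
		   unsigned long mapcount, unsigned long nr_pages)
{
	md->pages += nr_pages;
	if (dirty)
		md->dirty += nr_pages;
	if (anon)
		md->anon += nr_pages;
	if (mapcount > md->mapcount_max)
		md->mapcount_max = mapcount;
	md->node[nid] += nr_pages;             /* per-node tally */
}

int main(void)
{
	struct fake_numa_maps md = { 0 };

	gather(&md, 0, 1, 1, 1, 3);
	gather(&md, 1, 0, 1, 2, 5);

	printf("pages=%lu anon=%lu dirty=%lu mapmax=%lu",
	       md.pages, md.anon, md.dirty, md.mapcount_max);
	for (int nid = 0; nid < MAX_NODES; nid++)
		if (md.node[nid])
			printf(" N%d=%lu", nid, md.node[nid]);
	printf("\n");
	return 0;
}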
/linux-4.4.14/arch/arm/mm/ |
H A D | mmu.c | 821 static void __init create_36bit_mapping(struct map_desc *md, create_36bit_mapping() argument 828 addr = md->virtual; create_36bit_mapping() 829 phys = __pfn_to_phys(md->pfn); create_36bit_mapping() 830 length = PAGE_ALIGN(md->length); create_36bit_mapping() 834 (long long)__pfn_to_phys((u64)md->pfn), addr); create_36bit_mapping() 846 (long long)__pfn_to_phys((u64)md->pfn), addr); create_36bit_mapping() 850 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { create_36bit_mapping() 852 (long long)__pfn_to_phys((u64)md->pfn), addr); create_36bit_mapping() 860 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); create_36bit_mapping() 881 * page tables for the mapping specified by `md'. We 886 static void __init create_mapping(struct map_desc *md) create_mapping() argument 893 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { create_mapping() 895 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); create_mapping() 899 if ((md->type == MT_DEVICE || md->type == MT_ROM) && create_mapping() 900 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && create_mapping() 901 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { create_mapping() 903 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); create_mapping() 906 type = &mem_types[md->type]; create_mapping() 912 if (md->pfn >= 0x100000) { create_mapping() 913 create_36bit_mapping(md, type); create_mapping() 918 addr = md->virtual & PAGE_MASK; create_mapping() 919 phys = __pfn_to_phys(md->pfn); create_mapping() 920 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); create_mapping() 924 (long long)__pfn_to_phys(md->pfn), addr); create_mapping() 945 struct map_desc *md; iotable_init() local 954 for (md = io_desc; nr; md++, nr--) { iotable_init() 955 create_mapping(md); iotable_init() 958 vm->addr = (void *)(md->virtual & PAGE_MASK); iotable_init() 959 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); iotable_init() 960 vm->phys_addr = __pfn_to_phys(md->pfn); iotable_init() 962 vm->flags |= VM_ARM_MTYPE(md->type); iotable_init()
|
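Both mmu.c excerpts (unicore32 and ARM) prepare a mapping the same way: round the virtual address down to a page boundary, then grow the length by the offset that was rounded away before rounding it up, so the mapping still covers the whole requested range. A stand-alone sketch of that arithmetic:

/* Sketch of the addr/length computation in create_mapping(). */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long virtual = 0xc0001234, length = 0x100;

	unsigned long addr = virtual & PAGE_MASK;
	unsigned long len  = PAGE_ALIGN(length + (virtual & ~PAGE_MASK));

	printf("map [0x%lx, 0x%lx) to cover [0x%lx, 0x%lx)\n",
	       addr, addr + len, virtual, virtual + length);
	return 0;
}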
/linux-4.4.14/arch/sparc/mm/ |
H A D | init_64.c | 919 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, scan_pio_for_cfg_handle() argument 924 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { mdesc_for_each_arc() 925 u64 target = mdesc_arc_target(md, arc); mdesc_for_each_arc() 928 val = mdesc_get_property(md, target, mdesc_for_each_arc() 936 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, scan_arcs_for_cfg_handle() argument 942 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { mdesc_for_each_arc() 943 u64 target = mdesc_arc_target(md, arc); mdesc_for_each_arc() 944 const char *name = mdesc_node_name(md, target); mdesc_for_each_arc() 950 val = mdesc_get_property(md, target, "latency", NULL); mdesc_for_each_arc() 963 return scan_pio_for_cfg_handle(md, candidate, cfg_handle); 969 struct mdesc_handle *md; of_node_to_nid() local 987 md = mdesc_grab(); of_node_to_nid() 991 mdesc_for_each_node_by_name(md, grp, "group") { of_node_to_nid() 992 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { of_node_to_nid() 999 mdesc_release(md); of_node_to_nid() 1031 static int __init grab_mlgroups(struct mdesc_handle *md) grab_mlgroups() argument 1037 mdesc_for_each_node_by_name(md, node, "memory-latency-group") grab_mlgroups() 1051 mdesc_for_each_node_by_name(md, node, "memory-latency-group") { grab_mlgroups() 1057 val = mdesc_get_property(md, node, "latency", NULL); grab_mlgroups() 1059 val = mdesc_get_property(md, node, "address-match", NULL); grab_mlgroups() 1061 val = mdesc_get_property(md, node, "address-mask", NULL); grab_mlgroups() 1072 static int __init grab_mblocks(struct mdesc_handle *md) grab_mblocks() argument 1078 mdesc_for_each_node_by_name(md, node, "mblock") grab_mblocks() 1092 mdesc_for_each_node_by_name(md, node, "mblock") { grab_mblocks() 1096 val = mdesc_get_property(md, node, "base", NULL); grab_mblocks() 1098 val = mdesc_get_property(md, node, "size", NULL); grab_mblocks() 1100 val = mdesc_get_property(md, node, grab_mblocks() 1118 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, numa_parse_mdesc_group_cpus() argument 1125 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { mdesc_for_each_arc() 1126 u64 target = mdesc_arc_target(md, arc); mdesc_for_each_arc() 1127 const char *name = mdesc_node_name(md, target); mdesc_for_each_arc() 1132 id = mdesc_get_property(md, target, "id", NULL); mdesc_for_each_arc() 1173 static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, find_numa_latencies_for_group() argument 1178 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { mdesc_for_each_arc() 1180 u64 target = mdesc_arc_target(md, arc); mdesc_for_each_arc() 1192 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, numa_attach_mlgroup() argument 1199 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { mdesc_for_each_arc() 1200 u64 target = mdesc_arc_target(md, arc); mdesc_for_each_arc() 1230 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, numa_parse_mdesc_group() argument 1236 numa_parse_mdesc_group_cpus(md, grp, &mask); numa_parse_mdesc_group() 1249 return numa_attach_mlgroup(md, grp, index); numa_parse_mdesc_group() 1254 struct mdesc_handle *md = mdesc_grab(); numa_parse_mdesc() local 1258 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); numa_parse_mdesc() 1260 mdesc_release(md); numa_parse_mdesc() 1264 err = grab_mblocks(md); numa_parse_mdesc() 1268 err = grab_mlgroups(md); numa_parse_mdesc() 1273 mdesc_for_each_node_by_name(md, node, "group") { 
numa_parse_mdesc() 1274 err = numa_parse_mdesc_group(md, node, count); numa_parse_mdesc() 1281 mdesc_for_each_node_by_name(md, node, "group") { numa_parse_mdesc() 1282 find_numa_latencies_for_group(md, node, count); numa_parse_mdesc() 1306 mdesc_release(md); numa_parse_mdesc()
|
/linux-4.4.14/drivers/media/platform/s3c-camif/ |
H A D | camif-core.c | 310 struct media_device *md = &camif->media_dev; camif_media_dev_register() local 315 memset(md, 0, sizeof(*md)); camif_media_dev_register() 316 snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF", camif_media_dev_register() 318 strlcpy(md->bus_info, "platform", sizeof(md->bus_info)); camif_media_dev_register() 319 md->hw_revision = ip_rev; camif_media_dev_register() 320 md->driver_version = KERNEL_VERSION(1, 0, 0); camif_media_dev_register() 322 md->dev = camif->dev; camif_media_dev_register() 325 v4l2_dev->mdev = md; camif_media_dev_register() 331 ret = media_device_register(md); camif_media_dev_register()
|
/linux-4.4.14/drivers/s390/char/ |
H A D | tape_3590.c | 993 sense->fmt.f70.md); tape_3590_print_mim_msg_f0() 1004 "procedure %i", sense->fmt.f70.md); tape_3590_print_mim_msg_f0() 1047 "interface 0x%02x", sense->fmt.f71.md[0]); tape_3590_print_io_sim_msg_f1() 1051 "0x%02x", sense->fmt.f71.md[0]); tape_3590_print_io_sim_msg_f1() 1055 "0x%02x", sense->fmt.f71.md[0]); tape_3590_print_io_sim_msg_f1() 1059 sense->fmt.f71.md[0]); tape_3590_print_io_sim_msg_f1() 1063 "0x%02x", sense->fmt.f71.md[0]); tape_3590_print_io_sim_msg_f1() 1081 "0x%x on CU", sense->fmt.f71.md[1]); tape_3590_print_io_sim_msg_f1() 1084 "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1], tape_3590_print_io_sim_msg_f1() 1085 sense->fmt.f71.md[2]); tape_3590_print_io_sim_msg_f1() 1091 sense->fmt.f71.md[1]); tape_3590_print_io_sim_msg_f1() 1095 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_io_sim_msg_f1() 1100 " path 0x%x on CU", sense->fmt.f71.md[1]); tape_3590_print_io_sim_msg_f1() 1104 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_io_sim_msg_f1() 1110 sense->fmt.f71.md[1]); tape_3590_print_io_sim_msg_f1() 1114 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_io_sim_msg_f1() 1158 "interface 0x%02x", sense->fmt.f71.md[0]); tape_3590_print_dev_sim_msg_f2() 1162 sense->fmt.f71.md[0]); tape_3590_print_dev_sim_msg_f2() 1166 " 0x%02x", sense->fmt.f71.md[0]); tape_3590_print_dev_sim_msg_f2() 1191 sense->fmt.f71.md[1]); tape_3590_print_dev_sim_msg_f2() 1195 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_dev_sim_msg_f2() 1200 "interface 0x%x on DV", sense->fmt.f71.md[1]); tape_3590_print_dev_sim_msg_f2() 1204 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_dev_sim_msg_f2() 1209 " 0x%x on DV", sense->fmt.f71.md[1]); tape_3590_print_dev_sim_msg_f2() 1213 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_dev_sim_msg_f2() 1222 sense->fmt.f71.md[1]); tape_3590_print_dev_sim_msg_f2() 1226 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); tape_3590_print_dev_sim_msg_f2()
|
H A D | tape_3590.h | 96 unsigned int md:8; member in struct:tape_3590_sense::__anon8946::__anon8947 109 unsigned char md[3]; member in struct:tape_3590_sense::__anon8946::__anon8948
|
/linux-4.4.14/drivers/media/pci/cx18/ |
H A D | cx18-gpio.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-irq.h | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-streams.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-io.h | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> 31 * The implmentation is the fault of Andy Walls <awalls@md.metrocast.net>.
|
H A D | cx18-alsa.h | 4 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-irq.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-queue.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-io.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-alsa-mixer.c | 5 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-cards.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-mailbox.h | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-scb.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-alsa-main.c | 4 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-alsa-pcm.c | 5 * Copyright (C) 2009 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-av-firmware.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-gpio.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-i2c.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-scb.h | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-av-core.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-av-audio.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-cards.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-dvb.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-firmware.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-queue.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-mailbox.c | 5 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-driver.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-driver.h | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-fileops.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-ioctl.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
H A D | cx18-streams.c | 7 * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | efi.h | 104 extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable); 111 extern void __init efi_map_region(efi_memory_desc_t *md); 112 extern void __init efi_map_region_fixed(efi_memory_desc_t *md); 116 extern void __init old_map_region(efi_memory_desc_t *md);
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/ |
H A D | dma.h | 27 unsigned md : 16; member in struct:dma_descr_group 68 unsigned md : 16; member in struct:dma_descr_data
|
H A D | sser_defs.h | 182 unsigned int md : 1; member in struct:__anon852 191 unsigned int md : 1; member in struct:__anon853
|
H A D | dma_defs.h | 125 unsigned int md : 16; member in struct:__anon415 277 unsigned int md : 16; member in struct:__anon423
|
/linux-4.4.14/arch/ia64/hp/sim/boot/ |
H A D | fw-emu.c | 238 efi_memory_desc_t *efi_memmap, *md; sys_fw_init() local 247 md = efi_memmap + i++; \ sys_fw_init() 248 md->type = typ; \ sys_fw_init() 249 md->pad = 0; \ sys_fw_init() 250 md->phys_addr = start; \ sys_fw_init() 251 md->virt_addr = 0; \ sys_fw_init() 252 md->num_pages = (end - start) >> 12; \ sys_fw_init() 253 md->attribute = attr; \ sys_fw_init()
|
/linux-4.4.14/net/ipv4/ |
H A D | tcp_highspeed.c | 17 unsigned int md; member in struct:hstcp_aimd_val 156 return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); hstcp_ssthresh()
|
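In the tcp_highspeed.c excerpt the table entry's md field is a backoff factor expressed in 1/256ths: ssthresh becomes cwnd minus (cwnd * md) >> 8, floored at two segments. A stand-alone rendering of that arithmetic:

/* Sketch of the hstcp_ssthresh() reduction. */
#include <stdio.h>

static unsigned int hstcp_like_ssthresh(unsigned int snd_cwnd, unsigned int md)
{
	unsigned int reduced = snd_cwnd - ((snd_cwnd * md) >> 8);

	return reduced > 2U ? reduced : 2U;    /* never drop below 2 segments */
}

int main(void)
{
	/* md = 128 corresponds to the classic halving of cwnd */
	printf("%u\n", hstcp_like_ssthresh(100, 128));   /* 50 */
	printf("%u\n", hstcp_like_ssthresh(3, 128));     /* floored at 2 */
	return 0;
}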
H A D | ip_tunnel_core.c | 123 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, iptunnel_metadata_reply() argument 129 if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX) iptunnel_metadata_reply() 137 src = &md->u.tun_info; iptunnel_metadata_reply()
|
/linux-4.4.14/fs/ncpfs/ |
H A D | inode.c | 493 struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data; ncp_fill_super() local 495 data.flags = md->flags; ncp_fill_super() 497 data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid); ncp_fill_super() 498 data.wdog_pid = find_get_pid(md->wdog_pid); ncp_fill_super() 499 data.ncp_fd = md->ncp_fd; ncp_fill_super() 500 data.time_out = md->time_out; ncp_fill_super() 501 data.retry_count = md->retry_count; ncp_fill_super() 502 data.uid = make_kuid(current_user_ns(), md->uid); ncp_fill_super() 503 data.gid = make_kgid(current_user_ns(), md->gid); ncp_fill_super() 504 data.file_mode = md->file_mode; ncp_fill_super() 505 data.dir_mode = md->dir_mode; ncp_fill_super() 507 memcpy(data.mounted_vol, md->mounted_vol, ncp_fill_super() 513 struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data; ncp_fill_super() local 515 data.flags = md->flags; ncp_fill_super() 516 data.mounted_uid = make_kuid(current_user_ns(), md->mounted_uid); ncp_fill_super() 517 data.wdog_pid = find_get_pid(md->wdog_pid); ncp_fill_super() 518 data.ncp_fd = md->ncp_fd; ncp_fill_super() 519 data.time_out = md->time_out; ncp_fill_super() 520 data.retry_count = md->retry_count; ncp_fill_super() 521 data.uid = make_kuid(current_user_ns(), md->uid); ncp_fill_super() 522 data.gid = make_kgid(current_user_ns(), md->gid); ncp_fill_super() 523 data.file_mode = md->file_mode; ncp_fill_super() 524 data.dir_mode = md->dir_mode; ncp_fill_super()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
H A D | llite_lib.c | 143 static int client_common_fill_super(struct super_block *sb, char *md, char *dt, client_common_fill_super() argument 158 obd = class_name2obd(md); client_common_fill_super() 160 CERROR("MD %s: not setup or attached\n", md); client_common_fill_super() 175 err = ldebugfs_register_mountpoint(llite_root, sb, dt, md); client_common_fill_super() 235 LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n", client_common_fill_super() 236 md); client_common_fill_super() 239 CERROR("cannot connect to %s: rc = %d\n", md, err); client_common_fill_super() 484 CERROR("failed to understand root inode md: rc = %d\n", err); client_common_fill_super() 854 char *dt = NULL, *md = NULL; ll_fill_super() local 922 md = kasprintf(GFP_NOFS, "%s-%p", lprof->lp_md, cfg->cfg_instance); ll_fill_super() 923 if (!md) { ll_fill_super() 929 err = client_common_fill_super(sb, md, dt, mnt); ll_fill_super() 932 kfree(md); ll_fill_super() 1100 struct lustre_md md; ll_md_setattr() local 1133 sbi->ll_md_exp, &md); ll_md_setattr() 1147 op_data->op_handle = md.body->handle; ll_md_setattr() 1148 op_data->op_ioepoch = md.body->ioepoch; ll_md_setattr() 1150 ll_update_inode(inode, &md); ll_md_setattr() 1509 void ll_update_inode(struct inode *inode, struct lustre_md *md) ll_update_inode() argument 1512 struct mdt_body *body = md->body; ll_update_inode() 1513 struct lov_stripe_md *lsm = md->lsm; ll_update_inode() 1520 cl_file_inode_init(inode, md); ll_update_inode() 1529 ll_update_remote_perm(inode, md->remote_perm); ll_update_inode() 1536 lli->lli_posix_acl = md->posix_acl; ll_update_inode() 1649 struct lustre_md *md = opaque; ll_read_inode2() local 1665 ll_update_inode(inode, md); ll_read_inode2() 1944 struct lustre_md md; ll_prep_inode() local 1950 sbi->ll_md_exp, &md); ll_prep_inode() 1955 ll_update_inode(*inode, &md); ll_prep_inode() 1963 LASSERT(fid_is_sane(&md.body->fid1)); ll_prep_inode() 1965 *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1, ll_prep_inode() 1967 &md); ll_prep_inode() 1970 if (md.posix_acl) { ll_prep_inode() 1971 posix_acl_release(md.posix_acl); ll_prep_inode() 1972 md.posix_acl = NULL; ll_prep_inode() 2004 conf.u.coc_md = &md; ll_prep_inode() 2011 if (md.lsm != NULL) ll_prep_inode() 2012 obd_free_memmd(sbi->ll_dt_exp, &md.lsm); ll_prep_inode() 2013 md_free_lustre_md(sbi->ll_md_exp, &md); ll_prep_inode()
|
H A D | namei.c | 61 struct lustre_md *md = opaque; ll_test_inode() local 63 if (unlikely(!(md->body->valid & OBD_MD_FLID))) { ll_test_inode() 68 if (!lu_fid_eq(&lli->lli_fid, &md->body->fid1)) ll_test_inode() 108 struct lustre_md *md) ll_iget() 113 inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md); ll_iget() 119 ll_read_inode2(inode, md); ll_iget() 124 ll_get_fsname(sb, NULL, 0), md->lsm, ll_iget() 126 rc = cl_file_inode_init(inode, md); ll_iget() 136 ll_update_inode(inode, md); ll_iget() 138 inode, PFID(&md->body->fid1)); ll_iget() 107 ll_iget(struct super_block *sb, ino_t hash, struct lustre_md *md) ll_iget() argument
|
H A D | file.c | 3363 struct lustre_md md = { NULL }; ll_layout_lock_set() local 3406 rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm, ll_layout_lock_set() 3410 if (md.lsm != NULL) ll_layout_lock_set() 3411 *gen = md.lsm->lsm_layout_gen; ll_layout_lock_set() 3428 conf.u.coc_md = &md; ll_layout_lock_set() 3431 if (md.lsm != NULL) ll_layout_lock_set() 3432 obd_free_memmd(sbi->ll_dt_exp, &md.lsm); ll_layout_lock_set()
|
/linux-4.4.14/drivers/staging/lustre/lustre/mdc/ |
H A D | mdc_request.c | 410 static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) mdc_unpack_acl() argument 413 struct mdt_body *body = md->body; mdc_unpack_acl() 443 md->posix_acl = acl; mdc_unpack_acl() 447 #define mdc_unpack_acl(req, md) 0 452 struct lustre_md *md) mdc_get_lustre_md() 457 LASSERT(md); mdc_get_lustre_md() 458 memset(md, 0, sizeof(*md)); mdc_get_lustre_md() 460 md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); mdc_get_lustre_md() 461 LASSERT(md->body != NULL); mdc_get_lustre_md() 463 if (md->body->valid & OBD_MD_FLEASIZE) { mdc_get_lustre_md() 467 if (!S_ISREG(md->body->mode)) { mdc_get_lustre_md() 474 if (md->body->eadatasize == 0) { mdc_get_lustre_md() 480 lmmsize = md->body->eadatasize; mdc_get_lustre_md() 487 rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize); mdc_get_lustre_md() 491 if (rc < sizeof(*md->lsm)) { mdc_get_lustre_md() 493 "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n", mdc_get_lustre_md() 494 rc, (int)sizeof(*md->lsm)); mdc_get_lustre_md() 499 } else if (md->body->valid & OBD_MD_FLDIREA) { mdc_get_lustre_md() 503 if (!S_ISDIR(md->body->mode)) { mdc_get_lustre_md() 510 if (md->body->eadatasize == 0) { mdc_get_lustre_md() 515 if (md->body->valid & OBD_MD_MEA) { mdc_get_lustre_md() 516 lmvsize = md->body->eadatasize; mdc_get_lustre_md() 524 rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv, mdc_get_lustre_md() 529 if (rc < sizeof(*md->mea)) { mdc_get_lustre_md() 531 "size too small: rc < sizeof(*md->mea) (%d < %d)\n", mdc_get_lustre_md() 532 rc, (int)sizeof(*md->mea)); mdc_get_lustre_md() 540 if (md->body->valid & OBD_MD_FLRMTPERM) { mdc_get_lustre_md() 543 md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL, mdc_get_lustre_md() 545 if (!md->remote_perm) { mdc_get_lustre_md() 549 } else if (md->body->valid & OBD_MD_FLACL) { mdc_get_lustre_md() 554 if (md->body->aclsize) { mdc_get_lustre_md() 555 rc = mdc_unpack_acl(req, md); mdc_get_lustre_md() 560 md->posix_acl = NULL; mdc_get_lustre_md() 568 posix_acl_release(md->posix_acl); mdc_get_lustre_md() 570 if (md->lsm) mdc_get_lustre_md() 571 obd_free_memmd(dt_exp, &md->lsm); mdc_get_lustre_md() 576 int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md) mdc_free_lustre_md() argument 450 mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req, struct obd_export *dt_exp, struct obd_export *md_exp, struct lustre_md *md) mdc_get_lustre_md() argument
|
H A D | mdc_internal.h | 109 struct lustre_md *md); 111 int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md);
|
/linux-4.4.14/tools/perf/util/ |
H A D | evlist.c | 714 struct perf_mmap *md = &evlist->mmap[idx]; perf_evlist__mmap_read() local 716 u64 old = md->prev; perf_evlist__mmap_read() 717 unsigned char *data = md->base + page_size; perf_evlist__mmap_read() 723 if (!atomic_read(&md->refcnt)) perf_evlist__mmap_read() 726 head = perf_mmap__read_head(md); perf_evlist__mmap_read() 737 if (diff > md->mask / 2 || diff < 0) { perf_evlist__mmap_read() 750 event = (union perf_event *)&data[old & md->mask]; perf_evlist__mmap_read() 757 if ((old & md->mask) + size != ((old + size) & md->mask)) { perf_evlist__mmap_read() 760 void *dst = md->event_copy; perf_evlist__mmap_read() 763 cpy = min(md->mask + 1 - (offset & md->mask), len); perf_evlist__mmap_read() 764 memcpy(dst, &data[offset & md->mask], cpy); perf_evlist__mmap_read() 770 event = (union perf_event *) md->event_copy; perf_evlist__mmap_read() 776 md->prev = old; perf_evlist__mmap_read() 781 static bool perf_mmap__empty(struct perf_mmap *md) perf_mmap__empty() argument 783 return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; perf_mmap__empty() 801 struct perf_mmap *md = &evlist->mmap[idx]; perf_evlist__mmap_consume() local 804 u64 old = md->prev; perf_evlist__mmap_consume() 806 perf_mmap__write_tail(md, old); perf_evlist__mmap_consume() 809 if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) perf_evlist__mmap_consume()
|
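The perf_evlist__mmap_read() excerpt reads records out of a power-of-two ring indexed by (pos & md->mask); a record that straddles the end of the buffer is reassembled piecewise into md->event_copy. A userspace sketch of that wrap handling, with invented sizes and contents:

/* Sketch: masked ring indexing with wrap-around copy into a bounce buffer. */
#include <stdio.h>
#include <string.h>

#define RING_SIZE 16                  /* must be a power of two */
#define MASK      (RING_SIZE - 1)

static void read_record(const unsigned char *data, unsigned long pos,
			size_t size, unsigned char *out)
{
	if ((pos & MASK) + size != ((pos + size) & MASK)) {
		/* record wraps: copy the tail piece, then the head piece */
		size_t done = 0;
		while (size) {
			size_t cpy = MASK + 1 - (pos & MASK);
			if (cpy > size)
				cpy = size;
			memcpy(out + done, data + (pos & MASK), cpy);
			pos += cpy;
			done += cpy;
			size -= cpy;
		}
	} else {
		memcpy(out, data + (pos & MASK), size);
	}
}

int main(void)
{
	unsigned char ring[RING_SIZE], out[8];

	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = (unsigned char)i;

	read_record(ring, 12, 8, out);    /* wraps: bytes 12..15 then 0..3 */
	for (int i = 0; i < 8; i++)
		printf("%d ", out[i]);
	printf("\n");
	return 0;
}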
H A D | symbol.c | 1099 struct kcore_mapfn_data *md = data; kcore_mapfn() local 1102 map = map__new2(start, md->dso, md->type); kcore_mapfn() 1109 list_add(&map->node, &md->maps); kcore_mapfn() 1119 struct kcore_mapfn_data md; dso__load_kcore() local 1143 md.dso = dso; dso__load_kcore() 1144 md.type = map->type; dso__load_kcore() 1145 INIT_LIST_HEAD(&md.maps); dso__load_kcore() 1155 err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, dso__load_kcore() 1161 if (list_empty(&md.maps)) { dso__load_kcore() 1178 list_for_each_entry(new_map, &md.maps, node) { dso__load_kcore() 1187 replacement_map = list_entry(md.maps.next, struct map, node); dso__load_kcore() 1190 while (!list_empty(&md.maps)) { dso__load_kcore() 1191 new_map = list_entry(md.maps.next, struct map, node); dso__load_kcore() 1231 while (!list_empty(&md.maps)) { dso__load_kcore() 1232 map = list_entry(md.maps.next, struct map, node); dso__load_kcore()
|
H A D | evlist.h | 210 static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail) perf_mmap__write_tail() argument 212 struct perf_event_mmap_page *pc = md->base; perf_mmap__write_tail()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
H A D | cxgb4_debugfs.c | 2732 struct mem_desc *md = mem; meminfo_show() local 2789 (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A); meminfo_show() 2790 (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A); meminfo_show() 2791 (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A); meminfo_show() 2792 (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A); meminfo_show() 2793 (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A); meminfo_show() 2794 (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A); meminfo_show() 2795 (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A); meminfo_show() 2796 (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A); meminfo_show() 2797 (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A); meminfo_show() 2800 md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A); meminfo_show() 2801 md->limit = md->base - 1 + meminfo_show() 2804 md++; meminfo_show() 2806 md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A); meminfo_show() 2807 md->limit = md->base - 1 + meminfo_show() 2810 md++; meminfo_show() 2815 md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A); meminfo_show() 2818 md->base = t4_read_reg(adap, meminfo_show() 2821 md->limit = 0; meminfo_show() 2823 md->base = 0; meminfo_show() 2824 md->idx = ARRAY_SIZE(region); /* hide it */ meminfo_show() 2826 md++; meminfo_show() 2829 md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\ meminfo_show() 2830 (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \ meminfo_show() 2842 md->base = 0; meminfo_show() 2843 md->idx = ARRAY_SIZE(region); meminfo_show() 2857 md->base = BASEADDR_G(t4_read_reg(adap, meminfo_show() 2859 md->limit = md->base + (size << 2) - 1; meminfo_show() 2863 md++; meminfo_show() 2865 md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A); meminfo_show() 2866 md->limit = 0; meminfo_show() 2867 md++; meminfo_show() 2868 md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A); meminfo_show() 2869 md->limit = 0; meminfo_show() 2870 md++; meminfo_show() 2872 md->base = adap->vres.ocq.start; meminfo_show() 2874 md->limit = md->base + adap->vres.ocq.size - 1; meminfo_show() 2876 md->idx = ARRAY_SIZE(region); /* hide it */ meminfo_show() 2877 md++; meminfo_show() 2882 (md++)->base = avail[n].limit; meminfo_show() 2884 (md++)->base = avail[n].limit; meminfo_show() 2886 n = md - mem; meminfo_show()
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/iop/ |
H A D | iop_dmc_in_defs.h | 163 unsigned int md : 16; member in struct:__anon494 187 unsigned int md : 16; member in struct:__anon496 194 unsigned int md : 16; member in struct:__anon497 220 unsigned int md : 16; member in struct:__anon499
|
H A D | iop_dmc_out_defs.h | 153 unsigned int md : 16; member in struct:__anon510 177 unsigned int md : 16; member in struct:__anon512 184 unsigned int md : 16; member in struct:__anon513 210 unsigned int md : 16; member in struct:__anon515
|
/linux-4.4.14/drivers/block/drbd/ |
H A D | drbd_main.c | 850 spin_lock_irq(&device->ldev->md.uuid_lock); _drbd_send_uuids() 852 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); _drbd_send_uuids() 853 spin_unlock_irq(&device->ldev->md.uuid_lock); _drbd_send_uuids() 881 u64 *uuid = device->ldev->md.uuid; drbd_print_uuids() 905 uuid = device->ldev->md.uuid[UI_BITMAP]; drbd_gen_and_send_sync_uuid() 3064 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); drbd_md_write() 3065 buffer->flags = cpu_to_be32(device->ldev->md.flags); drbd_md_write() 3068 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect); drbd_md_write() 3069 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset); drbd_md_write() 3072 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid); drbd_md_write() 3074 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset); drbd_md_write() 3077 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes); drbd_md_write() 3078 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k); drbd_md_write() 3080 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); drbd_md_write() 3081 sector = device->ldev->md.md_offset; drbd_md_write() 3118 /* Update device->ldev->md.la_size_sect, drbd_md_sync() 3120 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev); drbd_md_sync() 3175 struct drbd_md *in_core = &bdev->md; check_offsets_and_sizes() 3278 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx; drbd_md_read() 3279 bdev->md.md_offset = drbd_md_ss(bdev); drbd_md_read() 3281 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { drbd_md_read() 3302 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); drbd_md_read() 3304 drbd_err(device, "Meta data magic not found. 
Did you \"drbdadm create-md\"?\n"); drbd_md_read() 3316 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect); drbd_md_read() 3318 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); drbd_md_read() 3319 bdev->md.flags = be32_to_cpu(buffer->flags); drbd_md_read() 3320 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); drbd_md_read() 3322 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect); drbd_md_read() 3323 bdev->md.al_offset = be32_to_cpu(buffer->al_offset); drbd_md_read() 3324 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset); drbd_md_read() 3326 if (check_activity_log_stripe_size(device, buffer, &bdev->md)) drbd_md_read() 3331 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { drbd_md_read() 3333 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); drbd_md_read() 3336 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { drbd_md_read() 3338 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); drbd_md_read() 3389 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i]; __must_hold() 3403 device->ldev->md.uuid[idx] = val; __must_hold() 3410 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); __must_hold() 3412 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); __must_hold() 3418 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); __must_hold() 3419 if (device->ldev->md.uuid[idx]) { __must_hold() 3421 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx]; __must_hold() 3424 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); __must_hold() 3441 spin_lock_irq(&device->ldev->md.uuid_lock); __must_hold() 3442 bm_uuid = device->ldev->md.uuid[UI_BITMAP]; __must_hold() 3447 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT]; __must_hold() 3449 spin_unlock_irq(&device->ldev->md.uuid_lock); __must_hold() 3459 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) __must_hold() 3462 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); __must_hold() 3465 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; __must_hold() 3466 device->ldev->md.uuid[UI_BITMAP] = 0; __must_hold() 3468 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP]; __must_hold() 3472 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); __must_hold() 3474 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); __must_hold() 3620 if ((device->ldev->md.flags & flag) != flag) { __must_hold() 3622 device->ldev->md.flags |= flag; __must_hold() 3628 if ((device->ldev->md.flags & flag) != 0) { __must_hold() 3630 device->ldev->md.flags &= ~flag; __must_hold() 3635 return (bdev->md.flags & flag) != 0; drbd_md_test_flag()
|
H A D | drbd_nl.c | 667 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; drbd_set_role() 680 && device->ldev->md.uuid[UI_BITMAP] == 0) || forced) drbd_set_role() 683 device->ldev->md.uuid[UI_CURRENT] |= (u64)1; drbd_set_role() 751 /* Initializes the md.*_offset members, so we are able to find 776 unsigned int al_size_sect = bdev->md.al_size_4k * 8; drbd_md_set_sector_offsets() 778 bdev->md.md_offset = drbd_md_ss(bdev); drbd_md_set_sector_offsets() 780 switch (bdev->md.meta_dev_idx) { drbd_md_set_sector_offsets() 783 bdev->md.md_size_sect = MD_128MB_SECT; drbd_md_set_sector_offsets() 784 bdev->md.al_offset = MD_4kB_SECT; drbd_md_set_sector_offsets() 785 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; drbd_md_set_sector_offsets() 789 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); drbd_md_set_sector_offsets() 790 bdev->md.al_offset = MD_4kB_SECT; drbd_md_set_sector_offsets() 791 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; drbd_md_set_sector_offsets() 796 bdev->md.al_offset = -al_size_sect; drbd_md_set_sector_offsets() 807 bdev->md.md_size_sect = md_size_sect; drbd_md_set_sector_offsets() 809 bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT; drbd_md_set_sector_offsets() 876 struct drbd_md *md = &device->ldev->md; __must_hold() local 906 prev_size = device->ldev->md.md_size_sect; __must_hold() 907 la_size_sect = device->ldev->md.la_size_sect; __must_hold() 912 prev_al_stripes = md->al_stripes; __must_hold() 913 prev_al_stripe_size_4k = md->al_stripe_size_4k; __must_hold() 915 md->al_stripes = rs->al_stripes; __must_hold() 916 md->al_stripe_size_4k = rs->al_stripe_size / 4; __must_hold() 917 md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4; __must_hold() 961 device->ldev->md.la_size_sect = size; __must_hold() 968 la_size_changed = (la_size_sect != device->ldev->md.la_size_sect); __must_hold() 971 || prev_size != device->ldev->md.md_size_sect; __must_hold() 982 prev_flags = md->flags; __must_hold() 983 md->flags &= ~MDF_PRIMARY_IND; __must_hold() 987 la_size_changed && md_moved ? "size changed and md moved" : __must_hold() 988 la_size_changed ? "size changed" : "md moved"); __must_hold() 994 md->flags = prev_flags; __must_hold() 999 md->al_stripes, md->al_stripe_size_4k * 4); __must_hold() 1010 md->al_stripes = prev_al_stripes; __must_hold() 1011 md->al_stripe_size_4k = prev_al_stripe_size_4k; __must_hold() 1012 md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k; __must_hold() 1030 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. 
*/ drbd_new_dev_size() 1299 unsigned int al_size_4k = bdev->md.al_size_4k; drbd_al_extents_max() 1411 device->ldev->md.flags &= ~MDF_AL_DISABLED; drbd_adm_disk_opts() 1413 device->ldev->md.flags |= MDF_AL_DISABLED; drbd_adm_disk_opts() 1511 spin_lock_init(&nbc->md.uuid_lock); drbd_adm_attach() 1634 drbd_warn(device, "refusing attach: md-device too small, " drbd_adm_attach() 1689 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { drbd_adm_attach() 1704 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) { drbd_adm_attach() 1827 device->ldev->md.flags &= ~MDF_AL_DISABLED; drbd_adm_attach() 1829 device->ldev->md.flags |= MDF_AL_DISABLED; drbd_adm_attach() 1856 device->ldev->md.uuid[UI_CURRENT] |= (u64)1; drbd_adm_attach() 1858 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; drbd_adm_attach() 2516 rs.al_stripes = device->ldev->md.al_stripes; drbd_adm_resize() 2517 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4; drbd_adm_resize() 2554 if (device->ldev->md.al_stripes != rs.al_stripes || drbd_adm_resize() 2555 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) { drbd_adm_resize() 2996 spin_lock_irq(&device->ldev->md.uuid_lock); nla_put_status_info() 2997 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid); nla_put_status_info() 2998 spin_unlock_irq(&device->ldev->md.uuid_lock); nla_put_status_info() 3003 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) || nla_put_status_info() 3360 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { drbd_adm_new_c_uuid()
|
H A D | drbd_proc.c | 108 * progress bars shamelessly adapted from driver/md/md.c 151 /* see drivers/md/md.c drbd_syncer_progress()
|
H A D | drbd_actlog.c | 205 drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n", drbd_md_sync_page_io() 443 const unsigned int stripes = device->ldev->md.al_stripes; al_tr_number_to_on_disk_sector() 444 const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k; al_tr_number_to_on_disk_sector() 447 unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k); al_tr_number_to_on_disk_sector() 456 return device->ldev->md.md_offset + device->ldev->md.al_offset + t; al_tr_number_to_on_disk_sector() 612 struct drbd_md *md = &device->ldev->md; drbd_initialize_al() local 613 sector_t al_base = md->md_offset + md->al_offset; drbd_initialize_al() 614 int al_size_4k = md->al_stripes * md->al_stripe_size_4k; drbd_initialize_al()
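al_tr_number_to_on_disk_sector() spreads activity-log transactions round-robin over md.al_stripes stripes of md.al_stripe_size_4k 4kB blocks, then converts the 4kB index into a 512-byte sector offset from md.md_offset + md.al_offset. A self-contained sketch of that arithmetic; the function name and exact interleave are illustrative, not copied from the driver:

    #include <stdint.h>

    static uint64_t al_tr_sector(uint64_t md_offset, int32_t al_offset,
                                 unsigned int stripes,
                                 unsigned int stripe_size_4k,
                                 unsigned int tr_number)
    {
        unsigned int al_size_4k = stripes * stripe_size_4k;
        unsigned int t = tr_number % al_size_4k;

        /* round-robin across stripes, then advance within the stripe */
        t = (t % stripes) * stripe_size_4k + t / stripes;

        /* 4kB units -> 512-byte sectors */
        t *= 8;

        return md_offset + al_offset + t;
    }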
|
H A D | drbd_debugfs.c | 758 struct drbd_md *md; device_data_gen_id_show() local 764 md = &device->ldev->md; device_data_gen_id_show() 765 spin_lock_irq(&md->uuid_lock); device_data_gen_id_show() 767 seq_printf(m, "0x%016llX\n", md->uuid[idx]); device_data_gen_id_show() 769 spin_unlock_irq(&md->uuid_lock); device_data_gen_id_show()
|
H A D | drbd_int.h | 612 struct drbd_md md; member in struct:drbd_backing_dev 1189 * variables at create-md time (or even re-configurable at runtime?). 1816 switch (bdev->md.meta_dev_idx) { drbd_md_first_sector() 1819 return bdev->md.md_offset + bdev->md.bm_offset; drbd_md_first_sector() 1822 return bdev->md.md_offset; drbd_md_first_sector() 1832 switch (bdev->md.meta_dev_idx) { drbd_md_last_sector() 1835 return bdev->md.md_offset + MD_4kB_SECT -1; drbd_md_last_sector() 1838 return bdev->md.md_offset + bdev->md.md_size_sect -1; drbd_md_last_sector() 1861 switch (bdev->md.meta_dev_idx) { drbd_get_max_capacity() 1874 BM_EXT_TO_SECT(bdev->md.md_size_sect drbd_get_max_capacity() 1875 - bdev->md.bm_offset)); drbd_get_max_capacity() 1890 const int meta_dev_idx = bdev->md.meta_dev_idx; drbd_md_ss() 1902 return MD_128MB_SECT * bdev->md.meta_dev_idx; drbd_md_ss()
|
H A D | drbd_receiver.c | 2727 self = device->ldev->md.uuid[UI_BITMAP] & 1; __must_hold() 2941 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); __must_hold() 2961 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) { __must_hold() 2966 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) && __must_hold() 2967 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { __must_hold() 2970 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; __must_hold() 2971 device->ldev->md.uuid[UI_BITMAP] = 0; __must_hold() 2973 drbd_uuid_dump(device, "self", device->ldev->md.uuid, __must_hold() 2984 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) { __must_hold() 2989 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) && __must_hold() 2990 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) { __must_hold() 3033 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == __must_hold() 3053 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1); __must_hold() 3061 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); __must_hold() 3067 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); __must_hold() 3070 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == __must_hold() 3072 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { __must_hold() 3079 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]); __must_hold() 3080 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]); __must_hold() 3083 drbd_uuid_dump(device, "self", device->ldev->md.uuid, __must_hold() 3094 self = device->ldev->md.uuid[i] & ~((u64)1); __must_hold() 3100 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1); __must_hold() 3107 self = device->ldev->md.uuid[i] & ~((u64)1); __must_hold() 3137 spin_lock_irq(&device->ldev->md.uuid_lock); __must_hold() 3138 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0); __must_hold() 3143 spin_unlock_irq(&device->ldev->md.uuid_lock); __must_hold() 3906 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && receive_uuids()
|
H A D | drbd_bitmap.c | 620 if (ldev->md.al_offset == 8) drbd_md_on_disk_bits() 621 bitmap_sectors = ldev->md.md_size_sect - ldev->md.bm_offset; drbd_md_on_disk_bits() 623 bitmap_sectors = ldev->md.al_offset - ldev->md.bm_offset; drbd_md_on_disk_bits() 994 device->ldev->md.md_offset + device->ldev->md.bm_offset; __must_hold()
|
H A D | drbd_state.c | 809 if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) { sanitize_state() 1121 u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND| __drbd_set_state() 1139 if (mdf != device->ldev->md.flags) { __drbd_set_state() 1140 device->ldev->md.flags = mdf; __drbd_set_state() 1144 drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]); __drbd_set_state() 1367 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { 1381 device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
|
H A D | drbd_worker.c | 931 drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_CURRENT]); drbd_resync_finished() 948 device->p_uuid[i] = device->ldev->md.uuid[i]; drbd_resync_finished()
|
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/ |
H A D | rpc.c | 361 lnet_md_t md; srpc_post_passive_rdma() local 372 md.threshold = 1; srpc_post_passive_rdma() 373 md.user_ptr = ev; srpc_post_passive_rdma() 374 md.start = buf; srpc_post_passive_rdma() 375 md.length = len; srpc_post_passive_rdma() 376 md.options = options; srpc_post_passive_rdma() 377 md.eq_handle = srpc_data.rpc_lnet_eq; srpc_post_passive_rdma() 379 rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh); srpc_post_passive_rdma() 401 lnet_md_t md; srpc_post_active_rdma() local 403 md.user_ptr = ev; srpc_post_active_rdma() 404 md.start = buf; srpc_post_active_rdma() 405 md.length = len; srpc_post_active_rdma() 406 md.eq_handle = srpc_data.rpc_lnet_eq; srpc_post_active_rdma() 407 md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1; srpc_post_active_rdma() 408 md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); srpc_post_active_rdma() 410 rc = LNetMDBind(md, LNET_UNLINK, mdh); srpc_post_active_rdma() 1405 srpc_event_t *rpcev = ev->md.user_ptr; srpc_lnet_ev_handler() 1474 buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); srpc_lnet_ev_handler()
|
/linux-4.4.14/drivers/usb/gadget/function/ |
H A D | f_uvc.c | 765 struct uvc_color_matching_descriptor *md; uvc_alloc_inst() local 812 md = &opts->uvc_color_matching; uvc_alloc_inst() 813 md->bLength = UVC_DT_COLOR_MATCHING_SIZE; uvc_alloc_inst() 814 md->bDescriptorType = USB_DT_CS_INTERFACE; uvc_alloc_inst() 815 md->bDescriptorSubType = UVC_VS_COLORFORMAT; uvc_alloc_inst() 816 md->bColorPrimaries = 1; uvc_alloc_inst() 817 md->bTransferCharacteristics = 1; uvc_alloc_inst() 818 md->bMatrixCoefficients = 4; uvc_alloc_inst()
|
/linux-4.4.14/crypto/ |
H A D | xor.c | 167 /* when built-in xor.o must initialize before drivers/md/md.o */
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | diag.c | 1514 struct capture_md md; snoop_recv_handler() local 1556 memset(&md, 0, sizeof(struct capture_md)); snoop_recv_handler() 1557 md.port = 1; snoop_recv_handler() 1558 md.dir = PKT_DIR_INGRESS; snoop_recv_handler() 1559 md.u.rhf = packet->rhf; snoop_recv_handler() 1560 memcpy(s_packet->data, &md, md_len); snoop_recv_handler() 1651 struct capture_md md; snoop_send_pio_handler() local 1656 md.u.pbc = 0; snoop_send_pio_handler() 1677 memset(&md, 0, sizeof(struct capture_md)); snoop_send_pio_handler() 1678 md.port = 1; snoop_send_pio_handler() 1679 md.dir = PKT_DIR_EGRESS; snoop_send_pio_handler() 1682 md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen); snoop_send_pio_handler() 1684 md.u.pbc = 0; snoop_send_pio_handler() 1686 memcpy(s_packet->data, &md, md_len); snoop_send_pio_handler() 1688 md.u.pbc = pbc; snoop_send_pio_handler() 1787 md.u.pbc); snoop_send_pio_handler() 1800 struct capture_md md; snoop_inline_pio_send() local 1848 memset(&md, 0, sizeof(struct capture_md)); snoop_inline_pio_send() 1849 md.port = 1; snoop_inline_pio_send() 1850 md.dir = PKT_DIR_EGRESS; snoop_inline_pio_send() 1851 md.u.pbc = pbc; snoop_inline_pio_send() 1852 memcpy(s_packet->data, &md, md_len); snoop_inline_pio_send()
|
/linux-4.4.14/drivers/media/pci/ivtv/ |
H A D | ivtv-alsa.h | 4 * Copyright (C) 2009,2012 Andy Walls <awalls@md.metrocast.net>
|
H A D | ivtv-alsa-mixer.c | 5 * Copyright (C) 2009,2012 Andy Walls <awalls@md.metrocast.net>
|
H A D | ivtv-alsa-main.c | 4 * Copyright (C) 2009,2012 Andy Walls <awalls@md.metrocast.net>
|
H A D | ivtv-alsa-pcm.c | 5 * Copyright (C) 2009,2012 Andy Walls <awalls@md.metrocast.net>
|
/linux-4.4.14/arch/m68k/ifpsp060/src/ |
H A D | ilsp.S | 548 mov.l %d1,%d4 # md in d4 550 swap %d4 # hi(md) in lo d4 553 mulu.w %d1,%d0 # [1] lo(mr) * lo(md) 554 mulu.w %d3,%d1 # [2] hi(mr) * lo(md) 555 mulu.w %d4,%d2 # [3] lo(mr) * hi(md) 556 mulu.w %d4,%d3 # [4] hi(mr) * hi(md) 674 mov.l %d1,%d4 # md in d4 676 swap %d4 # hi(md) in lo d4 679 mulu.w %d1,%d0 # [1] lo(mr) * lo(md) 680 mulu.w %d3,%d1 # [2] hi(mr) * lo(md) 681 mulu.w %d4,%d2 # [3] lo(mr) * hi(md) 682 mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
|
H A D | isp.S | 2584 mov.l %d4, %d7 # md in %d7 2586 swap %d7 # hi(md) in lo %d7 2589 mulu.w %d4, %d3 # [1] lo(mr) * lo(md) 2590 mulu.w %d6, %d4 # [2] hi(mr) * lo(md) 2591 mulu.w %d7, %d5 # [3] lo(mr) * hi(md) 2592 mulu.w %d7, %d6 # [4] hi(mr) * hi(md)
|
/linux-4.4.14/drivers/firmware/efi/libstub/ |
H A D | efi-stub-helper.c | 119 efi_memory_desc_t *md; get_dram_base() local 128 for_each_efi_memory_desc(&map, md) get_dram_base() 129 if (md->attribute & EFI_MEMORY_WB) get_dram_base() 130 if (membase > md->phys_addr) get_dram_base() 131 membase = md->phys_addr; get_dram_base()
|
/linux-4.4.14/tools/perf/ |
H A D | builtin-record.c | 78 struct perf_mmap *md = &rec->evlist->mmap[idx]; record__mmap_read() local 79 u64 head = perf_mmap__read_head(md); record__mmap_read() 80 u64 old = md->prev; record__mmap_read() 81 unsigned char *data = md->base + page_size; record__mmap_read() 93 if ((old & md->mask) + size != (head & md->mask)) { record__mmap_read() 94 buf = &data[old & md->mask]; record__mmap_read() 95 size = md->mask + 1 - (old & md->mask); record__mmap_read() 104 buf = &data[old & md->mask]; record__mmap_read() 113 md->prev = old; record__mmap_read()
|
/linux-4.4.14/drivers/net/ |
H A D | vxlan.c | 1152 struct vxlan_metadata *md, u32 vni, vxlan_rcv() 1205 skb->mark = md->gbp; vxlan_rcv() 1253 struct vxlan_metadata *md = &_md; vxlan_udp_encap_recv() local 1290 cpu_to_be64(vni >> 8), sizeof(*md)); vxlan_udp_encap_recv() 1295 md = ip_tunnel_info_opts(&tun_dst->u.tun_info); vxlan_udp_encap_recv() 1297 memset(md, 0, sizeof(*md)); vxlan_udp_encap_recv() 1307 md->gbp = ntohs(gbp->policy_id); vxlan_udp_encap_recv() 1311 tun_dst->u.tun_info.options_len = sizeof(*md); vxlan_udp_encap_recv() 1315 md->gbp |= VXLAN_GBP_DONT_LEARN; vxlan_udp_encap_recv() 1318 md->gbp |= VXLAN_GBP_POLICY_APPLIED; vxlan_udp_encap_recv() 1336 vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); vxlan_udp_encap_recv() 1664 struct vxlan_metadata *md) vxlan_build_gbp_hdr() 1668 if (!md->gbp) vxlan_build_gbp_hdr() 1674 if (md->gbp & VXLAN_GBP_DONT_LEARN) vxlan_build_gbp_hdr() 1677 if (md->gbp & VXLAN_GBP_POLICY_APPLIED) vxlan_build_gbp_hdr() 1680 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); vxlan_build_gbp_hdr() 1689 struct vxlan_metadata *md, bool xnet, u32 vxflags) vxlan6_xmit_skb() 1757 vxlan_build_gbp_hdr(vxh, vxflags, md); vxlan6_xmit_skb() 1774 struct vxlan_metadata *md, bool xnet, u32 vxflags) vxlan_xmit_skb() 1836 vxlan_build_gbp_hdr(vxh, vxflags, md); vxlan_xmit_skb() 1930 struct vxlan_metadata *md = &_md; vxlan_xmit_one() local 1987 md = ip_tunnel_info_opts(info); vxlan_xmit_one() 1989 md->gbp = skb->mark; vxlan_xmit_one() 2049 src_port, dst_port, htonl(vni << 8), md, vxlan_xmit_one() 2112 0, ttl, src_port, dst_port, htonl(vni << 8), md, vxlan_xmit_one() 1151 vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, struct vxlan_metadata *md, u32 vni, struct metadata_dst *tun_dst) vxlan_rcv() argument 1663 vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, struct vxlan_metadata *md) vxlan_build_gbp_hdr() argument 1684 vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port, __be32 vni, struct vxlan_metadata *md, bool xnet, u32 vxflags) vxlan6_xmit_skb() argument 1771 vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, struct vxlan_metadata *md, bool xnet, u32 vxflags) vxlan_xmit_skb() argument
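vxlan_build_gbp_hdr() and the receive path fold the VXLAN Group Based Policy extension into a single 32-bit vxlan_metadata::gbp value: a 16-bit policy id in the low bits plus don't-learn and policy-applied flag bits above it. A small sketch of that packing; the flag bit positions below are placeholders, the real VXLAN_GBP_* constants live in include/net/vxlan.h:

    #include <stdint.h>

    #define GBP_ID_MASK        0xFFFFu
    #define GBP_DONT_LEARN     (1u << 22)   /* placeholder bit position */
    #define GBP_POLICY_APPLIED (1u << 19)   /* placeholder bit position */

    /* Combine policy id and flags into one 32-bit metadata word. */
    static uint32_t gbp_pack(uint16_t policy_id, int dont_learn, int applied)
    {
        uint32_t gbp = policy_id & GBP_ID_MASK;

        if (dont_learn)
            gbp |= GBP_DONT_LEARN;
        if (applied)
            gbp |= GBP_POLICY_APPLIED;
        return gbp;
    }

    static uint16_t gbp_policy_id(uint32_t gbp)
    {
        return (uint16_t)(gbp & GBP_ID_MASK);
    }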
|
/linux-4.4.14/drivers/clk/st/ |
H A D | clkgen-fsyn.c | 560 pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", quadfs_pll_fs660c32_round_rate() 699 u32 md; member in struct:st_clk_quadfs_fsynth 712 * notice of the new md/pe values with a glitchless transition. quadfs_fsynth_program_enable() 723 * Ensure the md/pe parameters are ignored while we are quadfs_fsynth_program_rate() 729 CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md); quadfs_fsynth_program_rate() 883 fs->md = params->mdiv; quadfs_fsynt_get_hw_value_for_recalc() 964 pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", quadfs_round_rate() 976 fs->md = params->mdiv; quadfs_program_and_enable()
|
/linux-4.4.14/drivers/scsi/ |
H A D | mesh.c | 305 volatile struct dbdma_regs __iomem *md = ms->dma; mesh_dump_regs() local 310 ms, mr, md); mesh_dump_regs() 320 in_le32(&md->status), in_le32(&md->cmdptr)); mesh_dump_regs() 360 volatile struct dbdma_regs __iomem *md = ms->dma; mesh_init() local 366 out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ mesh_init() 682 volatile struct dbdma_regs __iomem *md = ms->dma; start_phase() local 786 out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds)); start_phase() 787 out_le32(&md->control, (RUN << 16) | RUN); start_phase() 1320 volatile struct dbdma_regs __iomem *md = ms->dma; halt_dma() local 1329 && (in_le32(&md->status) & ACTIVE) != 0) { halt_dma() 1334 out_le32(&md->control, RUN << 16); /* turn off RUN bit */ halt_dma() 1707 volatile struct dbdma_regs __iomem *md = ms->dma; mesh_host_reset() local 1715 out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ mesh_host_reset()
|
H A D | sg.c | 1676 struct rq_map_data *md, map_data; sg_start_req() local 1730 md = NULL; sg_start_req() 1732 md = &map_data; sg_start_req() 1734 if (md) { sg_start_req() 1743 md->pages = req_schp->pages; sg_start_req() 1744 md->page_order = req_schp->page_order; sg_start_req() 1745 md->nr_entries = req_schp->k_use_sg; sg_start_req() 1746 md->offset = 0; sg_start_req() 1747 md->null_mapped = hp->dxferp ? 0 : 1; sg_start_req() 1749 md->from_user = 1; sg_start_req() 1751 md->from_user = 0; sg_start_req() 1764 res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); sg_start_req() 1767 res = blk_rq_map_user(q, rq, md, hp->dxferp, sg_start_req() 1773 if (!md) { sg_start_req()
|
/linux-4.4.14/drivers/iio/imu/ |
H A D | adis16480.c | 156 u16 md, year; adis16480_show_firmware_date() local 165 ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md); adis16480_show_firmware_date() 170 md >> 8, md & 0xff, year); adis16480_show_firmware_date()
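adis16480_show_firmware_date() reads the day and month packed into the single 16-bit FIRM_DM register and the year from a second register, then prints the two bytes and the year as BCD-style hex fields. A tiny sketch of the unpacking; which byte is day and which is month is an assumption here:

    #include <stdio.h>
    #include <stdint.h>

    static void print_firmware_date(uint16_t md, uint16_t year)
    {
        /* high byte, low byte, then the 16-bit year register */
        printf("%.2x-%.2x-%.4x\n",
               (unsigned)(md >> 8), (unsigned)(md & 0xff), (unsigned)year);
    }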
|
/linux-4.4.14/drivers/media/usb/hdpvr/ |
H A D | hdpvr-i2c.c | 8 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
|
/linux-4.4.14/drivers/staging/lustre/lustre/lov/ |
H A D | lov_obd.c | 2156 void lov_stripe_lock(struct lov_stripe_md *md) 2157 __acquires(&md->lsm_lock) 2159 LASSERT(md->lsm_lock_owner != current_pid()); 2160 spin_lock(&md->lsm_lock); 2161 LASSERT(md->lsm_lock_owner == 0); 2162 md->lsm_lock_owner = current_pid(); 2166 void lov_stripe_unlock(struct lov_stripe_md *md) 2167 __releases(&md->lsm_lock) 2169 LASSERT(md->lsm_lock_owner == current_pid()); 2170 md->lsm_lock_owner = 0; 2171 spin_unlock(&md->lsm_lock);
|
/linux-4.4.14/drivers/crypto/ |
H A D | hifn_795x.c | 1194 u16 md = 0; hifn_setup_cmd_desc() local 1197 md |= HIFN_CRYPT_CMD_NEW_KEY; hifn_setup_cmd_desc() 1199 md |= HIFN_CRYPT_CMD_NEW_IV; hifn_setup_cmd_desc() 1203 md |= HIFN_CRYPT_CMD_MODE_ECB; hifn_setup_cmd_desc() 1206 md |= HIFN_CRYPT_CMD_MODE_CBC; hifn_setup_cmd_desc() 1209 md |= HIFN_CRYPT_CMD_MODE_CFB; hifn_setup_cmd_desc() 1212 md |= HIFN_CRYPT_CMD_MODE_OFB; hifn_setup_cmd_desc() 1222 md |= HIFN_CRYPT_CMD_KSZ_128 | hifn_setup_cmd_desc() 1228 md |= HIFN_CRYPT_CMD_KSZ_192 | hifn_setup_cmd_desc() 1234 md |= HIFN_CRYPT_CMD_KSZ_256 | hifn_setup_cmd_desc() 1240 md |= HIFN_CRYPT_CMD_ALG_3DES; hifn_setup_cmd_desc() 1245 md |= HIFN_CRYPT_CMD_ALG_DES; hifn_setup_cmd_desc() 1253 rctx->iv, rctx->ivsize, md); hifn_setup_cmd_desc()
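hifn_setup_cmd_desc() assembles the crypto command word by OR-ing mode, algorithm and key-size flags chosen in nested switch statements. A stripped-down sketch of that flag composition; the constant names and values below are placeholders, the real HIFN_CRYPT_CMD_* values are in the driver's header:

    #include <stdint.h>

    #define CMD_NEW_KEY    0x0001u
    #define CMD_NEW_IV     0x0002u
    #define CMD_MODE_ECB   0x0000u
    #define CMD_MODE_CBC   0x0100u
    #define CMD_MODE_CFB   0x0200u
    #define CMD_MODE_OFB   0x0300u
    #define CMD_ALG_AES    0x0010u
    #define CMD_KSZ_128    0x0000u
    #define CMD_KSZ_192    0x0004u
    #define CMD_KSZ_256    0x0008u

    enum cipher_mode { MODE_ECB, MODE_CBC, MODE_CFB, MODE_OFB };

    static uint16_t build_cmd_word(enum cipher_mode mode, unsigned int keylen,
                                   int new_key, int new_iv)
    {
        uint16_t md = 0;

        if (new_key)
            md |= CMD_NEW_KEY;
        if (new_iv)
            md |= CMD_NEW_IV;

        switch (mode) {
        case MODE_ECB: md |= CMD_MODE_ECB; break;
        case MODE_CBC: md |= CMD_MODE_CBC; break;
        case MODE_CFB: md |= CMD_MODE_CFB; break;
        case MODE_OFB: md |= CMD_MODE_OFB; break;
        }

        switch (keylen) {               /* key length in bytes */
        case 16: md |= CMD_KSZ_128 | CMD_ALG_AES; break;
        case 24: md |= CMD_KSZ_192 | CMD_ALG_AES; break;
        case 32: md |= CMD_KSZ_256 | CMD_ALG_AES; break;
        }

        return md;
    }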
|
/linux-4.4.14/arch/cris/arch-v32/drivers/ |
H A D | cryptocop.c | 428 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); create_pad_descriptor() 487 key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); setup_key_dl_desc() 518 iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo); setup_cipher_iv_desc() 580 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); create_input_descriptors() 596 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); create_input_descriptors() 642 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out)); create_output_descriptors() 683 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); append_input_descriptors() 687 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi); append_input_descriptors() 1242 ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo); cryptocop_setup_dma_list() 3193 td->md, print_dma_descriptors() 3221 td->md, print_dma_descriptors() 3404 dd->md, print_user_dma_lists() 3435 dd->md, print_user_dma_lists()
|
/linux-4.4.14/drivers/staging/lustre/lustre/lclient/ |
H A D | lcommon_cl.c | 1004 * \param md new file metadata from MDS 1008 int cl_file_inode_init(struct inode *inode, struct lustre_md *md) cl_file_inode_init() argument 1018 .coc_md = md cl_file_inode_init() 1024 LASSERT(md->body->valid & OBD_MD_FLID); cl_file_inode_init() 1051 lli->lli_has_smd = lsm_has_objects(md->lsm); cl_file_inode_init()
|
/linux-4.4.14/drivers/video/fbdev/core/ |
H A D | modedb.c | 1181 const struct fb_videomode *m, *m1 = NULL, *md = NULL, *best = NULL; fb_find_best_display() local 1198 md = m; list_for_each() 1205 best = md; 1224 if (md) { 1225 best = md;
|
/linux-4.4.14/drivers/net/wireless/realtek/rtl8xxxu/ |
H A D | rtl8xxxu.h | 90 u32 md:1; member in struct:rtl8xxxu_rx_desc 147 u32 md:1;
|
/linux-4.4.14/drivers/staging/rtl8188eu/hal/ |
H A D | rtl8188e_rxdesc.c | 113 pattrib->mdata = (u8)((le32_to_cpu(report.rxdw1) >> 26) & 0x1);/* u8)prxreport->md; */ update_recvframe_attrib_88e()
|
/linux-4.4.14/drivers/staging/rtl8723au/hal/ |
H A D | rtl8723au_recv.c | 175 pattrib->mdata = (u8)prxreport->md; update_recvframe_attrib()
|
/linux-4.4.14/crypto/async_tx/ |
H A D | raid6test.c | 5 * based on drivers/md/raid6test/test.c:
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdecho/ |
H A D | echo_client.c | 973 struct lustre_md *md; cl_echo_object_find() local 975 md = &info->eti_md; cl_echo_object_find() 976 memset(md, 0, sizeof(*md)); cl_echo_object_find() 977 md->lsm = lsm; cl_echo_object_find() 978 conf->eoc_cl.u.coc_md = md; cl_echo_object_find() 1340 CERROR("Cannot allocate md: rc = %d\n", rc); echo_create_object()
|
/linux-4.4.14/drivers/media/tuners/ |
H A D | mt2063.c | 693 * ma mb mc md 723 u32 ma, mb, mc, md, me, mf; IsSpurInBand() local 747 md = (n * ((f_LO1 + hgds) / gd_Scale) - IsSpurInBand() 751 if (md >= pAS_Info->maxH1) IsSpurInBand() 758 if (md == ma) IsSpurInBand() 763 if (mc != md) { IsSpurInBand()
|
/linux-4.4.14/include/net/ |
H A D | ip_tunnels.h | 280 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
|
/linux-4.4.14/include/uapi/linux/ |
H A D | capability.h | 248 /* Allow administration of md devices (mostly the above, but some
|
/linux-4.4.14/drivers/media/platform/exynos4-is/ |
H A D | media-dev.c | 1343 strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name)); fimc_md_probe() 1455 { .name = "s5p-fimc-md" }, 1471 .name = "s5p-fimc-md",
|
/linux-4.4.14/net/wireless/ |
H A D | trace.h | 1877 __field(u16, md) 1883 __entry->md = ftie->md; 1886 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", md: 0x%x", 1887 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md)
|
/linux-4.4.14/drivers/net/wireless/ |
H A D | mac80211_hwsim.c | 899 struct mac80211_hwsim_addr_match_data *md = data; mac80211_hwsim_addr_iter() local 901 if (memcmp(mac, md->addr, ETH_ALEN) == 0) mac80211_hwsim_addr_iter() 902 md->ret = true; mac80211_hwsim_addr_iter() 908 struct mac80211_hwsim_addr_match_data md = { mac80211_hwsim_addr_match() local 915 memcpy(md.addr, addr, ETH_ALEN); mac80211_hwsim_addr_match() 920 &md); mac80211_hwsim_addr_match() 922 return md.ret; mac80211_hwsim_addr_match()
|
/linux-4.4.14/kernel/debug/kdb/ |
H A D | kdb_main.c | 156 "MDCOUNT=8", /* lines of md output */ 1031 * e.g., md1c20 should match md. 1465 * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4', 1468 * md|mds [<addr arg> [<line count> [<radix>]]] 1572 /* Assume 'md <addr>' and start with environment values */ kdb_md() 1597 } else if (strcmp(argv[0], "md") == 0) kdb_md() 2813 kdb_register_flags("md", kdb_md, "<vaddr>", kdb_inittab()
|
/linux-4.4.14/drivers/staging/lustre/lustre/lmv/ |
H A D | lmv_obd.c | 2559 struct lustre_md *md) lmv_get_lustre_md() 2563 return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md); lmv_get_lustre_md() 2566 static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md) lmv_free_lustre_md() argument 2571 if (md->mea) lmv_free_lustre_md() 2572 obd_free_memmd(exp, (void *)&md->mea); lmv_free_lustre_md() 2573 return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md); lmv_free_lustre_md() 2555 lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req, struct obd_export *dt_exp, struct obd_export *md_exp, struct lustre_md *md) lmv_get_lustre_md() argument
|
/linux-4.4.14/lib/ |
H A D | inflate.c | 601 unsigned ml, md; /* masks for bl and bd bits */ inflate_codes() local 613 md = mask_bits[bd]; inflate_codes() 649 if ((e = (t = td + ((unsigned)b & md))->e) > 16) inflate_codes()
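inflate_codes() masks the bit accumulator with md = mask_bits[bd] before indexing the distance-code table, where mask_bits[n] is simply a mask of the low n bits. A short stand-alone example of that lookup:

    #include <stdio.h>

    /* mask_bits[n] == (1 << n) - 1, as used by inflate */
    static const unsigned short mask_bits[17] = {
        0x0000,
        0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
        0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
    };

    int main(void)
    {
        unsigned bd = 5;                 /* bits needed for the distance table */
        unsigned md = mask_bits[bd];     /* 0x001f */
        unsigned b  = 0x1234;            /* bit accumulator */

        printf("index = %u\n", b & md);  /* low 'bd' bits select the entry */
        return 0;
    }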
|
/linux-4.4.14/sound/oss/dmasound/ |
H A D | dmasound_core.c | 926 static int shared_resources_are_mine(fmode_t md) shared_resources_are_mine() argument 929 return (shared_resource_owner & md) != 0; shared_resources_are_mine() 931 shared_resource_owner = md ; shared_resources_are_mine()
|
/linux-4.4.14/drivers/staging/rtl8723au/include/ |
H A D | rtl8723a_hal.h | 438 u32 md:1; member in struct:rxreport_8723a
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/ |
H A D | lclient.h | 353 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
|
H A D | obd_class.h | 1591 struct lustre_md *md) md_get_lustre_md() 1595 return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md); md_get_lustre_md() 1599 struct lustre_md *md) md_free_lustre_md() 1603 return MDP(exp->exp_obd, free_lustre_md)(exp, md); md_free_lustre_md() 1587 md_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req, struct obd_export *dt_exp, struct obd_export *md_exp, struct lustre_md *md) md_get_lustre_md() argument 1598 md_free_lustre_md(struct obd_export *exp, struct lustre_md *md) md_free_lustre_md() argument
|
H A D | lustre_lib.h | 107 * For md echo client
|
H A D | obd.h | 166 void lov_stripe_lock(struct lov_stripe_md *md); 167 void lov_stripe_unlock(struct lov_stripe_md *md);
|
H A D | lu_object.h | 975 * Thread on md server
|
/linux-4.4.14/net/core/ |
H A D | filter.c | 1593 struct metadata_dst *md = this_cpu_ptr(md_dst); bpf_skb_set_tunnel_key() local 1600 dst_hold((struct dst_entry *) md); bpf_skb_set_tunnel_key() 1601 skb_dst_set(skb, (struct dst_entry *) md); bpf_skb_set_tunnel_key() 1603 info = &md->u.tun_info; bpf_skb_set_tunnel_key()
|
/linux-4.4.14/drivers/isdn/hardware/eicon/ |
H A D | io.c | 196 DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x", dump_trap_frame()
|
/linux-4.4.14/arch/arm64/mm/ |
H A D | mmu.c | 245 * mapping specified by 'md'.
|
/linux-4.4.14/drivers/dma/ |
H A D | imx-sdma.c | 246 * @md: burst dma data register 264 u32 md; member in struct:sdma_context_data
|
/linux-4.4.14/drivers/scsi/ibmvscsi/ |
H A D | ibmvfc.c | 1304 * @md: memory descriptor list to initialize 1307 struct srp_direct_buf *md) ibmvfc_map_sg_list() 1313 md[i].va = cpu_to_be64(sg_dma_address(sg)); scsi_for_each_sg() 1314 md[i].len = cpu_to_be32(sg_dma_len(sg)); scsi_for_each_sg() 1315 md[i].key = 0; scsi_for_each_sg() 1306 ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, struct srp_direct_buf *md) ibmvfc_map_sg_list() argument
|
/linux-4.4.14/drivers/md/bcache/ |
H A D | bcache.h | 11 * like a md raid array and its component devices. Most of the code doesn't care
|
/linux-4.4.14/drivers/media/i2c/cx25840/ |
H A D | cx25840-ir.c | 6 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
|
/linux-4.4.14/block/ |
H A D | blk-settings.c | 147 * such as md or lvm) do not benefit from the processing on the
|