Searched refs:bdev (Results 1 - 200 of 344) sorted by relevance


/linux-4.1.27/drivers/staging/media/bcm2048/
radio-bcm2048.c
358 static int bcm2048_send_command(struct bcm2048_device *bdev, unsigned int reg, bcm2048_send_command() argument
361 struct i2c_client *client = bdev->client; bcm2048_send_command()
364 if (!bdev->power_state) { bcm2048_send_command()
365 dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); bcm2048_send_command()
375 dev_err(&bdev->client->dev, "BCM I2C error!\n"); bcm2048_send_command()
376 dev_err(&bdev->client->dev, "Is Bluetooth up and running?\n"); bcm2048_send_command()
380 static int bcm2048_recv_command(struct bcm2048_device *bdev, unsigned int reg, bcm2048_recv_command() argument
383 struct i2c_client *client = bdev->client; bcm2048_recv_command()
385 if (!bdev->power_state) { bcm2048_recv_command()
386 dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); bcm2048_recv_command()
395 static int bcm2048_recv_duples(struct bcm2048_device *bdev, unsigned int reg, bcm2048_recv_duples() argument
398 struct i2c_client *client = bdev->client; bcm2048_recv_duples()
403 if (!bdev->power_state) { bcm2048_recv_duples()
404 dev_err(&bdev->client->dev, "bcm2048: chip not powered!\n"); bcm2048_recv_duples()
427 static int bcm2048_set_power_state(struct bcm2048_device *bdev, u8 power) bcm2048_set_power_state() argument
431 mutex_lock(&bdev->mutex); bcm2048_set_power_state()
434 bdev->power_state = BCM2048_POWER_ON; bcm2048_set_power_state()
435 bdev->cache_fm_rds_system |= BCM2048_FM_ON; bcm2048_set_power_state()
437 bdev->cache_fm_rds_system &= ~BCM2048_FM_ON; bcm2048_set_power_state()
446 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, bcm2048_set_power_state()
447 bdev->cache_fm_rds_system); bcm2048_set_power_state()
451 bdev->power_state = BCM2048_POWER_OFF; bcm2048_set_power_state()
453 mutex_unlock(&bdev->mutex); bcm2048_set_power_state()
457 static int bcm2048_get_power_state(struct bcm2048_device *bdev) bcm2048_get_power_state() argument
462 mutex_lock(&bdev->mutex); bcm2048_get_power_state()
464 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value); bcm2048_get_power_state()
466 mutex_unlock(&bdev->mutex); bcm2048_get_power_state()
474 static int bcm2048_set_rds_no_lock(struct bcm2048_device *bdev, u8 rds_on) bcm2048_set_rds_no_lock() argument
479 bdev->cache_fm_rds_system &= ~BCM2048_RDS_ON; bcm2048_set_rds_no_lock()
482 bdev->cache_fm_rds_system |= BCM2048_RDS_ON; bcm2048_set_rds_no_lock()
483 bdev->rds_state = BCM2048_RDS_ON; bcm2048_set_rds_no_lock()
485 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, bcm2048_set_rds_no_lock()
489 bdev->rds_state = 0; bcm2048_set_rds_no_lock()
490 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, bcm2048_set_rds_no_lock()
492 memset(&bdev->rds_info, 0, sizeof(bdev->rds_info)); bcm2048_set_rds_no_lock()
495 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, bcm2048_set_rds_no_lock()
496 bdev->cache_fm_rds_system); bcm2048_set_rds_no_lock()
501 static int bcm2048_get_rds_no_lock(struct bcm2048_device *bdev) bcm2048_get_rds_no_lock() argument
506 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_SYSTEM, &value); bcm2048_get_rds_no_lock()
514 static int bcm2048_set_rds(struct bcm2048_device *bdev, u8 rds_on) bcm2048_set_rds() argument
518 mutex_lock(&bdev->mutex); bcm2048_set_rds()
520 err = bcm2048_set_rds_no_lock(bdev, rds_on); bcm2048_set_rds()
522 mutex_unlock(&bdev->mutex); bcm2048_set_rds()
526 static int bcm2048_get_rds(struct bcm2048_device *bdev) bcm2048_get_rds() argument
530 mutex_lock(&bdev->mutex); bcm2048_get_rds()
532 err = bcm2048_get_rds_no_lock(bdev); bcm2048_get_rds()
534 mutex_unlock(&bdev->mutex); bcm2048_get_rds()
538 static int bcm2048_get_rds_pi(struct bcm2048_device *bdev) bcm2048_get_rds_pi() argument
540 return bdev->rds_info.rds_pi; bcm2048_get_rds_pi()
543 static int bcm2048_set_fm_automatic_stereo_mono(struct bcm2048_device *bdev, bcm2048_set_fm_automatic_stereo_mono() argument
548 mutex_lock(&bdev->mutex); bcm2048_set_fm_automatic_stereo_mono()
550 bdev->cache_fm_ctrl &= ~BCM2048_STEREO_MONO_AUTO_SELECT; bcm2048_set_fm_automatic_stereo_mono()
553 bdev->cache_fm_ctrl |= BCM2048_STEREO_MONO_AUTO_SELECT; bcm2048_set_fm_automatic_stereo_mono()
555 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL, bcm2048_set_fm_automatic_stereo_mono()
556 bdev->cache_fm_ctrl); bcm2048_set_fm_automatic_stereo_mono()
558 mutex_unlock(&bdev->mutex); bcm2048_set_fm_automatic_stereo_mono()
562 static int bcm2048_set_fm_hi_lo_injection(struct bcm2048_device *bdev, bcm2048_set_fm_hi_lo_injection() argument
567 mutex_lock(&bdev->mutex); bcm2048_set_fm_hi_lo_injection()
569 bdev->cache_fm_ctrl &= ~BCM2048_HI_LO_INJECTION; bcm2048_set_fm_hi_lo_injection()
572 bdev->cache_fm_ctrl |= BCM2048_HI_LO_INJECTION; bcm2048_set_fm_hi_lo_injection()
574 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_CTRL, bcm2048_set_fm_hi_lo_injection()
575 bdev->cache_fm_ctrl); bcm2048_set_fm_hi_lo_injection()
577 mutex_unlock(&bdev->mutex); bcm2048_set_fm_hi_lo_injection()
581 static int bcm2048_get_fm_hi_lo_injection(struct bcm2048_device *bdev) bcm2048_get_fm_hi_lo_injection() argument
586 mutex_lock(&bdev->mutex); bcm2048_get_fm_hi_lo_injection()
588 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CTRL, &value); bcm2048_get_fm_hi_lo_injection()
590 mutex_unlock(&bdev->mutex); bcm2048_get_fm_hi_lo_injection()
598 static int bcm2048_set_fm_frequency(struct bcm2048_device *bdev, u32 frequency) bcm2048_set_fm_frequency() argument
602 if (frequency < bdev->region_info.bottom_frequency || bcm2048_set_fm_frequency()
603 frequency > bdev->region_info.top_frequency) bcm2048_set_fm_frequency()
608 mutex_lock(&bdev->mutex); bcm2048_set_fm_frequency()
610 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ0, lsb(frequency)); bcm2048_set_fm_frequency()
611 err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_FREQ1, bcm2048_set_fm_frequency()
615 bdev->frequency = frequency; bcm2048_set_fm_frequency()
617 mutex_unlock(&bdev->mutex); bcm2048_set_fm_frequency()
621 static int bcm2048_get_fm_frequency(struct bcm2048_device *bdev) bcm2048_get_fm_frequency() argument
626 mutex_lock(&bdev->mutex); bcm2048_get_fm_frequency()
628 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ0, &lsb); bcm2048_get_fm_frequency()
629 err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_FREQ1, &msb); bcm2048_get_fm_frequency()
631 mutex_unlock(&bdev->mutex); bcm2048_get_fm_frequency()
642 static int bcm2048_set_fm_af_frequency(struct bcm2048_device *bdev, bcm2048_set_fm_af_frequency() argument
647 if (frequency < bdev->region_info.bottom_frequency || bcm2048_set_fm_af_frequency()
648 frequency > bdev->region_info.top_frequency) bcm2048_set_fm_af_frequency()
653 mutex_lock(&bdev->mutex); bcm2048_set_fm_af_frequency()
655 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ0, bcm2048_set_fm_af_frequency()
657 err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_AF_FREQ1, bcm2048_set_fm_af_frequency()
660 bdev->frequency = frequency; bcm2048_set_fm_af_frequency()
662 mutex_unlock(&bdev->mutex); bcm2048_set_fm_af_frequency()
666 static int bcm2048_get_fm_af_frequency(struct bcm2048_device *bdev) bcm2048_get_fm_af_frequency() argument
671 mutex_lock(&bdev->mutex); bcm2048_get_fm_af_frequency()
673 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ0, &lsb); bcm2048_get_fm_af_frequency()
674 err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_AF_FREQ1, &msb); bcm2048_get_fm_af_frequency()
676 mutex_unlock(&bdev->mutex); bcm2048_get_fm_af_frequency()
687 static int bcm2048_set_fm_deemphasis(struct bcm2048_device *bdev, int d) bcm2048_set_fm_deemphasis() argument
697 mutex_lock(&bdev->mutex); bcm2048_set_fm_deemphasis()
699 bdev->cache_fm_audio_ctrl0 &= ~BCM2048_DE_EMPHASIS_SELECT; bcm2048_set_fm_deemphasis()
700 bdev->cache_fm_audio_ctrl0 |= deemphasis; bcm2048_set_fm_deemphasis()
702 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, bcm2048_set_fm_deemphasis()
703 bdev->cache_fm_audio_ctrl0); bcm2048_set_fm_deemphasis()
706 bdev->region_info.deemphasis = d; bcm2048_set_fm_deemphasis()
708 mutex_unlock(&bdev->mutex); bcm2048_set_fm_deemphasis()
713 static int bcm2048_get_fm_deemphasis(struct bcm2048_device *bdev) bcm2048_get_fm_deemphasis() argument
718 mutex_lock(&bdev->mutex); bcm2048_get_fm_deemphasis()
720 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); bcm2048_get_fm_deemphasis()
722 mutex_unlock(&bdev->mutex); bcm2048_get_fm_deemphasis()
734 static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region) bcm2048_set_region() argument
742 mutex_lock(&bdev->mutex); bcm2048_set_region()
743 bdev->region_info = region_configs[region]; bcm2048_set_region()
744 mutex_unlock(&bdev->mutex); bcm2048_set_region()
746 if (bdev->frequency < region_configs[region].bottom_frequency || bcm2048_set_region()
747 bdev->frequency > region_configs[region].top_frequency) bcm2048_set_region()
751 err = bcm2048_set_fm_frequency(bdev, new_frequency); bcm2048_set_region()
757 err = bcm2048_set_fm_deemphasis(bdev, bcm2048_set_region()
764 static int bcm2048_get_region(struct bcm2048_device *bdev) bcm2048_get_region() argument
768 mutex_lock(&bdev->mutex); bcm2048_get_region()
769 err = bdev->region_info.region; bcm2048_get_region()
770 mutex_unlock(&bdev->mutex); bcm2048_get_region()
775 static int bcm2048_set_mute(struct bcm2048_device *bdev, u16 mute) bcm2048_set_mute() argument
779 mutex_lock(&bdev->mutex); bcm2048_set_mute()
781 bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE); bcm2048_set_mute()
784 bdev->cache_fm_audio_ctrl0 |= (BCM2048_RF_MUTE | bcm2048_set_mute()
787 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, bcm2048_set_mute()
788 bdev->cache_fm_audio_ctrl0); bcm2048_set_mute()
791 bdev->mute_state = mute; bcm2048_set_mute()
793 mutex_unlock(&bdev->mutex); bcm2048_set_mute()
797 static int bcm2048_get_mute(struct bcm2048_device *bdev) bcm2048_get_mute() argument
802 mutex_lock(&bdev->mutex); bcm2048_get_mute()
804 if (bdev->power_state) { bcm2048_get_mute()
805 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, bcm2048_get_mute()
810 err = bdev->mute_state; bcm2048_get_mute()
813 mutex_unlock(&bdev->mutex); bcm2048_get_mute()
817 static int bcm2048_set_audio_route(struct bcm2048_device *bdev, u8 route) bcm2048_set_audio_route() argument
821 mutex_lock(&bdev->mutex); bcm2048_set_audio_route()
824 bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_AUDIO_ROUTE_DAC | bcm2048_set_audio_route()
826 bdev->cache_fm_audio_ctrl0 |= route; bcm2048_set_audio_route()
828 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, bcm2048_set_audio_route()
829 bdev->cache_fm_audio_ctrl0); bcm2048_set_audio_route()
831 mutex_unlock(&bdev->mutex); bcm2048_set_audio_route()
835 static int bcm2048_get_audio_route(struct bcm2048_device *bdev) bcm2048_get_audio_route() argument
840 mutex_lock(&bdev->mutex); bcm2048_get_audio_route()
842 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); bcm2048_get_audio_route()
844 mutex_unlock(&bdev->mutex); bcm2048_get_audio_route()
853 static int bcm2048_set_dac_output(struct bcm2048_device *bdev, u8 channels) bcm2048_set_dac_output() argument
857 mutex_lock(&bdev->mutex); bcm2048_set_dac_output()
859 bdev->cache_fm_audio_ctrl0 &= ~(BCM2048_DAC_OUTPUT_LEFT | bcm2048_set_dac_output()
861 bdev->cache_fm_audio_ctrl0 |= channels; bcm2048_set_dac_output()
863 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, bcm2048_set_dac_output()
864 bdev->cache_fm_audio_ctrl0); bcm2048_set_dac_output()
866 mutex_unlock(&bdev->mutex); bcm2048_set_dac_output()
870 static int bcm2048_get_dac_output(struct bcm2048_device *bdev) bcm2048_get_dac_output() argument
875 mutex_lock(&bdev->mutex); bcm2048_get_dac_output()
877 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_AUDIO_CTRL0, &value); bcm2048_get_dac_output()
879 mutex_unlock(&bdev->mutex); bcm2048_get_dac_output()
888 static int bcm2048_set_fm_search_rssi_threshold(struct bcm2048_device *bdev, bcm2048_set_fm_search_rssi_threshold() argument
893 mutex_lock(&bdev->mutex); bcm2048_set_fm_search_rssi_threshold()
896 bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_RSSI_THRESHOLD; bcm2048_set_fm_search_rssi_threshold()
897 bdev->cache_fm_search_ctrl0 |= threshold; bcm2048_set_fm_search_rssi_threshold()
899 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, bcm2048_set_fm_search_rssi_threshold()
900 bdev->cache_fm_search_ctrl0); bcm2048_set_fm_search_rssi_threshold()
902 mutex_unlock(&bdev->mutex); bcm2048_set_fm_search_rssi_threshold()
906 static int bcm2048_get_fm_search_rssi_threshold(struct bcm2048_device *bdev) bcm2048_get_fm_search_rssi_threshold() argument
911 mutex_lock(&bdev->mutex); bcm2048_get_fm_search_rssi_threshold()
913 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value); bcm2048_get_fm_search_rssi_threshold()
915 mutex_unlock(&bdev->mutex); bcm2048_get_fm_search_rssi_threshold()
923 static int bcm2048_set_fm_search_mode_direction(struct bcm2048_device *bdev, bcm2048_set_fm_search_mode_direction() argument
928 mutex_lock(&bdev->mutex); bcm2048_set_fm_search_mode_direction()
930 bdev->cache_fm_search_ctrl0 &= ~BCM2048_SEARCH_DIRECTION; bcm2048_set_fm_search_mode_direction()
933 bdev->cache_fm_search_ctrl0 |= BCM2048_SEARCH_DIRECTION; bcm2048_set_fm_search_mode_direction()
935 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, bcm2048_set_fm_search_mode_direction()
936 bdev->cache_fm_search_ctrl0); bcm2048_set_fm_search_mode_direction()
938 mutex_unlock(&bdev->mutex); bcm2048_set_fm_search_mode_direction()
942 static int bcm2048_get_fm_search_mode_direction(struct bcm2048_device *bdev) bcm2048_get_fm_search_mode_direction() argument
947 mutex_lock(&bdev->mutex); bcm2048_get_fm_search_mode_direction()
949 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_CTRL0, &value); bcm2048_get_fm_search_mode_direction()
951 mutex_unlock(&bdev->mutex); bcm2048_get_fm_search_mode_direction()
959 static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev, bcm2048_set_fm_search_tune_mode() argument
970 mutex_lock(&bdev->mutex); bcm2048_set_fm_search_tune_mode()
979 if (bcm2048_get_rds_no_lock(bdev)) { bcm2048_set_fm_search_tune_mode()
980 err = bcm2048_set_rds_no_lock(bdev, 0); bcm2048_set_fm_search_tune_mode()
986 err = bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK0, flags); bcm2048_set_fm_search_tune_mode()
991 bcm2048_send_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, value); bcm2048_set_fm_search_tune_mode()
998 if (!wait_for_completion_timeout(&bdev->compl, bcm2048_set_fm_search_tune_mode()
1000 dev_err(&bdev->client->dev, "IRQ timeout.\n"); bcm2048_set_fm_search_tune_mode()
1003 if (!bdev->scan_state) bcm2048_set_fm_search_tune_mode()
1008 err |= bcm2048_set_rds_no_lock(bdev, 1); bcm2048_set_fm_search_tune_mode()
1010 mutex_unlock(&bdev->mutex); bcm2048_set_fm_search_tune_mode()
1015 static int bcm2048_get_fm_search_tune_mode(struct bcm2048_device *bdev) bcm2048_get_fm_search_tune_mode() argument
1020 mutex_lock(&bdev->mutex); bcm2048_get_fm_search_tune_mode()
1022 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_SEARCH_TUNE_MODE, bcm2048_get_fm_search_tune_mode()
1025 mutex_unlock(&bdev->mutex); bcm2048_get_fm_search_tune_mode()
1033 static int bcm2048_set_rds_b_block_mask(struct bcm2048_device *bdev, u16 mask) bcm2048_set_rds_b_block_mask() argument
1037 mutex_lock(&bdev->mutex); bcm2048_set_rds_b_block_mask()
1039 err = bcm2048_send_command(bdev, bcm2048_set_rds_b_block_mask()
1041 err |= bcm2048_send_command(bdev, bcm2048_set_rds_b_block_mask()
1044 mutex_unlock(&bdev->mutex); bcm2048_set_rds_b_block_mask()
1048 static int bcm2048_get_rds_b_block_mask(struct bcm2048_device *bdev) bcm2048_get_rds_b_block_mask() argument
1053 mutex_lock(&bdev->mutex); bcm2048_get_rds_b_block_mask()
1055 err = bcm2048_recv_command(bdev, bcm2048_get_rds_b_block_mask()
1057 err |= bcm2048_recv_command(bdev, bcm2048_get_rds_b_block_mask()
1060 mutex_unlock(&bdev->mutex); bcm2048_get_rds_b_block_mask()
1068 static int bcm2048_set_rds_b_block_match(struct bcm2048_device *bdev, bcm2048_set_rds_b_block_match() argument
1073 mutex_lock(&bdev->mutex); bcm2048_set_rds_b_block_match()
1075 err = bcm2048_send_command(bdev, bcm2048_set_rds_b_block_match()
1077 err |= bcm2048_send_command(bdev, bcm2048_set_rds_b_block_match()
1080 mutex_unlock(&bdev->mutex); bcm2048_set_rds_b_block_match()
1084 static int bcm2048_get_rds_b_block_match(struct bcm2048_device *bdev) bcm2048_get_rds_b_block_match() argument
1089 mutex_lock(&bdev->mutex); bcm2048_get_rds_b_block_match()
1091 err = bcm2048_recv_command(bdev, bcm2048_get_rds_b_block_match()
1093 err |= bcm2048_recv_command(bdev, bcm2048_get_rds_b_block_match()
1096 mutex_unlock(&bdev->mutex); bcm2048_get_rds_b_block_match()
1104 static int bcm2048_set_rds_pi_mask(struct bcm2048_device *bdev, u16 mask) bcm2048_set_rds_pi_mask() argument
1108 mutex_lock(&bdev->mutex); bcm2048_set_rds_pi_mask()
1110 err = bcm2048_send_command(bdev, bcm2048_set_rds_pi_mask()
1112 err |= bcm2048_send_command(bdev, bcm2048_set_rds_pi_mask()
1115 mutex_unlock(&bdev->mutex); bcm2048_set_rds_pi_mask()
1119 static int bcm2048_get_rds_pi_mask(struct bcm2048_device *bdev) bcm2048_get_rds_pi_mask() argument
1124 mutex_lock(&bdev->mutex); bcm2048_get_rds_pi_mask()
1126 err = bcm2048_recv_command(bdev, bcm2048_get_rds_pi_mask()
1128 err |= bcm2048_recv_command(bdev, bcm2048_get_rds_pi_mask()
1131 mutex_unlock(&bdev->mutex); bcm2048_get_rds_pi_mask()
1139 static int bcm2048_set_rds_pi_match(struct bcm2048_device *bdev, u16 match) bcm2048_set_rds_pi_match() argument
1143 mutex_lock(&bdev->mutex); bcm2048_set_rds_pi_match()
1145 err = bcm2048_send_command(bdev, bcm2048_set_rds_pi_match()
1147 err |= bcm2048_send_command(bdev, bcm2048_set_rds_pi_match()
1150 mutex_unlock(&bdev->mutex); bcm2048_set_rds_pi_match()
1154 static int bcm2048_get_rds_pi_match(struct bcm2048_device *bdev) bcm2048_get_rds_pi_match() argument
1159 mutex_lock(&bdev->mutex); bcm2048_get_rds_pi_match()
1161 err = bcm2048_recv_command(bdev, bcm2048_get_rds_pi_match()
1163 err |= bcm2048_recv_command(bdev, bcm2048_get_rds_pi_match()
1166 mutex_unlock(&bdev->mutex); bcm2048_get_rds_pi_match()
1174 static int bcm2048_set_fm_rds_mask(struct bcm2048_device *bdev, u16 mask) bcm2048_set_fm_rds_mask() argument
1178 mutex_lock(&bdev->mutex); bcm2048_set_fm_rds_mask()
1180 err = bcm2048_send_command(bdev, bcm2048_set_fm_rds_mask()
1182 err |= bcm2048_send_command(bdev, bcm2048_set_fm_rds_mask()
1185 mutex_unlock(&bdev->mutex); bcm2048_set_fm_rds_mask()
1189 static int bcm2048_get_fm_rds_mask(struct bcm2048_device *bdev) bcm2048_get_fm_rds_mask() argument
1194 mutex_lock(&bdev->mutex); bcm2048_get_fm_rds_mask()
1196 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK0, &value0); bcm2048_get_fm_rds_mask()
1197 err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_MASK1, &value1); bcm2048_get_fm_rds_mask()
1199 mutex_unlock(&bdev->mutex); bcm2048_get_fm_rds_mask()
1207 static int bcm2048_get_fm_rds_flags(struct bcm2048_device *bdev) bcm2048_get_fm_rds_flags() argument
1212 mutex_lock(&bdev->mutex); bcm2048_get_fm_rds_flags()
1214 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &value0); bcm2048_get_fm_rds_flags()
1215 err |= bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &value1); bcm2048_get_fm_rds_flags()
1217 mutex_unlock(&bdev->mutex); bcm2048_get_fm_rds_flags()
1225 static int bcm2048_get_region_bottom_frequency(struct bcm2048_device *bdev) bcm2048_get_region_bottom_frequency() argument
1227 return bdev->region_info.bottom_frequency; bcm2048_get_region_bottom_frequency()
1230 static int bcm2048_get_region_top_frequency(struct bcm2048_device *bdev) bcm2048_get_region_top_frequency() argument
1232 return bdev->region_info.top_frequency; bcm2048_get_region_top_frequency()
1235 static int bcm2048_set_fm_best_tune_mode(struct bcm2048_device *bdev, u8 mode) bcm2048_set_fm_best_tune_mode() argument
1240 mutex_lock(&bdev->mutex); bcm2048_set_fm_best_tune_mode()
1243 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, bcm2048_set_fm_best_tune_mode()
1249 err |= bcm2048_send_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, bcm2048_set_fm_best_tune_mode()
1252 mutex_unlock(&bdev->mutex); bcm2048_set_fm_best_tune_mode()
1256 static int bcm2048_get_fm_best_tune_mode(struct bcm2048_device *bdev) bcm2048_get_fm_best_tune_mode() argument
1261 mutex_lock(&bdev->mutex); bcm2048_get_fm_best_tune_mode()
1263 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_BEST_TUNE_MODE, bcm2048_get_fm_best_tune_mode()
1266 mutex_unlock(&bdev->mutex); bcm2048_get_fm_best_tune_mode()
1274 static int bcm2048_get_fm_carrier_error(struct bcm2048_device *bdev) bcm2048_get_fm_carrier_error() argument
1279 mutex_lock(&bdev->mutex); bcm2048_get_fm_carrier_error()
1280 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_CARRIER, &value); bcm2048_get_fm_carrier_error()
1281 mutex_unlock(&bdev->mutex); bcm2048_get_fm_carrier_error()
1289 static int bcm2048_get_fm_rssi(struct bcm2048_device *bdev) bcm2048_get_fm_rssi() argument
1294 mutex_lock(&bdev->mutex); bcm2048_get_fm_rssi()
1295 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RSSI, &value); bcm2048_get_fm_rssi()
1296 mutex_unlock(&bdev->mutex); bcm2048_get_fm_rssi()
1304 static int bcm2048_set_rds_wline(struct bcm2048_device *bdev, u8 wline) bcm2048_set_rds_wline() argument
1308 mutex_lock(&bdev->mutex); bcm2048_set_rds_wline()
1310 err = bcm2048_send_command(bdev, BCM2048_I2C_RDS_WLINE, wline); bcm2048_set_rds_wline()
1313 bdev->fifo_size = wline; bcm2048_set_rds_wline()
1315 mutex_unlock(&bdev->mutex); bcm2048_set_rds_wline()
1319 static int bcm2048_get_rds_wline(struct bcm2048_device *bdev) bcm2048_get_rds_wline() argument
1324 mutex_lock(&bdev->mutex); bcm2048_get_rds_wline()
1326 err = bcm2048_recv_command(bdev, BCM2048_I2C_RDS_WLINE, &value); bcm2048_get_rds_wline()
1328 mutex_unlock(&bdev->mutex); bcm2048_get_rds_wline()
1331 bdev->fifo_size = value; bcm2048_get_rds_wline()
1338 static int bcm2048_checkrev(struct bcm2048_device *bdev) bcm2048_checkrev() argument
1343 mutex_lock(&bdev->mutex); bcm2048_checkrev()
1345 err = bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_REV, &version); bcm2048_checkrev()
1347 mutex_unlock(&bdev->mutex); bcm2048_checkrev()
1350 dev_info(&bdev->client->dev, "BCM2048 Version 0x%x\n", bcm2048_checkrev()
1358 static int bcm2048_get_rds_rt(struct bcm2048_device *bdev, char *data) bcm2048_get_rds_rt() argument
1363 mutex_lock(&bdev->mutex); bcm2048_get_rds_rt()
1365 if (!bdev->rds_info.text_len) { bcm2048_get_rds_rt()
1371 if (bdev->rds_info.rds_rt[i]) { bcm2048_get_rds_rt()
1374 if (bdev->rds_info.rds_rt[i] != 0x0d) { bcm2048_get_rds_rt()
1375 data_buffer[j++] = bdev->rds_info.rds_rt[i]; bcm2048_get_rds_rt()
1387 if (!bdev->rds_info.rds_rt[i]) { bcm2048_get_rds_rt()
1404 mutex_unlock(&bdev->mutex); bcm2048_get_rds_rt()
1408 static int bcm2048_get_rds_ps(struct bcm2048_device *bdev, char *data) bcm2048_get_rds_ps() argument
1413 mutex_lock(&bdev->mutex); bcm2048_get_rds_ps()
1415 if (!bdev->rds_info.text_len) { bcm2048_get_rds_ps()
1421 if (bdev->rds_info.rds_ps[i]) { bcm2048_get_rds_ps()
1422 data_buffer[j++] = bdev->rds_info.rds_ps[i]; bcm2048_get_rds_ps()
1437 mutex_unlock(&bdev->mutex); bcm2048_get_rds_ps()
1441 static void bcm2048_parse_rds_pi(struct bcm2048_device *bdev) bcm2048_parse_rds_pi() argument
1446 for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { bcm2048_parse_rds_pi()
1449 if (bdev->rds_info.radio_text[i] == BCM2048_RDS_BLOCK_A) { bcm2048_parse_rds_pi()
1451 pi = (bdev->rds_info.radio_text[i+1] << 8) + bcm2048_parse_rds_pi()
1452 bdev->rds_info.radio_text[i+2]; bcm2048_parse_rds_pi()
1454 if (!bdev->rds_info.rds_pi) { bcm2048_parse_rds_pi()
1455 bdev->rds_info.rds_pi = pi; bcm2048_parse_rds_pi()
1458 if (pi != bdev->rds_info.rds_pi) { bcm2048_parse_rds_pi()
1461 bdev->rds_info.rds_pi = pi; bcm2048_parse_rds_pi()
1471 static int bcm2048_rds_block_crc(struct bcm2048_device *bdev, int i) bcm2048_rds_block_crc() argument
1473 return bdev->rds_info.radio_text[i] & BCM2048_RDS_CRC_MASK; bcm2048_rds_block_crc()
1476 static void bcm2048_parse_rds_rt_block(struct bcm2048_device *bdev, int i, bcm2048_parse_rds_rt_block() argument
1481 if (!bdev->rds_info.rds_rt[index]) bcm2048_parse_rds_rt_block()
1482 bdev->rds_info.rds_rt[index] = bcm2048_parse_rds_rt_block()
1483 bdev->rds_info.radio_text[i+1]; bcm2048_parse_rds_rt_block()
1484 if (!bdev->rds_info.rds_rt[index+1]) bcm2048_parse_rds_rt_block()
1485 bdev->rds_info.rds_rt[index+1] = bcm2048_parse_rds_rt_block()
1486 bdev->rds_info.radio_text[i+2]; bcm2048_parse_rds_rt_block()
1488 bdev->rds_info.rds_rt[index] = bdev->rds_info.radio_text[i+1]; bcm2048_parse_rds_rt_block()
1489 bdev->rds_info.rds_rt[index+1] = bcm2048_parse_rds_rt_block()
1490 bdev->rds_info.radio_text[i+2]; bcm2048_parse_rds_rt_block()
1494 static int bcm2048_parse_rt_match_b(struct bcm2048_device *bdev, int i) bcm2048_parse_rt_match_b() argument
1498 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_rt_match_b()
1503 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_rt_match_b()
1506 rt_id = bdev->rds_info.radio_text[i+1] & bcm2048_parse_rt_match_b()
1508 rt_group_b = bdev->rds_info.radio_text[i+1] & bcm2048_parse_rt_match_b()
1510 rt_ab = bdev->rds_info.radio_text[i+2] & bcm2048_parse_rt_match_b()
1513 if (rt_group_b != bdev->rds_info.rds_rt_group_b) { bcm2048_parse_rt_match_b()
1514 memset(bdev->rds_info.rds_rt, 0, bcm2048_parse_rt_match_b()
1515 sizeof(bdev->rds_info.rds_rt)); bcm2048_parse_rt_match_b()
1516 bdev->rds_info.rds_rt_group_b = rt_group_b; bcm2048_parse_rt_match_b()
1521 if (rt_ab != bdev->rds_info.rds_rt_ab) { bcm2048_parse_rt_match_b()
1522 memset(bdev->rds_info.rds_rt, 0, bcm2048_parse_rt_match_b()
1523 sizeof(bdev->rds_info.rds_rt)); bcm2048_parse_rt_match_b()
1524 bdev->rds_info.rds_rt_ab = rt_ab; bcm2048_parse_rt_match_b()
1527 index = bdev->rds_info.radio_text[i+2] & bcm2048_parse_rt_match_b()
1530 if (bdev->rds_info.rds_rt_group_b) bcm2048_parse_rt_match_b()
1542 static int bcm2048_parse_rt_match_c(struct bcm2048_device *bdev, int i, bcm2048_parse_rt_match_c() argument
1547 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_rt_match_c()
1554 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_rt_match_c()
1556 if (bdev->rds_info.rds_rt_group_b) bcm2048_parse_rt_match_c()
1558 bcm2048_parse_rds_rt_block(bdev, i, index, crc); bcm2048_parse_rt_match_c()
1565 static void bcm2048_parse_rt_match_d(struct bcm2048_device *bdev, int i, bcm2048_parse_rt_match_d() argument
1570 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_rt_match_d()
1577 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_rt_match_d()
1579 bcm2048_parse_rds_rt_block(bdev, i, index+2, crc); bcm2048_parse_rt_match_d()
1582 static void bcm2048_parse_rds_rt(struct bcm2048_device *bdev) bcm2048_parse_rds_rt() argument
1586 for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { bcm2048_parse_rds_rt()
1590 index = bcm2048_parse_rt_match_b(bdev, i); bcm2048_parse_rds_rt()
1596 if (bcm2048_parse_rt_match_c(bdev, i, index)) bcm2048_parse_rds_rt()
1601 bcm2048_parse_rt_match_d(bdev, i, index); bcm2048_parse_rds_rt()
1606 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) bcm2048_parse_rds_rt()
1608 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_rds_rt()
1612 if (((bdev->rds_info.radio_text[i+1] << 8) + bcm2048_parse_rds_rt()
1613 bdev->rds_info.radio_text[i+2]) == bcm2048_parse_rds_rt()
1614 bdev->rds_info.rds_pi) bcm2048_parse_rds_rt()
1620 static void bcm2048_parse_rds_ps_block(struct bcm2048_device *bdev, int i, bcm2048_parse_rds_ps_block() argument
1625 if (!bdev->rds_info.rds_ps[index]) bcm2048_parse_rds_ps_block()
1626 bdev->rds_info.rds_ps[index] = bcm2048_parse_rds_ps_block()
1627 bdev->rds_info.radio_text[i+1]; bcm2048_parse_rds_ps_block()
1628 if (!bdev->rds_info.rds_ps[index+1]) bcm2048_parse_rds_ps_block()
1629 bdev->rds_info.rds_ps[index+1] = bcm2048_parse_rds_ps_block()
1630 bdev->rds_info.radio_text[i+2]; bcm2048_parse_rds_ps_block()
1632 bdev->rds_info.rds_ps[index] = bdev->rds_info.radio_text[i+1]; bcm2048_parse_rds_ps_block()
1633 bdev->rds_info.rds_ps[index+1] = bcm2048_parse_rds_ps_block()
1634 bdev->rds_info.radio_text[i+2]; bcm2048_parse_rds_ps_block()
1638 static int bcm2048_parse_ps_match_c(struct bcm2048_device *bdev, int i, bcm2048_parse_ps_match_c() argument
1643 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_ps_match_c()
1648 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_ps_match_c()
1655 static void bcm2048_parse_ps_match_d(struct bcm2048_device *bdev, int i, bcm2048_parse_ps_match_d() argument
1660 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_ps_match_d()
1665 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_ps_match_d()
1667 bcm2048_parse_rds_ps_block(bdev, i, index, crc); bcm2048_parse_ps_match_d()
1670 static int bcm2048_parse_ps_match_b(struct bcm2048_device *bdev, int i) bcm2048_parse_ps_match_b() argument
1674 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_ps_match_b()
1680 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) == bcm2048_parse_ps_match_b()
1682 ps_id = bdev->rds_info.radio_text[i+1] & bcm2048_parse_ps_match_b()
1684 ps_group = bdev->rds_info.radio_text[i+1] & bcm2048_parse_ps_match_b()
1691 if (ps_group != bdev->rds_info.rds_ps_group) { bcm2048_parse_ps_match_b()
1693 bdev->rds_info.rds_ps_group_cnt++; bcm2048_parse_ps_match_b()
1694 if (bdev->rds_info.rds_ps_group_cnt > 2) { bcm2048_parse_ps_match_b()
1695 bdev->rds_info.rds_ps_group = ps_group; bcm2048_parse_ps_match_b()
1696 bdev->rds_info.rds_ps_group_cnt = 0; bcm2048_parse_ps_match_b()
1697 dev_err(&bdev->client->dev, bcm2048_parse_ps_match_b()
1703 bdev->rds_info.rds_ps_group_cnt = 0; bcm2048_parse_ps_match_b()
1708 index = bdev->rds_info.radio_text[i+2] & bcm2048_parse_ps_match_b()
1718 static void bcm2048_parse_rds_ps(struct bcm2048_device *bdev) bcm2048_parse_rds_ps() argument
1722 for (i = 0; i < bdev->fifo_size; i += BCM2048_RDS_FIFO_DUPLE_SIZE) { bcm2048_parse_rds_ps()
1726 index = bcm2048_parse_ps_match_b(bdev, i); bcm2048_parse_rds_ps()
1732 if (bcm2048_parse_ps_match_c(bdev, i, index)) bcm2048_parse_rds_ps()
1737 bcm2048_parse_ps_match_d(bdev, i, index); bcm2048_parse_rds_ps()
1742 if ((bdev->rds_info.radio_text[i] & BCM2048_RDS_BLOCK_MASK) bcm2048_parse_rds_ps()
1744 crc = bcm2048_rds_block_crc(bdev, i); bcm2048_parse_rds_ps()
1748 if (((bdev->rds_info.radio_text[i+1] << 8) + bcm2048_parse_rds_ps()
1749 bdev->rds_info.radio_text[i+2]) == bcm2048_parse_rds_ps()
1750 bdev->rds_info.rds_pi) bcm2048_parse_rds_ps()
1756 static void bcm2048_rds_fifo_receive(struct bcm2048_device *bdev) bcm2048_rds_fifo_receive() argument
1760 mutex_lock(&bdev->mutex); bcm2048_rds_fifo_receive()
1762 err = bcm2048_recv_duples(bdev, BCM2048_I2C_RDS_DATA, bcm2048_rds_fifo_receive()
1763 bdev->rds_info.radio_text, bdev->fifo_size); bcm2048_rds_fifo_receive()
1765 dev_err(&bdev->client->dev, "RDS Read problem\n"); bcm2048_rds_fifo_receive()
1766 mutex_unlock(&bdev->mutex); bcm2048_rds_fifo_receive()
1770 bdev->rds_info.text_len = bdev->fifo_size; bcm2048_rds_fifo_receive()
1772 bcm2048_parse_rds_pi(bdev); bcm2048_rds_fifo_receive()
1773 bcm2048_parse_rds_rt(bdev); bcm2048_rds_fifo_receive()
1774 bcm2048_parse_rds_ps(bdev); bcm2048_rds_fifo_receive()
1776 mutex_unlock(&bdev->mutex); bcm2048_rds_fifo_receive()
1778 wake_up_interruptible(&bdev->read_queue); bcm2048_rds_fifo_receive()
1781 static int bcm2048_get_rds_data(struct bcm2048_device *bdev, char *data) bcm2048_get_rds_data() argument
1786 mutex_lock(&bdev->mutex); bcm2048_get_rds_data()
1788 if (!bdev->rds_info.text_len) { bcm2048_get_rds_data()
1799 for (i = 0; i < bdev->rds_info.text_len; i++) { bcm2048_get_rds_data()
1801 bdev->rds_info.radio_text[i]); bcm2048_get_rds_data()
1808 mutex_unlock(&bdev->mutex); bcm2048_get_rds_data()
1815 static int bcm2048_init(struct bcm2048_device *bdev) bcm2048_init() argument
1819 err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON); bcm2048_init()
1823 err = bcm2048_set_audio_route(bdev, BCM2048_AUDIO_ROUTE_DAC); bcm2048_init()
1827 err = bcm2048_set_dac_output(bdev, BCM2048_DAC_OUTPUT_LEFT | bcm2048_init()
1837 static int bcm2048_deinit(struct bcm2048_device *bdev) bcm2048_deinit() argument
1841 err = bcm2048_set_audio_route(bdev, 0); bcm2048_deinit()
1845 err = bcm2048_set_dac_output(bdev, 0); bcm2048_deinit()
1849 err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); bcm2048_deinit()
1860 static int bcm2048_probe(struct bcm2048_device *bdev) bcm2048_probe() argument
1864 err = bcm2048_set_power_state(bdev, BCM2048_POWER_ON); bcm2048_probe()
1868 err = bcm2048_checkrev(bdev); bcm2048_probe()
1872 err = bcm2048_set_mute(bdev, BCM2048_DEFAULT_MUTE); bcm2048_probe()
1876 err = bcm2048_set_region(bdev, BCM2048_DEFAULT_REGION); bcm2048_probe()
1880 err = bcm2048_set_fm_search_rssi_threshold(bdev, bcm2048_probe()
1885 err = bcm2048_set_fm_automatic_stereo_mono(bdev, BCM2048_ITEM_ENABLED); bcm2048_probe()
1889 err = bcm2048_get_rds_wline(bdev); bcm2048_probe()
1891 err = bcm2048_set_rds_wline(bdev, BCM2048_DEFAULT_RDS_WLINE); bcm2048_probe()
1895 err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); bcm2048_probe()
1897 init_waitqueue_head(&bdev->read_queue); bcm2048_probe()
1898 bdev->rds_data_available = 0; bcm2048_probe()
1899 bdev->rd_index = 0; bcm2048_probe()
1900 bdev->users = 0; bcm2048_probe()
1911 struct bcm2048_device *bdev; bcm2048_work() local
1914 bdev = container_of(work, struct bcm2048_device, work); bcm2048_work()
1915 bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG0, &flag_lsb); bcm2048_work()
1916 bcm2048_recv_command(bdev, BCM2048_I2C_FM_RDS_FLAG1, &flag_msb); bcm2048_work()
1922 bdev->scan_state = BCM2048_SCAN_FAIL; bcm2048_work()
1924 bdev->scan_state = BCM2048_SCAN_OK; bcm2048_work()
1926 complete(&bdev->compl); bcm2048_work()
1930 bcm2048_rds_fifo_receive(bdev); bcm2048_work()
1931 if (bdev->rds_state) { bcm2048_work()
1933 bcm2048_send_command(bdev, BCM2048_I2C_FM_RDS_MASK1, bcm2048_work()
1936 bdev->rds_data_available = 1; bcm2048_work()
1937 bdev->rd_index = 0; /* new data, new start */ bcm2048_work()
1946 struct bcm2048_device *bdev = dev; bcm2048_handler() local
1948 dev_dbg(&bdev->client->dev, "IRQ called, queuing work\n"); bcm2048_handler()
1949 if (bdev->power_state) bcm2048_handler()
1950 schedule_work(&bdev->work); bcm2048_handler()
1964 struct bcm2048_device *bdev = dev_get_drvdata(dev); \
1968 if (!bdev) \
1977 err = bcm2048_set_##prop(bdev, value); \
1987 struct bcm2048_device *bdev = dev_get_drvdata(dev); \
1990 if (!bdev) \
1993 value = bcm2048_get_##prop(bdev); \
2006 struct bcm2048_device *bdev = dev_get_drvdata(dev); \
2009 if (!bdev) \
2012 value = bcm2048_get_##prop(bdev); \
2028 struct bcm2048_device *bdev = dev_get_drvdata(dev); \
2032 if (!bdev) \
2039 bcm2048_get_##prop(bdev, out); \
2145 static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev, bcm2048_sysfs_unregister_properties() argument
2151 device_remove_file(&bdev->client->dev, &attrs[i]); bcm2048_sysfs_unregister_properties()
2156 static int bcm2048_sysfs_register_properties(struct bcm2048_device *bdev) bcm2048_sysfs_register_properties() argument
2162 if (device_create_file(&bdev->client->dev, &attrs[i]) != 0) { bcm2048_sysfs_register_properties()
2163 dev_err(&bdev->client->dev, bcm2048_sysfs_register_properties()
2166 bcm2048_sysfs_unregister_properties(bdev, i); bcm2048_sysfs_register_properties()
2177 struct bcm2048_device *bdev = video_drvdata(file); bcm2048_fops_open() local
2179 bdev->users++; bcm2048_fops_open()
2180 bdev->rd_index = 0; bcm2048_fops_open()
2181 bdev->rds_data_available = 0; bcm2048_fops_open()
2188 struct bcm2048_device *bdev = video_drvdata(file); bcm2048_fops_release() local
2190 bdev->users--; bcm2048_fops_release()
2198 struct bcm2048_device *bdev = video_drvdata(file); bcm2048_fops_poll() local
2201 poll_wait(file, &bdev->read_queue, pts); bcm2048_fops_poll()
2203 if (bdev->rds_data_available) bcm2048_fops_poll()
2212 struct bcm2048_device *bdev = video_drvdata(file); bcm2048_fops_read() local
2221 while (!bdev->rds_data_available) { bcm2048_fops_read()
2226 /* interruptible_sleep_on(&bdev->read_queue); */ bcm2048_fops_read()
2227 if (wait_event_interruptible(bdev->read_queue, bcm2048_fops_read()
2228 bdev->rds_data_available) < 0) { bcm2048_fops_read()
2234 mutex_lock(&bdev->mutex); bcm2048_fops_read()
2236 i = bdev->fifo_size - bdev->rd_index; bcm2048_fops_read()
2244 tmpbuf[i] = bdev->rds_info.radio_text[bdev->rd_index+i+2]; bcm2048_fops_read()
2245 tmpbuf[i+1] = bdev->rds_info.radio_text[bdev->rd_index+i+1]; bcm2048_fops_read()
2246 tmpbuf[i+2] = (bdev->rds_info.radio_text[bdev->rd_index + i] & 0xf0) >> 4; bcm2048_fops_read()
2247 if ((bdev->rds_info.radio_text[bdev->rd_index+i] & bcm2048_fops_read()
2257 bdev->rd_index += i; bcm2048_fops_read()
2258 if (bdev->rd_index >= bdev->fifo_size) bcm2048_fops_read()
2259 bdev->rds_data_available = 0; bcm2048_fops_read()
2261 mutex_unlock(&bdev->mutex); bcm2048_fops_read()
2320 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_querycap() local
2326 snprintf(capability->bus_info, 32, "I2C: 0x%X", bdev->client->addr); bcm2048_vidioc_querycap()
2370 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_g_ctrl() local
2373 if (!bdev) bcm2048_vidioc_g_ctrl()
2378 err = bcm2048_get_mute(bdev); bcm2048_vidioc_g_ctrl()
2390 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_s_ctrl() local
2393 if (!bdev) bcm2048_vidioc_s_ctrl()
2399 if (bdev->power_state) { bcm2048_vidioc_s_ctrl()
2400 err = bcm2048_set_mute(bdev, ctrl->value); bcm2048_vidioc_s_ctrl()
2401 err |= bcm2048_deinit(bdev); bcm2048_vidioc_s_ctrl()
2404 if (!bdev->power_state) { bcm2048_vidioc_s_ctrl()
2405 err = bcm2048_init(bdev); bcm2048_vidioc_s_ctrl()
2406 err |= bcm2048_set_mute(bdev, ctrl->value); bcm2048_vidioc_s_ctrl()
2439 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_g_tuner() local
2443 if (!bdev) bcm2048_vidioc_g_tuner()
2452 dev_to_v4l2(bcm2048_get_region_bottom_frequency(bdev)); bcm2048_vidioc_g_tuner()
2454 dev_to_v4l2(bcm2048_get_region_top_frequency(bdev)); bcm2048_vidioc_g_tuner()
2459 if (bdev->power_state) { bcm2048_vidioc_g_tuner()
2464 f_error = bcm2048_get_fm_carrier_error(bdev); bcm2048_vidioc_g_tuner()
2473 rssi = bcm2048_get_fm_rssi(bdev); bcm2048_vidioc_g_tuner()
2494 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_s_tuner() local
2496 if (!bdev) bcm2048_vidioc_s_tuner()
2508 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_g_frequency() local
2512 if (!bdev->power_state) bcm2048_vidioc_g_frequency()
2516 f = bcm2048_get_fm_frequency(bdev); bcm2048_vidioc_g_frequency()
2529 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_s_frequency() local
2535 if (!bdev->power_state) bcm2048_vidioc_s_frequency()
2538 err = bcm2048_set_fm_frequency(bdev, v4l2_to_dev(freq->frequency)); bcm2048_vidioc_s_frequency()
2539 err |= bcm2048_set_fm_search_tune_mode(bdev, BCM2048_FM_PRE_SET_MODE); bcm2048_vidioc_s_frequency()
2547 struct bcm2048_device *bdev = video_get_drvdata(video_devdata(file)); bcm2048_vidioc_s_hw_freq_seek() local
2550 if (!bdev->power_state) bcm2048_vidioc_s_hw_freq_seek()
2556 err = bcm2048_set_fm_search_mode_direction(bdev, seek->seek_upward); bcm2048_vidioc_s_hw_freq_seek()
2557 err |= bcm2048_set_fm_search_tune_mode(bdev, bcm2048_vidioc_s_hw_freq_seek()
2595 struct bcm2048_device *bdev; bcm2048_i2c_driver_probe() local
2598 bdev = kzalloc(sizeof(*bdev), GFP_KERNEL); bcm2048_i2c_driver_probe()
2599 if (!bdev) { bcm2048_i2c_driver_probe()
2604 bdev->client = client; bcm2048_i2c_driver_probe()
2605 i2c_set_clientdata(client, bdev); bcm2048_i2c_driver_probe()
2606 mutex_init(&bdev->mutex); bcm2048_i2c_driver_probe()
2607 init_completion(&bdev->compl); bcm2048_i2c_driver_probe()
2608 INIT_WORK(&bdev->work, bcm2048_work); bcm2048_i2c_driver_probe()
2613 client->name, bdev); bcm2048_i2c_driver_probe()
2623 bdev->videodev = bcm2048_viddev_template; bcm2048_i2c_driver_probe()
2624 video_set_drvdata(&bdev->videodev, bdev); bcm2048_i2c_driver_probe()
2625 if (video_register_device(&bdev->videodev, VFL_TYPE_RADIO, radio_nr)) { bcm2048_i2c_driver_probe()
2631 err = bcm2048_sysfs_register_properties(bdev); bcm2048_i2c_driver_probe()
2637 err = bcm2048_probe(bdev); bcm2048_i2c_driver_probe()
2646 bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs)); bcm2048_i2c_driver_probe()
2648 video_unregister_device(&bdev->videodev); bcm2048_i2c_driver_probe()
2652 free_irq(client->irq, bdev); bcm2048_i2c_driver_probe()
2655 kfree(bdev); bcm2048_i2c_driver_probe()
2662 struct bcm2048_device *bdev = i2c_get_clientdata(client); bcm2048_i2c_driver_remove() local
2667 if (bdev) { bcm2048_i2c_driver_remove()
2668 bcm2048_sysfs_unregister_properties(bdev, ARRAY_SIZE(attrs)); bcm2048_i2c_driver_remove()
2669 video_unregister_device(&bdev->videodev); bcm2048_i2c_driver_remove()
2671 if (bdev->power_state) bcm2048_i2c_driver_remove()
2672 bcm2048_set_power_state(bdev, BCM2048_POWER_OFF); bcm2048_i2c_driver_remove()
2675 free_irq(client->irq, bdev); bcm2048_i2c_driver_remove()
2677 cancel_work_sync(&bdev->work); bcm2048_i2c_driver_remove()
2679 kfree(bdev); bcm2048_i2c_driver_remove()
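
Aside: nearly every setter/getter hit above follows one locking pattern: take bdev->mutex, do a read-modify-write on a cached shadow of a write-only chip register, then flush the whole cache byte over I2C. A minimal user-space sketch of that pattern, with hypothetical names (fake_dev, chip_write, MUTE_BITS) standing in for the driver's structures:

    /* Editorial sketch, not kernel code: mutex-guarded shadow-register
     * read-modify-write, as in bcm2048_set_mute() and friends above. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MUTE_BITS 0x03u  /* hypothetical field mask, in the spirit of
                                BCM2048_RF_MUTE | BCM2048_MANUAL_MUTE */

    struct fake_dev {
        pthread_mutex_t mutex;
        uint8_t cache_audio_ctrl;   /* shadow of a write-only chip register */
    };

    /* stand-in for bcm2048_send_command(): would be an I2C write */
    static int chip_write(struct fake_dev *dev, uint8_t value)
    {
        printf("write 0x%02x\n", value);
        return 0;
    }

    static int fake_set_mute(struct fake_dev *dev, int mute)
    {
        int err;

        pthread_mutex_lock(&dev->mutex);
        dev->cache_audio_ctrl &= (uint8_t)~MUTE_BITS;  /* clear the field */
        if (mute)
            dev->cache_audio_ctrl |= MUTE_BITS;        /* set requested bits */
        err = chip_write(dev, dev->cache_audio_ctrl);  /* flush whole shadow */
        pthread_mutex_unlock(&dev->mutex);
        return err;
    }

    int main(void)
    {
        struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, 0 };
        return fake_set_mute(&dev, 1);
    }
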
/linux-4.1.27/drivers/input/misc/
cobalt_btns.c
50 struct buttons_dev *bdev = dev->private; handle_buttons() local
55 status = ~readl(bdev->reg) >> 24; handle_buttons()
57 for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) { handle_buttons()
59 if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) { handle_buttons()
61 input_report_key(input, bdev->keymap[i], 1); handle_buttons()
65 if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) { handle_buttons()
67 input_report_key(input, bdev->keymap[i], 0); handle_buttons()
70 bdev->count[i] = 0; handle_buttons()
77 struct buttons_dev *bdev; cobalt_buttons_probe() local
83 bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL); cobalt_buttons_probe()
85 if (!bdev || !poll_dev) { cobalt_buttons_probe()
90 memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap)); cobalt_buttons_probe()
92 poll_dev->private = bdev; cobalt_buttons_probe()
102 input->keycode = bdev->keymap; cobalt_buttons_probe()
103 input->keycodemax = ARRAY_SIZE(bdev->keymap); cobalt_buttons_probe()
109 __set_bit(bdev->keymap[i], input->keybit); cobalt_buttons_probe()
118 bdev->poll_dev = poll_dev; cobalt_buttons_probe()
119 bdev->reg = ioremap(res->start, resource_size(res)); cobalt_buttons_probe()
120 dev_set_drvdata(&pdev->dev, bdev); cobalt_buttons_probe()
129 iounmap(bdev->reg); cobalt_buttons_probe()
132 kfree(bdev); cobalt_buttons_probe()
139 struct buttons_dev *bdev = dev_get_drvdata(dev); cobalt_buttons_remove() local
141 input_unregister_polled_device(bdev->poll_dev); cobalt_buttons_remove()
142 input_free_polled_device(bdev->poll_dev); cobalt_buttons_remove()
143 iounmap(bdev->reg); cobalt_buttons_remove()
144 kfree(bdev); cobalt_buttons_remove()
sgi_btns.c
68 struct buttons_dev *bdev = dev->private; handle_buttons() local
75 for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) { handle_buttons()
77 if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) { handle_buttons()
79 input_report_key(input, bdev->keymap[i], 1); handle_buttons()
83 if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) { handle_buttons()
85 input_report_key(input, bdev->keymap[i], 0); handle_buttons()
88 bdev->count[i] = 0; handle_buttons()
95 struct buttons_dev *bdev; sgi_buttons_probe() local
100 bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL); sgi_buttons_probe()
102 if (!bdev || !poll_dev) { sgi_buttons_probe()
107 memcpy(bdev->keymap, sgi_map, sizeof(bdev->keymap)); sgi_buttons_probe()
109 poll_dev->private = bdev; sgi_buttons_probe()
119 input->keycode = bdev->keymap; sgi_buttons_probe()
120 input->keycodemax = ARRAY_SIZE(bdev->keymap); sgi_buttons_probe()
126 __set_bit(bdev->keymap[i], input->keybit); sgi_buttons_probe()
129 bdev->poll_dev = poll_dev; sgi_buttons_probe()
130 platform_set_drvdata(pdev, bdev); sgi_buttons_probe()
140 kfree(bdev); sgi_buttons_probe()
146 struct buttons_dev *bdev = platform_get_drvdata(pdev); sgi_buttons_remove() local
148 input_unregister_polled_device(bdev->poll_dev); sgi_buttons_remove()
149 input_free_polled_device(bdev->poll_dev); sgi_buttons_remove()
150 kfree(bdev); sgi_buttons_remove()
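
Aside: both button drivers above share the same software debounce. A per-key counter is bumped on every poll tick while the key reads as down; the press is reported only when the counter reaches BCM-independent BUTTONS_COUNT_THRESHOLD, and the release is reported once the key reads up after having crossed that threshold. A compact sketch (the bitmask input and printf reporting are stand-ins for readl() and input_report_key(); the threshold value here is chosen for the sketch, not taken from the drivers):

    #include <stdio.h>

    #define BUTTONS_COUNT_THRESHOLD 4
    #define NBUTTONS 4

    static int count[NBUTTONS];

    /* one poll tick; bit i of status is set while button i is held */
    static void handle_buttons(unsigned int status)
    {
        for (int i = 0; i < NBUTTONS; i++) {
            if (status & (1u << i)) {
                if (++count[i] == BUTTONS_COUNT_THRESHOLD)
                    printf("key %d pressed\n", i);   /* input_report_key(.., 1) */
            } else {
                if (count[i] >= BUTTONS_COUNT_THRESHOLD)
                    printf("key %d released\n", i);  /* input_report_key(.., 0) */
                count[i] = 0;
            }
        }
    }

    int main(void)
    {
        for (int tick = 0; tick < 6; tick++)
            handle_buttons(tick < 5 ? 0x1 : 0x0);    /* hold button 0, release */
        return 0;
    }
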
/linux-4.1.27/drivers/s390/block/
scm_blk.c
148 static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) scm_permit_request() argument
150 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; scm_permit_request()
183 struct scm_blk_dev *bdev = scmrq->bdev; scm_request_prepare() local
184 struct scm_device *scmdev = bdev->gendisk->private_data; scm_request_prepare()
220 static inline void scm_request_init(struct scm_blk_dev *bdev, scm_request_init() argument
229 aobrq->scmdev = bdev->scmdev; scm_request_init()
232 scmrq->bdev = bdev; scm_request_init()
240 static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) scm_ensure_queue_restart() argument
242 if (atomic_read(&bdev->queued_reqs)) { scm_ensure_queue_restart()
246 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); scm_ensure_queue_restart()
251 struct scm_blk_dev *bdev = scmrq->bdev; scm_request_requeue() local
256 blk_requeue_request(bdev->rq, scmrq->request[i]); scm_request_requeue()
258 atomic_dec(&bdev->queued_reqs); scm_request_requeue()
260 scm_ensure_queue_restart(bdev); scm_request_requeue()
265 struct scm_blk_dev *bdev = scmrq->bdev; scm_request_finish() local
272 atomic_dec(&bdev->queued_reqs); scm_request_finish()
278 struct scm_blk_dev *bdev = scmrq->bdev; scm_request_start() local
281 atomic_inc(&bdev->queued_reqs); scm_request_start()
298 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); scm_blk_request() local
310 if (!scm_permit_request(bdev, req)) scm_blk_request()
319 scm_request_init(bdev, scmrq); scm_blk_request()
340 atomic_inc(&bdev->queued_reqs); scm_blk_request()
367 scm_ensure_queue_restart(bdev); scm_blk_request()
390 struct scm_blk_dev *bdev = scmrq->bdev; scm_blk_irq() local
396 spin_lock(&bdev->lock); scm_blk_irq()
397 list_add_tail(&scmrq->list, &bdev->finished_requests); scm_blk_irq()
398 spin_unlock(&bdev->lock); scm_blk_irq()
399 tasklet_hi_schedule(&bdev->tasklet); scm_blk_irq()
404 struct scm_blk_dev *bdev = scmrq->bdev; scm_blk_handle_error() local
413 spin_lock_irqsave(&bdev->lock, flags); scm_blk_handle_error()
414 if (bdev->state != SCM_WR_PROHIBIT) scm_blk_handle_error()
416 (unsigned long) bdev->scmdev->address); scm_blk_handle_error()
417 bdev->state = SCM_WR_PROHIBIT; scm_blk_handle_error()
418 spin_unlock_irqrestore(&bdev->lock, flags); scm_blk_handle_error()
429 spin_lock_irqsave(&bdev->rq_lock, flags); scm_blk_handle_error()
431 spin_unlock_irqrestore(&bdev->rq_lock, flags); scm_blk_handle_error()
434 static void scm_blk_tasklet(struct scm_blk_dev *bdev) scm_blk_tasklet() argument
439 spin_lock_irqsave(&bdev->lock, flags); scm_blk_tasklet()
440 while (!list_empty(&bdev->finished_requests)) { scm_blk_tasklet()
441 scmrq = list_first_entry(&bdev->finished_requests, scm_blk_tasklet()
444 spin_unlock_irqrestore(&bdev->lock, flags); scm_blk_tasklet()
450 spin_lock_irqsave(&bdev->lock, flags); scm_blk_tasklet()
456 spin_lock_irqsave(&bdev->lock, flags); scm_blk_tasklet()
461 spin_lock_irqsave(&bdev->lock, flags); scm_blk_tasklet()
463 spin_unlock_irqrestore(&bdev->lock, flags); scm_blk_tasklet()
465 blk_run_queue(bdev->rq); scm_blk_tasklet()
472 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) scm_blk_dev_setup() argument
485 bdev->scmdev = scmdev; scm_blk_dev_setup()
486 bdev->state = SCM_OPER; scm_blk_dev_setup()
487 spin_lock_init(&bdev->rq_lock); scm_blk_dev_setup()
488 spin_lock_init(&bdev->lock); scm_blk_dev_setup()
489 INIT_LIST_HEAD(&bdev->finished_requests); scm_blk_dev_setup()
490 atomic_set(&bdev->queued_reqs, 0); scm_blk_dev_setup()
491 tasklet_init(&bdev->tasklet, scm_blk_dev_setup()
493 (unsigned long) bdev); scm_blk_dev_setup()
495 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); scm_blk_dev_setup()
499 bdev->rq = rq; scm_blk_dev_setup()
508 scm_blk_dev_cluster_setup(bdev); scm_blk_dev_setup()
510 bdev->gendisk = alloc_disk(SCM_NR_PARTS); scm_blk_dev_setup()
511 if (!bdev->gendisk) scm_blk_dev_setup()
515 bdev->gendisk->driverfs_dev = &scmdev->dev; scm_blk_dev_setup()
516 bdev->gendisk->private_data = scmdev; scm_blk_dev_setup()
517 bdev->gendisk->fops = &scm_blk_devops; scm_blk_dev_setup()
518 bdev->gendisk->queue = rq; scm_blk_dev_setup()
519 bdev->gendisk->major = scm_major; scm_blk_dev_setup()
520 bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; scm_blk_dev_setup()
522 len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); scm_blk_dev_setup()
524 len += snprintf(bdev->gendisk->disk_name + len, scm_blk_dev_setup()
529 snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c", scm_blk_dev_setup()
533 set_capacity(bdev->gendisk, scmdev->size >> 9); scm_blk_dev_setup()
534 add_disk(bdev->gendisk); scm_blk_dev_setup()
544 void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) scm_blk_dev_cleanup() argument
546 tasklet_kill(&bdev->tasklet); scm_blk_dev_cleanup()
547 del_gendisk(bdev->gendisk); scm_blk_dev_cleanup()
548 blk_cleanup_queue(bdev->gendisk->queue); scm_blk_dev_cleanup()
549 put_disk(bdev->gendisk); scm_blk_dev_cleanup()
552 void scm_blk_set_available(struct scm_blk_dev *bdev) scm_blk_set_available() argument
556 spin_lock_irqsave(&bdev->lock, flags); scm_blk_set_available()
557 if (bdev->state == SCM_WR_PROHIBIT) scm_blk_set_available()
559 (unsigned long) bdev->scmdev->address); scm_blk_set_available()
560 bdev->state = SCM_OPER; scm_blk_set_available()
561 spin_unlock_irqrestore(&bdev->lock, flags); scm_blk_set_available()
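
Aside: the scm_blk.c hits sketch a small write-gating state machine: scm_blk_handle_error() flips a lock-protected state field to SCM_WR_PROHIBIT, scm_blk_set_available() flips it back to SCM_OPER, and scm_permit_request() rejects writes (reads always pass) while prohibited. A stand-alone approximation, using a pthread mutex where the driver uses a spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum scm_state { SCM_OPER, SCM_WR_PROHIBIT };

    struct fake_blk_dev {
        pthread_mutex_t lock;        /* the driver uses a spinlock */
        enum scm_state state;
    };

    /* mirrors scm_permit_request(): reads always pass, writes need SCM_OPER */
    static bool permit_request(struct fake_blk_dev *b, bool is_write)
    {
        pthread_mutex_lock(&b->lock);
        bool ok = !is_write || b->state != SCM_WR_PROHIBIT;
        pthread_mutex_unlock(&b->lock);
        return ok;
    }

    /* mirrors scm_blk_handle_error(): log once, then gate writes */
    static void handle_error(struct fake_blk_dev *b)
    {
        pthread_mutex_lock(&b->lock);
        if (b->state != SCM_WR_PROHIBIT)
            printf("suspending writes\n");
        b->state = SCM_WR_PROHIBIT;
        pthread_mutex_unlock(&b->lock);
    }

    /* mirrors scm_blk_set_available(): log once, then re-enable writes */
    static void set_available(struct fake_blk_dev *b)
    {
        pthread_mutex_lock(&b->lock);
        if (b->state == SCM_WR_PROHIBIT)
            printf("resuming writes\n");
        b->state = SCM_OPER;
        pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct fake_blk_dev b = { PTHREAD_MUTEX_INITIALIZER, SCM_OPER };
        handle_error(&b);
        printf("write: %d, read: %d\n",
               permit_request(&b, true), permit_request(&b, false));
        set_available(&b);
        printf("write: %d\n", permit_request(&b, true));
        return 0;
    }
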
scm_drv.c
18 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); scm_notify() local
30 scm_blk_set_available(bdev); scm_notify()
37 struct scm_blk_dev *bdev; scm_probe() local
46 bdev = kzalloc(sizeof(*bdev), GFP_KERNEL); scm_probe()
47 if (!bdev) scm_probe()
50 dev_set_drvdata(&scmdev->dev, bdev); scm_probe()
51 ret = scm_blk_dev_setup(bdev, scmdev); scm_probe()
54 kfree(bdev); scm_probe()
64 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); scm_remove() local
66 scm_blk_dev_cleanup(bdev); scm_remove()
68 kfree(bdev); scm_remove()
scm_blk_cluster.c
78 struct scm_blk_dev *bdev = scmrq->bdev; scm_reserve_cluster() local
85 spin_lock(&bdev->lock); scm_reserve_cluster()
86 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { scm_reserve_cluster()
99 spin_unlock(&bdev->lock); scm_reserve_cluster()
105 list_add(&scmrq->cluster.list, &bdev->cluster_list); scm_reserve_cluster()
106 spin_unlock(&bdev->lock); scm_reserve_cluster()
113 struct scm_blk_dev *bdev = scmrq->bdev; scm_release_cluster() local
119 spin_lock_irqsave(&bdev->lock, flags); scm_release_cluster()
121 spin_unlock_irqrestore(&bdev->lock, flags); scm_release_cluster()
124 void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) scm_blk_dev_cluster_setup() argument
126 INIT_LIST_HEAD(&bdev->cluster_list); scm_blk_dev_cluster_setup()
127 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); scm_blk_dev_cluster_setup()
132 struct scm_blk_dev *bdev = scmrq->bdev; scm_prepare_cluster_request() local
133 struct scm_device *scmdev = bdev->gendisk->private_data; scm_prepare_cluster_request()
226 struct scm_blk_dev *bdev = scmrq->bdev; scm_cluster_request_irq() local
239 spin_lock_irqsave(&bdev->rq_lock, flags); scm_cluster_request_irq()
241 spin_unlock_irqrestore(&bdev->rq_lock, flags); scm_cluster_request_irq()
dasd_genhd.c
101 struct block_device *bdev; dasd_scan_partitions() local
105 bdev = bdget_disk(block->gdp, 0); dasd_scan_partitions()
106 if (!bdev) { dasd_scan_partitions()
112 rc = blkdev_get(bdev, FMODE_READ, NULL); dasd_scan_partitions()
123 rc = ioctl_by_bdev(bdev, BLKRRPART, 0); dasd_scan_partitions()
126 rc = ioctl_by_bdev(bdev, BLKRRPART, 0); dasd_scan_partitions()
137 * 0 to 1. This is done by setting device->bdev (see dasd_scan_partitions()
140 * is why the assignment to device->bdev is done AFTER dasd_scan_partitions()
143 block->bdev = bdev; dasd_scan_partitions()
156 struct block_device *bdev; dasd_destroy_partitions() local
159 * Get the bdev pointer from the device structure and clear dasd_destroy_partitions()
160 * device->bdev to lower the offline open_count limit again. dasd_destroy_partitions()
162 bdev = block->bdev; dasd_destroy_partitions()
163 block->bdev = NULL; dasd_destroy_partitions()
175 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); dasd_destroy_partitions()
179 blkdev_put(bdev, FMODE_READ); dasd_destroy_partitions()
dasd_ioctl.c
44 dasd_ioctl_enable(struct block_device *bdev) dasd_ioctl_enable() argument
51 base = dasd_device_from_gendisk(bdev->bd_disk); dasd_ioctl_enable()
57 mutex_lock(&bdev->bd_mutex); dasd_ioctl_enable()
58 i_size_write(bdev->bd_inode, dasd_ioctl_enable()
60 mutex_unlock(&bdev->bd_mutex); dasd_ioctl_enable()
70 dasd_ioctl_disable(struct block_device *bdev) dasd_ioctl_disable() argument
77 base = dasd_device_from_gendisk(bdev->bd_disk); dasd_ioctl_disable()
93 mutex_lock(&bdev->bd_mutex); dasd_ioctl_disable()
94 i_size_write(bdev->bd_inode, 0); dasd_ioctl_disable()
95 mutex_unlock(&bdev->bd_mutex); dasd_ioctl_disable()
231 struct block_device *bdev = bdget_disk(block->gdp, 0); dasd_format() local
232 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize); dasd_format()
233 bdput(bdev); dasd_format()
266 dasd_ioctl_format(struct block_device *bdev, void __user *argp) dasd_ioctl_format() argument
276 base = dasd_device_from_gendisk(bdev->bd_disk); dasd_ioctl_format()
288 if (bdev != bdev->bd_contains) { dasd_ioctl_format()
413 if (!block->bdev) dasd_ioctl_information()
461 dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) dasd_ioctl_set_ro() argument
468 if (bdev != bdev->bd_contains) dasd_ioctl_set_ro()
473 base = dasd_device_from_gendisk(bdev->bd_disk); dasd_ioctl_set_ro()
480 set_disk_ro(bdev->bd_disk, intval); dasd_ioctl_set_ro()
499 int dasd_ioctl(struct block_device *bdev, fmode_t mode, dasd_ioctl() argument
517 base = dasd_device_from_gendisk(bdev->bd_disk); dasd_ioctl()
524 rc = dasd_ioctl_disable(bdev); dasd_ioctl()
527 rc = dasd_ioctl_enable(bdev); dasd_ioctl()
542 rc = dasd_ioctl_format(bdev, argp); dasd_ioctl()
557 rc = dasd_ioctl_set_ro(bdev, argp); dasd_ioctl()
scm_blk.h
32 struct scm_blk_dev *bdev; member in struct:scm_request
87 static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {} scm_need_cluster_request() argument
dcssblk.c
28 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
31 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
773 dcssblk_open(struct block_device *bdev, fmode_t mode) dcssblk_open() argument
778 dev_info = bdev->bd_disk->private_data; dcssblk_open()
784 bdev->bd_block_size = 4096; dcssblk_open()
881 dcssblk_direct_access (struct block_device *bdev, sector_t secnum, dcssblk_direct_access() argument
887 dev_info = bdev->bd_disk->private_data; dcssblk_direct_access()
/linux-4.1.27/fs/
block_dev.c
34 struct block_device bdev; member in struct:bdev_inode
47 return &BDEV_I(inode)->bdev; I_BDEV()
63 void kill_bdev(struct block_device *bdev) kill_bdev() argument
65 struct address_space *mapping = bdev->bd_inode->i_mapping; kill_bdev()
76 void invalidate_bdev(struct block_device *bdev) invalidate_bdev() argument
78 struct address_space *mapping = bdev->bd_inode->i_mapping; invalidate_bdev()
86 /* 99% of the time, we don't need to flush the cleancache on the bdev. invalidate_bdev()
93 int set_blocksize(struct block_device *bdev, int size) set_blocksize() argument
100 if (size < bdev_logical_block_size(bdev)) set_blocksize()
104 if (bdev->bd_block_size != size) { set_blocksize()
105 sync_blockdev(bdev); set_blocksize()
106 bdev->bd_block_size = size; set_blocksize()
107 bdev->bd_inode->i_blkbits = blksize_bits(size); set_blocksize()
108 kill_bdev(bdev); set_blocksize()
159 int __sync_blockdev(struct block_device *bdev, int wait) __sync_blockdev() argument
161 if (!bdev) __sync_blockdev()
164 return filemap_flush(bdev->bd_inode->i_mapping); __sync_blockdev()
165 return filemap_write_and_wait(bdev->bd_inode->i_mapping); __sync_blockdev()
172 int sync_blockdev(struct block_device *bdev) sync_blockdev() argument
174 return __sync_blockdev(bdev, 1); sync_blockdev()
183 int fsync_bdev(struct block_device *bdev) fsync_bdev() argument
185 struct super_block *sb = get_super(bdev); fsync_bdev()
191 return sync_blockdev(bdev); fsync_bdev()
197 * @bdev: blockdevice to lock
207 struct super_block *freeze_bdev(struct block_device *bdev) freeze_bdev() argument
212 mutex_lock(&bdev->bd_fsfreeze_mutex); freeze_bdev()
213 if (++bdev->bd_fsfreeze_count > 1) { freeze_bdev()
219 sb = get_super(bdev); freeze_bdev()
221 mutex_unlock(&bdev->bd_fsfreeze_mutex); freeze_bdev()
225 sb = get_active_super(bdev); freeze_bdev()
234 bdev->bd_fsfreeze_count--; freeze_bdev()
235 mutex_unlock(&bdev->bd_fsfreeze_mutex); freeze_bdev()
240 sync_blockdev(bdev); freeze_bdev()
241 mutex_unlock(&bdev->bd_fsfreeze_mutex); freeze_bdev()
248 * @bdev: blockdevice to unlock
253 int thaw_bdev(struct block_device *bdev, struct super_block *sb) thaw_bdev() argument
257 mutex_lock(&bdev->bd_fsfreeze_mutex); thaw_bdev()
258 if (!bdev->bd_fsfreeze_count) thaw_bdev()
262 if (--bdev->bd_fsfreeze_count > 0) thaw_bdev()
273 bdev->bd_fsfreeze_count++; thaw_bdev()
274 mutex_unlock(&bdev->bd_fsfreeze_mutex); thaw_bdev()
278 mutex_unlock(&bdev->bd_fsfreeze_mutex); thaw_bdev()
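
The pair above is the quiesce API: freeze_bdev() bumps bd_fsfreeze_count and hands back the frozen superblock (NULL when nothing is mounted), and thaw_bdev() wants that same pointer returned to it. A minimal sketch of the pattern, assuming the snapshot work itself lives elsewhere:

#include <linux/err.h>
#include <linux/fs.h>

/* Sketch: quiesce a device around a snapshot. */
static int snapshot_quiesced(struct block_device *bdev)
{
	struct super_block *sb = freeze_bdev(bdev);	/* blocks new writes */

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* device is clean and write-frozen here; take the snapshot */

	return thaw_bdev(bdev, sb);	/* sb == NULL is accepted */
}
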
339 struct block_device *bdev = I_BDEV(bd_inode); blkdev_fsync() local
351 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); blkdev_fsync()
361 * @bdev: The device to read the page from
375 int bdev_read_page(struct block_device *bdev, sector_t sector, bdev_read_page() argument
378 const struct block_device_operations *ops = bdev->bd_disk->fops; bdev_read_page()
381 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); bdev_read_page()
387 * @bdev: The device to write the page to
404 int bdev_write_page(struct block_device *bdev, sector_t sector, bdev_write_page() argument
409 const struct block_device_operations *ops = bdev->bd_disk->fops; bdev_write_page()
413 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); bdev_write_page()
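
bdev_read_page()/bdev_write_page() only work when the driver supplies ->rw_page and return -EOPNOTSUPP otherwise, so callers keep a bio-based fallback. A hedged sketch, where submit_page_read() stands in for the caller's own bio path:

#include <linux/blkdev.h>

static int read_page_fast(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	int ret = bdev_read_page(bdev, sector, page);

	if (ret == -EOPNOTSUPP)	/* no ->rw_page; use the bio path */
		ret = submit_page_read(bdev, sector, page); /* hypothetical */
	return ret;
}
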
424 * @bdev: The device containing the memory
439 long bdev_direct_access(struct block_device *bdev, sector_t sector, bdev_direct_access() argument
443 const struct block_device_operations *ops = bdev->bd_disk->fops; bdev_direct_access()
450 part_nr_sects_read(bdev->bd_part)) bdev_direct_access()
452 sector += get_start_sect(bdev); bdev_direct_access()
455 avail = ops->direct_access(bdev, sector, addr, pfn, size); bdev_direct_access()
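
A sketch of a caller, under the 4.1-era signature shown above (kernel address and pfn come back by reference, the available byte count is the return value):

#include <linux/blkdev.h>
#include <linux/string.h>

static long dax_copy_out(struct block_device *bdev, sector_t sector,
			 void *dst, long len)
{
	void *kaddr;
	unsigned long pfn;
	long avail = bdev_direct_access(bdev, sector, &kaddr, &pfn, len);

	if (avail < 0)		/* no ->direct_access, or out of range */
		return avail;
	if (avail > len)
		avail = len;
	memcpy(dst, kaddr, avail);
	return avail;
}
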
493 struct block_device *bdev = &ei->bdev; init_once() local
495 memset(bdev, 0, sizeof(*bdev)); init_once()
496 mutex_init(&bdev->bd_mutex); init_once()
497 INIT_LIST_HEAD(&bdev->bd_inodes); init_once()
498 INIT_LIST_HEAD(&bdev->bd_list); init_once()
500 INIT_LIST_HEAD(&bdev->bd_holder_disks); init_once()
504 mutex_init(&bdev->bd_fsfreeze_mutex); init_once()
516 struct block_device *bdev = &BDEV_I(inode)->bdev; bdev_evict_inode() local
522 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) { bdev_evict_inode()
525 list_del_init(&bdev->bd_list); bdev_evict_inode()
540 return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); bd_mount()
544 .name = "bdev",
562 panic("Cannot register bdev pseudo-fs"); bdev_cache_init()
565 panic("Cannot create bdev pseudo-fs"); bdev_cache_init()
581 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; bdev_test()
586 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; bdev_set()
594 struct block_device *bdev; bdget() local
603 bdev = &BDEV_I(inode)->bdev; bdget()
606 bdev->bd_contains = NULL; bdget()
607 bdev->bd_super = NULL; bdget()
608 bdev->bd_inode = inode; bdget()
609 bdev->bd_block_size = (1 << inode->i_blkbits); bdget()
610 bdev->bd_part_count = 0; bdget()
611 bdev->bd_invalidated = 0; bdget()
614 inode->i_bdev = bdev; bdget()
618 list_add(&bdev->bd_list, &all_bdevs); bdget()
622 return bdev; bdget()
629 * @bdev: Block device to grab a reference to.
631 struct block_device *bdgrab(struct block_device *bdev) bdgrab() argument
633 ihold(bdev->bd_inode); bdgrab()
634 return bdev; bdgrab()
640 struct block_device *bdev; nr_blockdev_pages() local
643 list_for_each_entry(bdev, &all_bdevs, bd_list) { nr_blockdev_pages()
644 ret += bdev->bd_inode->i_mapping->nrpages; nr_blockdev_pages()
650 void bdput(struct block_device *bdev) bdput() argument
652 iput(bdev->bd_inode); bdput()
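
bdget()/bdgrab()/bdput() form the reference-count triple around the bdev inode (bdgrab() and bdput() are just ihold()/iput() on bd_inode, as shown). A tiny lifecycle sketch:

#include <linux/fs.h>

static int with_bdev(dev_t devt)
{
	struct block_device *bdev = bdget(devt);	/* +1 on bd_inode */

	if (!bdev)
		return -ENOMEM;
	/* bdev is pinned; e.g. safe to hand to blkdev_get() */
	bdput(bdev);					/* -1 */
	return 0;
}
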
659 struct block_device *bdev; bd_acquire() local
662 bdev = inode->i_bdev; bd_acquire()
663 if (bdev) { bd_acquire()
664 ihold(bdev->bd_inode); bd_acquire()
666 return bdev; bd_acquire()
670 bdev = bdget(inode->i_rdev); bd_acquire()
671 if (bdev) { bd_acquire()
680 ihold(bdev->bd_inode); bd_acquire()
681 inode->i_bdev = bdev; bd_acquire()
682 inode->i_mapping = bdev->bd_inode->i_mapping; bd_acquire()
683 list_add(&inode->i_devices, &bdev->bd_inodes); bd_acquire()
687 return bdev; bd_acquire()
699 struct block_device *bdev = NULL; bd_forget() local
703 bdev = inode->i_bdev; bd_forget()
707 if (bdev) bd_forget()
708 iput(bdev->bd_inode); bd_forget()
713 * @bdev: block device of interest
714 * @whole: whole block device containing @bdev, may equal @bdev
715 * @holder: holder trying to claim @bdev
717 * Test whether @bdev can be claimed by @holder.
723 * %true if @bdev can be claimed, %false otherwise.
725 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, bd_may_claim() argument
728 if (bdev->bd_holder == holder) bd_may_claim()
730 else if (bdev->bd_holder != NULL) bd_may_claim()
732 else if (bdev->bd_contains == bdev) bd_may_claim()
745 * @bdev: block device of interest
746 * @whole: the whole device containing @bdev, may equal @bdev
747 * @holder: holder trying to claim @bdev
749 * Prepare to claim @bdev. This function fails if @bdev is already
759 * 0 if @bdev can be claimed, -EBUSY otherwise.
761 static int bd_prepare_to_claim(struct block_device *bdev, bd_prepare_to_claim() argument
766 if (!bd_may_claim(bdev, whole, holder)) bd_prepare_to_claim()
788 * @bdev: block device of interest
789 * @holder: holder trying to claim @bdev
791 * @bdev is about to be opened exclusively. Check @bdev can be opened
806 * Pointer to the block device containing @bdev on success, ERR_PTR()
809 static struct block_device *bd_start_claiming(struct block_device *bdev, bd_start_claiming() argument
819 * @bdev might not have been initialized properly yet, look up bd_start_claiming()
822 disk = get_gendisk(bdev->bd_dev, &partno); bd_start_claiming()
827 * Normally, @bdev should equal what's returned from bdget_disk() bd_start_claiming()
829 * bdev's for the same physical device and @bdev may be one of the bd_start_claiming()
830 * aliases. Keep @bdev if partno is 0. This means claimer bd_start_claiming()
837 whole = bdgrab(bdev); bd_start_claiming()
847 err = bd_prepare_to_claim(bdev, whole, holder); bd_start_claiming()
866 static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, bd_find_holder_disk() argument
871 list_for_each_entry(holder, &bdev->bd_holder_disks, list) bd_find_holder_disk()
888 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
889 * @bdev: the claimed slave bdev
896 * - from "slaves" directory of the holder @disk to the claimed @bdev
897 * - from "holders" directory of the @bdev to the holder @disk
905 * The caller must have claimed @bdev before calling this function and
906 * ensure that both @bdev and @disk are valid during the creation and
915 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) bd_link_disk_holder() argument
920 mutex_lock(&bdev->bd_mutex); bd_link_disk_holder()
922 WARN_ON_ONCE(!bdev->bd_holder); bd_link_disk_holder()
925 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) bd_link_disk_holder()
928 holder = bd_find_holder_disk(bdev, disk); bd_link_disk_holder()
944 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); bd_link_disk_holder()
948 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); bd_link_disk_holder()
952 * bdev could be deleted beneath us which would implicitly destroy bd_link_disk_holder()
955 kobject_get(bdev->bd_part->holder_dir); bd_link_disk_holder()
957 list_add(&holder->list, &bdev->bd_holder_disks); bd_link_disk_holder()
961 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); bd_link_disk_holder()
965 mutex_unlock(&bdev->bd_mutex); bd_link_disk_holder()
972 * @bdev: the claimed slave bdev bd_unlink_disk_holder()
980 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) bd_unlink_disk_holder() argument
984 mutex_lock(&bdev->bd_mutex); bd_unlink_disk_holder()
986 holder = bd_find_holder_disk(bdev, disk); bd_unlink_disk_holder()
989 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); bd_unlink_disk_holder()
990 del_symlink(bdev->bd_part->holder_dir, bd_unlink_disk_holder()
992 kobject_put(bdev->bd_part->holder_dir); bd_unlink_disk_holder()
997 mutex_unlock(&bdev->bd_mutex); bd_unlink_disk_holder()
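
The expected consumer is a stacking driver such as dm or md: claim the component exclusively, then publish the sysfs links; teardown reverses it with bd_unlink_disk_holder() before blkdev_put(). A hedged sketch, with vdisk the virtual device's gendisk:

#include <linux/fs.h>
#include <linux/genhd.h>

static int attach_slave(struct gendisk *vdisk, struct block_device *bdev,
			void *holder)
{
	int err;

	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (err)
		return err;	/* note: blkdev_get() dropped our bdev ref */

	err = bd_link_disk_holder(bdev, vdisk);	/* slaves/ + holders/ links */
	if (err)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	return err;
}
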
1005 * @bdev: struct block device to be flushed
1012 static void flush_disk(struct block_device *bdev, bool kill_dirty) flush_disk() argument
1014 if (__invalidate_device(bdev, kill_dirty)) { flush_disk()
1017 if (bdev->bd_disk) flush_disk()
1018 disk_name(bdev->bd_disk, 0, name); flush_disk()
1023 if (!bdev->bd_disk) flush_disk()
1025 if (disk_part_scan_enabled(bdev->bd_disk)) flush_disk()
1026 bdev->bd_invalidated = 1; flush_disk()
1030 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1032 * @bdev: struct bdev to adjust.
1034 * This routine checks to see if the bdev size does not match the disk size
1037 void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) check_disk_size_change() argument
1042 bdev_size = i_size_read(bdev->bd_inode); check_disk_size_change()
1050 i_size_write(bdev->bd_inode, disk_size); check_disk_size_change()
1051 flush_disk(bdev, false); check_disk_size_change()
1066 struct block_device *bdev; revalidate_disk() local
1072 bdev = bdget_disk(disk, 0); revalidate_disk()
1073 if (!bdev) revalidate_disk()
1076 mutex_lock(&bdev->bd_mutex); revalidate_disk()
1077 check_disk_size_change(disk, bdev); revalidate_disk()
1078 bdev->bd_invalidated = 0; revalidate_disk()
1079 mutex_unlock(&bdev->bd_mutex); revalidate_disk()
1080 bdput(bdev); revalidate_disk()
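
A driver normally triggers this path by updating the gendisk capacity and revalidating; a sketch:

#include <linux/fs.h>
#include <linux/genhd.h>

static void announce_resize(struct gendisk *disk, sector_t new_sectors)
{
	set_capacity(disk, new_sectors);
	revalidate_disk(disk);		/* runs check_disk_size_change() */
}
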
1094 int check_disk_change(struct block_device *bdev) check_disk_change() argument
1096 struct gendisk *disk = bdev->bd_disk; check_disk_change()
1105 flush_disk(bdev, true); check_disk_change()
1107 bdops->revalidate_disk(bdev->bd_disk); check_disk_change()
1113 void bd_set_size(struct block_device *bdev, loff_t size) bd_set_size() argument
1115 unsigned bsize = bdev_logical_block_size(bdev); bd_set_size()
1117 mutex_lock(&bdev->bd_inode->i_mutex); bd_set_size()
1118 i_size_write(bdev->bd_inode, size); bd_set_size()
1119 mutex_unlock(&bdev->bd_inode->i_mutex); bd_set_size()
1125 bdev->bd_block_size = bsize; bd_set_size()
1126 bdev->bd_inode->i_blkbits = blksize_bits(bsize); bd_set_size()
1130 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
1139 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) __blkdev_get() argument
1155 ret = devcgroup_inode_permission(bdev->bd_inode, perm); __blkdev_get()
1157 bdput(bdev); __blkdev_get()
1165 disk = get_gendisk(bdev->bd_dev, &partno); __blkdev_get()
1171 mutex_lock_nested(&bdev->bd_mutex, for_part); __blkdev_get()
1172 if (!bdev->bd_openers) { __blkdev_get()
1173 bdev->bd_disk = disk; __blkdev_get()
1174 bdev->bd_queue = disk->queue; __blkdev_get()
1175 bdev->bd_contains = bdev; __blkdev_get()
1178 bdev->bd_part = disk_get_part(disk, partno); __blkdev_get()
1179 if (!bdev->bd_part) __blkdev_get()
1184 ret = disk->fops->open(bdev, mode); __blkdev_get()
1190 disk_put_part(bdev->bd_part); __blkdev_get()
1191 bdev->bd_part = NULL; __blkdev_get()
1192 bdev->bd_disk = NULL; __blkdev_get()
1193 bdev->bd_queue = NULL; __blkdev_get()
1194 mutex_unlock(&bdev->bd_mutex); __blkdev_get()
1203 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); __blkdev_get()
1211 if (bdev->bd_invalidated) { __blkdev_get()
1213 rescan_partitions(disk, bdev); __blkdev_get()
1215 invalidate_partitions(disk, bdev); __blkdev_get()
1229 bdev->bd_contains = whole; __blkdev_get()
1230 bdev->bd_part = disk_get_part(disk, partno); __blkdev_get()
1232 !bdev->bd_part || !bdev->bd_part->nr_sects) { __blkdev_get()
1236 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); __blkdev_get()
1241 if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) || __blkdev_get()
1242 (bdev->bd_part->nr_sects % (PAGE_SIZE / 512))) __blkdev_get()
1243 bdev->bd_inode->i_flags &= ~S_DAX; __blkdev_get()
1246 if (bdev->bd_contains == bdev) { __blkdev_get()
1248 if (bdev->bd_disk->fops->open) __blkdev_get()
1249 ret = bdev->bd_disk->fops->open(bdev, mode); __blkdev_get()
1251 if (bdev->bd_invalidated) { __blkdev_get()
1253 rescan_partitions(bdev->bd_disk, bdev); __blkdev_get()
1255 invalidate_partitions(bdev->bd_disk, bdev); __blkdev_get()
1264 bdev->bd_openers++; __blkdev_get()
1266 bdev->bd_part_count++; __blkdev_get()
1267 mutex_unlock(&bdev->bd_mutex); __blkdev_get()
1272 disk_put_part(bdev->bd_part); __blkdev_get()
1273 bdev->bd_disk = NULL; __blkdev_get()
1274 bdev->bd_part = NULL; __blkdev_get()
1275 bdev->bd_queue = NULL; __blkdev_get()
1276 if (bdev != bdev->bd_contains) __blkdev_get()
1277 __blkdev_put(bdev->bd_contains, mode, 1); __blkdev_get()
1278 bdev->bd_contains = NULL; __blkdev_get()
1280 mutex_unlock(&bdev->bd_mutex); __blkdev_get()
1285 bdput(bdev); __blkdev_get()
1292 * @bdev: block_device to open
1296 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1300 * On success, the reference count of @bdev is unchanged. On failure,
1301 * @bdev is put.
1309 int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) blkdev_get() argument
1317 whole = bd_start_claiming(bdev, holder); blkdev_get()
1319 bdput(bdev); blkdev_get()
1324 res = __blkdev_get(bdev, mode, 0); blkdev_get()
1330 mutex_lock(&bdev->bd_mutex); blkdev_get()
1334 BUG_ON(!bd_may_claim(bdev, whole, holder)); blkdev_get()
1343 bdev->bd_holders++; blkdev_get()
1344 bdev->bd_holder = holder; blkdev_get()
1361 if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder && blkdev_get()
1363 bdev->bd_write_holder = true; blkdev_get()
1367 mutex_unlock(&bdev->bd_mutex); blkdev_get()
1395 struct block_device *bdev; blkdev_get_by_path() local
1398 bdev = lookup_bdev(path); blkdev_get_by_path()
1399 if (IS_ERR(bdev)) blkdev_get_by_path()
1400 return bdev; blkdev_get_by_path()
1402 err = blkdev_get(bdev, mode, holder); blkdev_get_by_path()
1406 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { blkdev_get_by_path()
1407 blkdev_put(bdev, mode); blkdev_get_by_path()
1411 return bdev; blkdev_get_by_path()
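
A sketch of the usual exclusive-open pattern this implements; the holder cookie is what bd_may_claim() compares, so a second claim with a different cookie fails until the first is dropped:

#include <linux/err.h>
#include <linux/fs.h>

static int probe_exclusive(const char *path, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);	/* -EBUSY: someone else holds it */

	/* exclusive access until the claim is dropped */

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	return 0;
}
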
1439 struct block_device *bdev; blkdev_get_by_dev() local
1442 bdev = bdget(dev); blkdev_get_by_dev()
1443 if (!bdev) blkdev_get_by_dev()
1446 err = blkdev_get(bdev, mode, holder); blkdev_get_by_dev()
1450 return bdev; blkdev_get_by_dev()
1456 struct block_device *bdev; blkdev_open() local
1473 bdev = bd_acquire(inode); blkdev_open()
1474 if (bdev == NULL) blkdev_open()
1477 filp->f_mapping = bdev->bd_inode->i_mapping; blkdev_open()
1479 return blkdev_get(bdev, filp->f_mode, filp); blkdev_open()
1482 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) __blkdev_put() argument
1484 struct gendisk *disk = bdev->bd_disk; __blkdev_put()
1487 mutex_lock_nested(&bdev->bd_mutex, for_part); __blkdev_put()
1489 bdev->bd_part_count--; __blkdev_put()
1491 if (!--bdev->bd_openers) { __blkdev_put()
1492 WARN_ON_ONCE(bdev->bd_holders); __blkdev_put()
1493 sync_blockdev(bdev); __blkdev_put()
1494 kill_bdev(bdev); __blkdev_put()
1499 bdev_write_inode(bdev->bd_inode); __blkdev_put()
1501 if (bdev->bd_contains == bdev) { __blkdev_put()
1505 if (!bdev->bd_openers) { __blkdev_put()
1508 disk_put_part(bdev->bd_part); __blkdev_put()
1509 bdev->bd_part = NULL; __blkdev_put()
1510 bdev->bd_disk = NULL; __blkdev_put()
1511 if (bdev != bdev->bd_contains) __blkdev_put()
1512 victim = bdev->bd_contains; __blkdev_put()
1513 bdev->bd_contains = NULL; __blkdev_put()
1518 mutex_unlock(&bdev->bd_mutex); __blkdev_put()
1519 bdput(bdev); __blkdev_put()
1524 void blkdev_put(struct block_device *bdev, fmode_t mode) blkdev_put() argument
1526 mutex_lock(&bdev->bd_mutex); blkdev_put()
1538 WARN_ON_ONCE(--bdev->bd_holders < 0); blkdev_put()
1539 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); blkdev_put()
1542 if ((bdev_free = !bdev->bd_holders)) blkdev_put()
1543 bdev->bd_holder = NULL; blkdev_put()
1544 if (!bdev->bd_contains->bd_holders) blkdev_put()
1545 bdev->bd_contains->bd_holder = NULL; blkdev_put()
1553 if (bdev_free && bdev->bd_write_holder) { blkdev_put()
1554 disk_unblock_events(bdev->bd_disk); blkdev_put()
1555 bdev->bd_write_holder = false; blkdev_put()
1564 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); blkdev_put()
1566 mutex_unlock(&bdev->bd_mutex); blkdev_put()
1568 __blkdev_put(bdev, mode, 0); blkdev_put()
1574 struct block_device *bdev = I_BDEV(filp->f_mapping->host); blkdev_close() local
1575 blkdev_put(bdev, filp->f_mode); blkdev_close()
1581 struct block_device *bdev = I_BDEV(file->f_mapping->host); block_ioctl() local
1593 return blkdev_ioctl(bdev, mode, cmd, arg); block_ioctl()
1657 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; blkdev_releasepage()
1693 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg) ioctl_by_bdev() argument
1698 res = blkdev_ioctl(bdev, 0, cmd, arg); ioctl_by_bdev()
1715 struct block_device *bdev; lookup_bdev() local
1735 bdev = bd_acquire(inode); lookup_bdev()
1736 if (!bdev) lookup_bdev()
1740 return bdev; lookup_bdev()
1742 bdev = ERR_PTR(error); lookup_bdev()
1747 int __invalidate_device(struct block_device *bdev, bool kill_dirty) __invalidate_device() argument
1749 struct super_block *sb = get_super(bdev); __invalidate_device()
1763 invalidate_bdev(bdev); __invalidate_device()
H A Dsuper.c570 * @bdev: device to get the superblock for
576 struct super_block *get_super(struct block_device *bdev) get_super() argument
580 if (!bdev) get_super()
588 if (sb->s_bdev == bdev) { get_super()
610 * @bdev: device to get the superblock for
617 struct super_block *get_super_thawed(struct block_device *bdev) get_super_thawed() argument
620 struct super_block *s = get_super(bdev); get_super_thawed()
633 * @bdev: device to get the superblock for
639 struct super_block *get_active_super(struct block_device *bdev) get_active_super() argument
643 if (!bdev) get_active_super()
651 if (sb->s_bdev == bdev) { get_active_super()
757 * bdev buffer cache (eg. use a private mapping, or directories in do_remount_sb()
760 * from bdev, we could get stale data, so invalidate it to give a best do_remount_sb()
953 struct block_device *bdev; mount_bdev() local
961 bdev = blkdev_get_by_path(dev_name, mode, fs_type); mount_bdev()
962 if (IS_ERR(bdev)) mount_bdev()
963 return ERR_CAST(bdev); mount_bdev()
970 mutex_lock(&bdev->bd_fsfreeze_mutex); mount_bdev()
971 if (bdev->bd_fsfreeze_count > 0) { mount_bdev()
972 mutex_unlock(&bdev->bd_fsfreeze_mutex); mount_bdev()
977 bdev); mount_bdev()
978 mutex_unlock(&bdev->bd_fsfreeze_mutex); mount_bdev()
997 blkdev_put(bdev, mode); mount_bdev()
1003 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); mount_bdev()
1004 sb_set_blocksize(s, block_size(bdev)); mount_bdev()
1012 bdev->bd_super = s; mount_bdev()
1020 blkdev_put(bdev, mode); mount_bdev()
1028 struct block_device *bdev = sb->s_bdev; kill_block_super() local
1031 bdev->bd_super = NULL; kill_block_super()
1033 sync_blockdev(bdev); kill_block_super()
1035 blkdev_put(bdev, mode | FMODE_EXCL); kill_block_super()
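
mount_bdev()/kill_block_super() are the glue most disk filesystems use; a minimal, hypothetical wiring (myfs_fill_super is assumed, not shown):

#include <linux/fs.h>
#include <linux/module.h>

static int myfs_fill_super(struct super_block *sb, void *data, int silent);
					/* hypothetical */

static struct dentry *myfs_mount(struct file_system_type *fs_type, int flags,
				 const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, myfs_fill_super);
}

static struct file_system_type myfs_type = {
	.owner		= THIS_MODULE,
	.name		= "myfs",
	.mount		= myfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
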
H A Dmbcache.c393 * @bdev: which device's cache entries to shrink
396 mb_cache_shrink(struct block_device *bdev) mb_cache_shrink() argument
407 if (ce->e_bdev == bdev) { mb_cache_shrink()
574 * @bdev: device the cache entry belongs to
579 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev, mb_cache_entry_insert() argument
590 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), mb_cache_entry_insert()
595 if (lce->e_bdev == bdev && lce->e_block == block) { hlist_bl_for_each_entry()
603 ce->e_bdev = bdev;
661 mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev, mb_cache_entry_get() argument
669 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), mb_cache_entry_get()
676 if (ce->e_bdev == bdev && ce->e_block == block) { hlist_bl_for_each_entry()
720 struct block_device *bdev, unsigned int key) __mb_cache_entry_find()
729 if (ce->e_bdev == bdev && ce->e_index.o_key == key) { __mb_cache_entry_find()
782 * @bdev: the device the cache entry should belong to
786 mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev, mb_cache_entry_find_first() argument
798 ce = __mb_cache_entry_find(l, index_hash_p, bdev, key); mb_cache_entry_find_first()
820 * @bdev: the device the cache entry should belong to
825 struct block_device *bdev, unsigned int key) mb_cache_entry_find_next()
838 ce = __mb_cache_entry_find(l, index_hash_p, bdev, key); mb_cache_entry_find_next()
719 __mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head, struct block_device *bdev, unsigned int key) __mb_cache_entry_find() argument
824 mb_cache_entry_find_next(struct mb_cache_entry *prev, struct block_device *bdev, unsigned int key) mb_cache_entry_find_next() argument
H A Dmpage.c67 mpage_alloc(struct block_device *bdev, mpage_alloc() argument
81 bio->bi_bdev = bdev; mpage_alloc()
154 struct block_device *bdev = NULL; do_mpage_readpage() local
191 bdev = map_bh->b_bdev; do_mpage_readpage()
246 bdev = map_bh->b_bdev; do_mpage_readpage()
275 if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9), do_mpage_readpage()
279 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), do_mpage_readpage()
280 min_t(int, nr_pages, bio_get_nr_vecs(bdev)), do_mpage_readpage()
477 struct block_device *bdev = NULL; __mpage_writepage() local
521 bdev = bh->b_bdev; __mpage_writepage()
562 bdev = map_bh.b_bdev; __mpage_writepage()
598 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), __mpage_writepage()
604 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), __mpage_writepage()
605 bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH); __mpage_writepage()
H A Dinternal.h25 extern int __sync_blockdev(struct block_device *bdev, int wait);
32 static inline int __sync_blockdev(struct block_device *bdev, int wait) __sync_blockdev() argument
H A Dsync.c82 static void fdatawrite_one_bdev(struct block_device *bdev, void *arg) fdatawrite_one_bdev() argument
84 filemap_fdatawrite(bdev->bd_inode->i_mapping); fdatawrite_one_bdev()
87 static void fdatawait_one_bdev(struct block_device *bdev, void *arg) fdatawait_one_bdev() argument
89 filemap_fdatawait(bdev->bd_inode->i_mapping); fdatawait_one_bdev()
H A Dbuffer.c198 __find_get_block_slow(struct block_device *bdev, sector_t block) __find_get_block_slow() argument
200 struct inode *bd_inode = bdev->bd_inode; __find_get_block_slow()
244 printk("device %s blocksize: %d\n", bdevname(bdev, b), __find_get_block_slow()
588 void write_boundary_block(struct block_device *bdev, write_boundary_block() argument
591 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); write_boundary_block()
925 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) blkdev_max_block() argument
928 loff_t sz = i_size_read(bdev->bd_inode); blkdev_max_block()
941 init_page_buffers(struct page *page, struct block_device *bdev, init_page_buffers() argument
947 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size); init_page_buffers()
952 bh->b_bdev = bdev; init_page_buffers()
975 grow_dev_page(struct block_device *bdev, sector_t block, grow_dev_page() argument
978 struct inode *inode = bdev->bd_inode; grow_dev_page()
1004 end_block = init_page_buffers(page, bdev, grow_dev_page()
1027 end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, grow_dev_page()
1043 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp) grow_buffers() argument
1065 bdevname(bdev, b)); grow_buffers()
1070 return grow_dev_page(bdev, block, index, size, sizebits, gfp); grow_buffers()
1074 __getblk_slow(struct block_device *bdev, sector_t block, __getblk_slow() argument
1078 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || __getblk_slow()
1083 bdev_logical_block_size(bdev)); __getblk_slow()
1093 bh = __find_get_block(bdev, block, size); __getblk_slow()
1097 ret = grow_buffers(bdev, block, size, gfp); __getblk_slow()
1306 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size) lookup_bh_lru() argument
1316 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev && lookup_bh_lru()
1341 __find_get_block(struct block_device *bdev, sector_t block, unsigned size) __find_get_block() argument
1343 struct buffer_head *bh = lookup_bh_lru(bdev, block, size); __find_get_block()
1347 bh = __find_get_block_slow(bdev, block); __find_get_block()
1366 __getblk_gfp(struct block_device *bdev, sector_t block, __getblk_gfp() argument
1369 struct buffer_head *bh = __find_get_block(bdev, block, size); __getblk_gfp()
1373 bh = __getblk_slow(bdev, block, size, gfp); __getblk_gfp()
1381 void __breadahead(struct block_device *bdev, sector_t block, unsigned size) __breadahead() argument
1383 struct buffer_head *bh = __getblk(bdev, block, size); __breadahead()
1393 * @bdev: the block_device to read from
1404 __bread_gfp(struct block_device *bdev, sector_t block, __bread_gfp() argument
1407 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp); __bread_gfp()
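
The getblk/bread family above is the classic metadata path; a short sketch of a caller:

#include <linux/buffer_head.h>

static int inspect_block(struct block_device *bdev, sector_t block,
			 unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;		/* I/O error or bad size */
	/* bh->b_data now holds 'size' bytes of block 'block' */
	brelse(bh);
	return 0;
}
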
1610 void unmap_underlying_metadata(struct block_device *bdev, sector_t block) unmap_underlying_metadata() argument
1616 old_bh = __find_get_block_slow(bdev, block); unmap_underlying_metadata()
H A Ddirect-io.c356 struct block_device *bdev, dio_bio_alloc()
367 bio->bi_bdev = bdev; dio_bio_alloc()
1099 struct block_device *bdev, struct iov_iter *iter, do_blockdev_direct_IO()
1116 * Avoid references to bdev if not absolutely needed to give do_blockdev_direct_IO()
1121 if (bdev) do_blockdev_direct_IO()
1122 blkbits = blksize_bits(bdev_logical_block_size(bdev)); do_blockdev_direct_IO()
1313 struct block_device *bdev, struct iov_iter *iter, __blockdev_direct_IO()
1326 prefetch(&bdev->bd_disk->part_tbl); __blockdev_direct_IO()
1327 prefetch(bdev->bd_queue); __blockdev_direct_IO()
1328 prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); __blockdev_direct_IO()
1330 return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, __blockdev_direct_IO()
355 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio, struct block_device *bdev, sector_t first_sector, int nr_vecs) dio_bio_alloc() argument
1098 do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, loff_t offset, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) do_blockdev_direct_IO() argument
1312 __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, loff_t offset, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) __blockdev_direct_IO() argument
/linux-4.1.27/block/
H A Dioctl.c12 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) blkpg_ioctl() argument
29 disk = bdev->bd_disk; blkpg_ioctl()
30 if (bdev != bdev->bd_contains) blkpg_ioctl()
48 mutex_lock(&bdev->bd_mutex); blkpg_ioctl()
57 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
66 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
88 mutex_lock_nested(&bdev->bd_mutex, 1); blkpg_ioctl()
90 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
116 mutex_lock_nested(&bdev->bd_mutex, 1); blkpg_ioctl()
119 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
134 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
144 mutex_unlock(&bdev->bd_mutex); blkpg_ioctl()
153 static int blkdev_reread_part(struct block_device *bdev) blkdev_reread_part() argument
155 struct gendisk *disk = bdev->bd_disk; blkdev_reread_part()
158 if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains) blkdev_reread_part()
162 if (!mutex_trylock(&bdev->bd_mutex)) blkdev_reread_part()
164 res = rescan_partitions(disk, bdev); blkdev_reread_part()
165 mutex_unlock(&bdev->bd_mutex); blkdev_reread_part()
169 static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, blk_ioctl_discard() argument
181 if (start + len > (i_size_read(bdev->bd_inode) >> 9)) blk_ioctl_discard()
185 return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags); blk_ioctl_discard()
188 static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start, blk_ioctl_zeroout() argument
198 if (start + len > (i_size_read(bdev->bd_inode) >> 9)) blk_ioctl_zeroout()
201 return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false); blk_ioctl_zeroout()
234 int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, __blkdev_driver_ioctl() argument
237 struct gendisk *disk = bdev->bd_disk; __blkdev_driver_ioctl()
240 return disk->fops->ioctl(bdev, mode, cmd, arg); __blkdev_driver_ioctl()
274 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, blkdev_ioctl() argument
277 struct gendisk *disk = bdev->bd_disk; blkdev_ioctl()
288 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); blkdev_ioctl()
292 fsync_bdev(bdev); blkdev_ioctl()
293 invalidate_bdev(bdev); blkdev_ioctl()
297 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); blkdev_ioctl()
304 set_device_ro(bdev, n); blkdev_ioctl()
317 return blk_ioctl_discard(bdev, range[0], range[1], blkdev_ioctl()
329 return blk_ioctl_zeroout(bdev, range[0], range[1]); blkdev_ioctl()
345 geo.start = get_start_sect(bdev); blkdev_ioctl()
346 ret = disk->fops->getgeo(bdev, &geo); blkdev_ioctl()
358 bdi = blk_get_backing_dev_info(bdev); blkdev_ioctl()
361 return put_int(arg, bdev_read_only(bdev) != 0); blkdev_ioctl()
363 return put_int(arg, block_size(bdev)); blkdev_ioctl()
365 return put_int(arg, bdev_logical_block_size(bdev)); blkdev_ioctl()
367 return put_uint(arg, bdev_physical_block_size(bdev)); blkdev_ioctl()
369 return put_uint(arg, bdev_io_min(bdev)); blkdev_ioctl()
371 return put_uint(arg, bdev_io_opt(bdev)); blkdev_ioctl()
373 return put_int(arg, bdev_alignment_offset(bdev)); blkdev_ioctl()
375 return put_uint(arg, bdev_discard_zeroes_data(bdev)); blkdev_ioctl()
378 queue_max_sectors(bdev_get_queue(bdev))); blkdev_ioctl()
381 return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev))); blkdev_ioctl()
386 bdi = blk_get_backing_dev_info(bdev); blkdev_ioctl()
398 bdgrab(bdev); blkdev_ioctl()
399 if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) blkdev_ioctl()
402 ret = set_blocksize(bdev, n); blkdev_ioctl()
404 blkdev_put(bdev, mode | FMODE_EXCL); blkdev_ioctl()
407 ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); blkdev_ioctl()
410 ret = blkdev_reread_part(bdev); blkdev_ioctl()
413 size = i_size_read(bdev->bd_inode); blkdev_ioctl()
418 return put_u64(arg, i_size_read(bdev->bd_inode)); blkdev_ioctl()
423 ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg); blkdev_ioctl()
426 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); blkdev_ioctl()
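
For reference, the same BLKGETSIZE64/BLKSSZGET commands dispatched above, driven from user space (the device path is only an example):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	uint64_t bytes;
	int lbs, fd = open("/dev/sda", O_RDONLY);

	if (fd < 0)
		return 1;
	if (!ioctl(fd, BLKGETSIZE64, &bytes) && !ioctl(fd, BLKSSZGET, &lbs))
		printf("%llu bytes, %d-byte logical sectors\n",
		       (unsigned long long)bytes, lbs);
	close(fd);
	return 0;
}
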
H A Dblk-lib.c31 * @bdev: blockdev to issue discard for
40 int blkdev_issue_discard(struct block_device *bdev, sector_t sector, blkdev_issue_discard() argument
44 struct request_queue *q = bdev_get_queue(bdev); blkdev_issue_discard()
61 alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; blkdev_issue_discard()
113 bio->bi_bdev = bdev; blkdev_issue_discard()
146 * @bdev: target blockdev
155 int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, blkdev_issue_write_same() argument
160 struct request_queue *q = bdev_get_queue(bdev); blkdev_issue_write_same()
187 bio->bi_bdev = bdev; blkdev_issue_write_same()
192 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); blkdev_issue_write_same()
220 * @bdev: blockdev to issue
229 static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, __blkdev_issue_zeroout() argument
252 bio->bi_bdev = bdev; __blkdev_issue_zeroout()
282 * @bdev: blockdev to write
301 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, blkdev_issue_zeroout() argument
304 struct request_queue *q = bdev_get_queue(bdev); blkdev_issue_zeroout()
307 blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0) blkdev_issue_zeroout()
310 if (bdev_write_same(bdev) && blkdev_issue_zeroout()
311 blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, blkdev_issue_zeroout()
315 return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask); blkdev_issue_zeroout()
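
A sketch of a caller chaining the two helpers: discard first, and fall back to writing zeroes when the queue lacks discard support (blkdev_issue_zeroout() can also attempt a zeroing discard itself when its last argument is true):

#include <linux/blkdev.h>

static int punch_range(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects)
{
	int err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);

	if (err == -EOPNOTSUPP)		/* queue has no discard support */
		err = blkdev_issue_zeroout(bdev, sector, nr_sects,
					   GFP_KERNEL, false);
	return err;
}
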
H A Dcompat_ioctl.c51 static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev, compat_hdio_getgeo() argument
67 geo.start = get_start_sect(bdev); compat_hdio_getgeo()
68 ret = disk->fops->getgeo(bdev, &geo); compat_hdio_getgeo()
80 static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode, compat_hdio_ioctl() argument
89 error = __blkdev_driver_ioctl(bdev, mode, compat_hdio_ioctl()
120 static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode, compat_cdrom_read_audio() argument
143 return __blkdev_driver_ioctl(bdev, mode, cmd, compat_cdrom_read_audio()
147 static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode, compat_cdrom_generic_command() argument
176 return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc); compat_cdrom_generic_command()
186 static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode, compat_blkpg_ioctl() argument
205 return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a); compat_blkpg_ioctl()
303 static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, compat_fd_ioctl() argument
408 err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg); compat_fd_ioctl()
524 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, compat_blkdev_driver_ioctl() argument
539 return compat_hdio_ioctl(bdev, mode, cmd, arg); compat_blkdev_driver_ioctl()
549 return compat_fd_ioctl(bdev, mode, cmd, arg); compat_blkdev_driver_ioctl()
551 return compat_cdrom_read_audio(bdev, mode, cmd, arg); compat_blkdev_driver_ioctl()
553 return compat_cdrom_generic_command(bdev, mode, cmd, arg); compat_blkdev_driver_ioctl()
651 return __blkdev_driver_ioctl(bdev, mode, cmd, arg); compat_blkdev_driver_ioctl()
661 struct block_device *bdev = inode->i_bdev; compat_blkdev_ioctl() local
662 struct gendisk *disk = bdev->bd_disk; compat_blkdev_ioctl()
679 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); compat_blkdev_ioctl()
681 return compat_put_uint(arg, bdev_physical_block_size(bdev)); compat_blkdev_ioctl()
683 return compat_put_uint(arg, bdev_io_min(bdev)); compat_blkdev_ioctl()
685 return compat_put_uint(arg, bdev_io_opt(bdev)); compat_blkdev_ioctl()
687 return compat_put_int(arg, bdev_alignment_offset(bdev)); compat_blkdev_ioctl()
689 return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); compat_blkdev_ioctl()
700 return blkdev_ioctl(bdev, mode, cmd, compat_blkdev_ioctl()
703 return blkdev_ioctl(bdev, mode, BLKBSZSET, compat_blkdev_ioctl()
706 return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg)); compat_blkdev_ioctl()
711 bdi = blk_get_backing_dev_info(bdev); compat_blkdev_ioctl()
715 return compat_put_int(arg, bdev_read_only(bdev) != 0); compat_blkdev_ioctl()
717 return compat_put_int(arg, block_size(bdev)); compat_blkdev_ioctl()
719 return compat_put_int(arg, bdev_logical_block_size(bdev)); compat_blkdev_ioctl()
722 queue_max_sectors(bdev_get_queue(bdev))); compat_blkdev_ioctl()
726 !blk_queue_nonrot(bdev_get_queue(bdev))); compat_blkdev_ioctl()
731 bdi = blk_get_backing_dev_info(bdev); compat_blkdev_ioctl()
735 size = i_size_read(bdev->bd_inode); compat_blkdev_ioctl()
741 return compat_put_u64(arg, i_size_read(bdev->bd_inode)); compat_blkdev_ioctl()
747 ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); compat_blkdev_ioctl()
751 ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); compat_blkdev_ioctl()
753 ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg); compat_blkdev_ioctl()
H A Dpartition-generic.c46 const char *bdevname(struct block_device *bdev, char *buf) bdevname() argument
48 return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf); bdevname()
394 static int drop_partitions(struct gendisk *disk, struct block_device *bdev) drop_partitions() argument
400 if (bdev->bd_part_count) drop_partitions()
414 int rescan_partitions(struct gendisk *disk, struct block_device *bdev) rescan_partitions() argument
425 res = drop_partitions(disk, bdev); rescan_partitions()
431 check_disk_size_change(disk, bdev); rescan_partitions()
432 bdev->bd_invalidated = 0; rescan_partitions()
433 if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) rescan_partitions()
532 int invalidate_partitions(struct gendisk *disk, struct block_device *bdev) invalidate_partitions() argument
536 if (!bdev->bd_invalidated) invalidate_partitions()
539 res = drop_partitions(disk, bdev); invalidate_partitions()
544 check_disk_size_change(disk, bdev); invalidate_partitions()
545 bdev->bd_invalidated = 0; invalidate_partitions()
552 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) read_dev_sector() argument
554 struct address_space *mapping = bdev->bd_inode->i_mapping; read_dev_sector()
H A Dgenhd.c510 struct block_device *bdev; register_disk() local
551 bdev = bdget_disk(disk, 0); register_disk()
552 if (!bdev) register_disk()
555 bdev->bd_invalidated = 1; register_disk()
556 err = blkdev_get(bdev, FMODE_READ, NULL); register_disk()
559 blkdev_put(bdev, FMODE_READ); register_disk()
612 /* Register BDI before referencing it from bdev */ add_disk()
722 struct block_device *bdev = NULL; bdget_disk() local
726 bdev = bdget(part_devt(part)); bdget_disk()
729 return bdev; bdget_disk()
1338 void set_device_ro(struct block_device *bdev, int flag) set_device_ro() argument
1340 bdev->bd_part->policy = flag; set_device_ro()
1363 int bdev_read_only(struct block_device *bdev) bdev_read_only() argument
1365 if (!bdev) bdev_read_only()
1367 return bdev->bd_part->policy; bdev_read_only()
1375 struct block_device *bdev = bdget_disk(disk, partno); invalidate_partition() local
1376 if (bdev) { invalidate_partition()
1377 fsync_bdev(bdev); invalidate_partition()
1378 res = __invalidate_device(bdev, true); invalidate_partition()
1379 bdput(bdev); invalidate_partition()
1534 * If @mask is non-zero, this must be called with bdev->bd_mutex held.
H A Dcmdline-parser.c212 const char *bdev) cmdline_parts_find()
214 while (parts && strncmp(bdev, parts->name, sizeof(parts->name))) cmdline_parts_find()
211 cmdline_parts_find(struct cmdline_parts *parts, const char *bdev) cmdline_parts_find() argument
H A Dblk-flush.c439 * @bdev: blockdev to issue flush for
449 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, blkdev_issue_flush() argument
456 if (bdev->bd_disk == NULL) blkdev_issue_flush()
459 q = bdev_get_queue(bdev); blkdev_issue_flush()
473 bio->bi_bdev = bdev; blkdev_issue_flush()
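
Journaling-style callers pair this with a page-cache flush first; a sketch:

#include <linux/blkdev.h>
#include <linux/fs.h>

static int commit_and_flush(struct block_device *bdev)
{
	int err = sync_blockdev(bdev);	/* write out dirty page cache */

	if (!err)			/* then empty the volatile write cache */
		err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	return err;
}
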
H A Dblk-settings.c657 * @bdev: the component block_device (bottom)
665 int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, bdev_stack_limits() argument
668 struct request_queue *bq = bdev_get_queue(bdev); bdev_stack_limits()
670 start += get_start_sect(bdev); bdev_stack_limits()
679 * @bdev: the underlying block device (bottom)
686 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, disk_stack_limits() argument
691 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { disk_stack_limits()
695 bdevname(bdev, bottom); disk_stack_limits()
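
Stacking drivers call this once per component; note the offset argument is in bytes and is shifted back down to sectors internally, as shown above. A sketch:

#include <linux/blkdev.h>

static void stack_component(struct gendisk *vdisk, struct block_device *comp,
			    sector_t start)
{
	/* third argument is in bytes, hence the shift */
	disk_stack_limits(vdisk, comp, start << 9);
}
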
/linux-4.1.27/drivers/hid/
H A Dhid-picolcd_backlight.c27 static int picolcd_get_brightness(struct backlight_device *bdev) picolcd_get_brightness() argument
29 struct picolcd_data *data = bl_get_data(bdev); picolcd_get_brightness()
33 static int picolcd_set_brightness(struct backlight_device *bdev) picolcd_set_brightness() argument
35 struct picolcd_data *data = bl_get_data(bdev); picolcd_set_brightness()
42 data->lcd_brightness = bdev->props.brightness & 0x0ff; picolcd_set_brightness()
43 data->lcd_power = bdev->props.power; picolcd_set_brightness()
52 static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb) picolcd_check_bl_fb() argument
54 return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev)); picolcd_check_bl_fb()
66 struct backlight_device *bdev; picolcd_init_backlight() local
79 bdev = backlight_device_register(dev_name(dev), dev, data, picolcd_init_backlight()
81 if (IS_ERR(bdev)) { picolcd_init_backlight()
83 return PTR_ERR(bdev); picolcd_init_backlight()
85 bdev->props.brightness = 0xff; picolcd_init_backlight()
87 data->backlight = bdev; picolcd_init_backlight()
88 picolcd_set_brightness(bdev); picolcd_init_backlight()
94 struct backlight_device *bdev = data->backlight; picolcd_exit_backlight() local
97 if (bdev) picolcd_exit_backlight()
98 backlight_device_unregister(bdev); picolcd_exit_backlight()
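
A hedged sketch of the registration half elided above, assuming the standard backlight_ops/backlight_properties layout and reusing the callbacks shown in this file:

#include <linux/backlight.h>
#include <linux/string.h>

static const struct backlight_ops my_bl_ops = {
	.update_status	= picolcd_set_brightness,
	.get_brightness	= picolcd_get_brightness,
	.check_fb	= picolcd_check_bl_fb,
};

static struct backlight_device *register_bl(struct device *dev, void *data)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 0xff;	/* matches bdev->props.brightness above */
	return backlight_device_register(dev_name(dev), dev, data,
					 &my_bl_ops, &props);
}
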
/linux-4.1.27/drivers/dma/
H A Dqcom_bam_dma.c350 struct bam_device *bdev; member in struct:bam_chan
402 * @bdev: bam device
406 static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, bam_addr() argument
409 const struct reg_offset_data r = bdev->layout[reg]; bam_addr()
411 return bdev->regs + r.base_offset + bam_addr()
414 r.ee_mult * bdev->ee; bam_addr()
425 struct bam_device *bdev = bchan->bdev; bam_reset_channel() local
430 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); bam_reset_channel()
431 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); bam_reset_channel()
449 struct bam_device *bdev = bchan->bdev; bam_chan_init_hw() local
460 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); bam_chan_init_hw()
462 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); bam_chan_init_hw()
466 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); bam_chan_init_hw()
469 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_chan_init_hw()
471 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_chan_init_hw()
481 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); bam_chan_init_hw()
499 struct bam_device *bdev = bchan->bdev; bam_alloc_chan() local
505 bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bam_alloc_chan()
509 dev_err(bdev->dev, "Failed to allocate desc fifo\n"); bam_alloc_chan()
526 struct bam_device *bdev = bchan->bdev; bam_free_chan() local
533 dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); bam_free_chan()
541 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt, bam_free_chan()
546 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_free_chan()
548 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_free_chan()
551 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); bam_free_chan()
592 struct bam_device *bdev = bchan->bdev; bam_prep_slave_sg() local
601 dev_err(bdev->dev, "invalid dma direction\n"); bam_prep_slave_sg()
695 struct bam_device *bdev = bchan->bdev; bam_pause() local
699 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); bam_pause()
714 struct bam_device *bdev = bchan->bdev; bam_resume() local
718 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); bam_resume()
727 * @bdev: bam controller
732 static u32 process_channel_irqs(struct bam_device *bdev) process_channel_irqs() argument
738 srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); process_channel_irqs()
744 for (i = 0; i < bdev->num_channels; i++) { process_channel_irqs()
745 struct bam_chan *bchan = &bdev->channels[i]; process_channel_irqs()
751 pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS)); process_channel_irqs()
753 writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); process_channel_irqs()
794 struct bam_device *bdev = data; bam_dma_irq() local
797 srcs |= process_channel_irqs(bdev); bam_dma_irq()
801 tasklet_schedule(&bdev->task); bam_dma_irq()
804 clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS)); bam_dma_irq()
809 writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR)); bam_dma_irq()
865 struct bam_device *bdev = bchan->bdev; bam_apply_new_config() local
873 writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); bam_apply_new_config()
885 struct bam_device *bdev = bchan->bdev; bam_start_dma() local
940 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG)); bam_start_dma()
951 struct bam_device *bdev = (struct bam_device *)data; dma_tasklet() local
957 for (i = 0; i < bdev->num_channels; i++) { dma_tasklet()
958 bchan = &bdev->channels[i]; dma_tasklet()
1003 struct bam_device *bdev = container_of(of->of_dma_data, bam_dma_xlate() local
1011 if (request >= bdev->num_channels) bam_dma_xlate()
1014 return dma_get_slave_channel(&(bdev->channels[request].vc.chan)); bam_dma_xlate()
1019 * @bdev: bam device
1023 static int bam_init(struct bam_device *bdev) bam_init() argument
1028 val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; bam_init()
1032 if (bdev->ee >= val) bam_init()
1035 val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); bam_init()
1036 bdev->num_channels = val & BAM_NUM_PIPES_MASK; bam_init()
1040 val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); bam_init()
1042 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); bam_init()
1044 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); bam_init()
1051 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); bam_init()
1055 bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); bam_init()
1058 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); bam_init()
1062 bam_addr(bdev, 0, BAM_IRQ_EN)); bam_init()
1065 writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_init()
1070 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, bam_channel_init() argument
1074 bchan->bdev = bdev; bam_channel_init()
1076 vchan_init(&bchan->vc, &bdev->common); bam_channel_init()
1091 struct bam_device *bdev; bam_dma_probe() local
1096 bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL); bam_dma_probe()
1097 if (!bdev) bam_dma_probe()
1100 bdev->dev = &pdev->dev; bam_dma_probe()
1108 bdev->layout = match->data; bam_dma_probe()
1111 bdev->regs = devm_ioremap_resource(&pdev->dev, iores); bam_dma_probe()
1112 if (IS_ERR(bdev->regs)) bam_dma_probe()
1113 return PTR_ERR(bdev->regs); bam_dma_probe()
1115 bdev->irq = platform_get_irq(pdev, 0); bam_dma_probe()
1116 if (bdev->irq < 0) bam_dma_probe()
1117 return bdev->irq; bam_dma_probe()
1119 ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee); bam_dma_probe()
1121 dev_err(bdev->dev, "Execution environment unspecified\n"); bam_dma_probe()
1125 bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); bam_dma_probe()
1126 if (IS_ERR(bdev->bamclk)) bam_dma_probe()
1127 return PTR_ERR(bdev->bamclk); bam_dma_probe()
1129 ret = clk_prepare_enable(bdev->bamclk); bam_dma_probe()
1131 dev_err(bdev->dev, "failed to prepare/enable clock\n"); bam_dma_probe()
1135 ret = bam_init(bdev); bam_dma_probe()
1139 tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev); bam_dma_probe()
1141 bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels, bam_dma_probe()
1142 sizeof(*bdev->channels), GFP_KERNEL); bam_dma_probe()
1144 if (!bdev->channels) { bam_dma_probe()
1150 INIT_LIST_HEAD(&bdev->common.channels); bam_dma_probe()
1152 for (i = 0; i < bdev->num_channels; i++) bam_dma_probe()
1153 bam_channel_init(bdev, &bdev->channels[i], i); bam_dma_probe()
1155 ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, bam_dma_probe()
1156 IRQF_TRIGGER_HIGH, "bam_dma", bdev); bam_dma_probe()
1161 bdev->common.dev = bdev->dev; bam_dma_probe()
1162 bdev->common.dev->dma_parms = &bdev->dma_parms; bam_dma_probe()
1163 ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); bam_dma_probe()
1165 dev_err(bdev->dev, "cannot set maximum segment size\n"); bam_dma_probe()
1169 platform_set_drvdata(pdev, bdev); bam_dma_probe()
1172 dma_cap_zero(bdev->common.cap_mask); bam_dma_probe()
1173 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask); bam_dma_probe()
1176 bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); bam_dma_probe()
1177 bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; bam_dma_probe()
1178 bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; bam_dma_probe()
1179 bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES; bam_dma_probe()
1180 bdev->common.device_alloc_chan_resources = bam_alloc_chan; bam_dma_probe()
1181 bdev->common.device_free_chan_resources = bam_free_chan; bam_dma_probe()
1182 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; bam_dma_probe()
1183 bdev->common.device_config = bam_slave_config; bam_dma_probe()
1184 bdev->common.device_pause = bam_pause; bam_dma_probe()
1185 bdev->common.device_resume = bam_resume; bam_dma_probe()
1186 bdev->common.device_terminate_all = bam_dma_terminate_all; bam_dma_probe()
1187 bdev->common.device_issue_pending = bam_issue_pending; bam_dma_probe()
1188 bdev->common.device_tx_status = bam_tx_status; bam_dma_probe()
1189 bdev->common.dev = bdev->dev; bam_dma_probe()
1191 ret = dma_async_device_register(&bdev->common); bam_dma_probe()
1193 dev_err(bdev->dev, "failed to register dma async device\n"); bam_dma_probe()
1198 &bdev->common); bam_dma_probe()
1205 dma_async_device_unregister(&bdev->common); bam_dma_probe()
1207 for (i = 0; i < bdev->num_channels; i++) bam_dma_probe()
1208 tasklet_kill(&bdev->channels[i].vc.task); bam_dma_probe()
1210 tasklet_kill(&bdev->task); bam_dma_probe()
1212 clk_disable_unprepare(bdev->bamclk); bam_dma_probe()
1219 struct bam_device *bdev = platform_get_drvdata(pdev); bam_dma_remove() local
1223 dma_async_device_unregister(&bdev->common); bam_dma_remove()
1226 writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); bam_dma_remove()
1228 devm_free_irq(bdev->dev, bdev->irq, bdev); bam_dma_remove()
1230 for (i = 0; i < bdev->num_channels; i++) { bam_dma_remove()
1231 bam_dma_terminate_all(&bdev->channels[i].vc.chan); bam_dma_remove()
1232 tasklet_kill(&bdev->channels[i].vc.task); bam_dma_remove()
1234 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bam_dma_remove()
1235 bdev->channels[i].fifo_virt, bam_dma_remove()
1236 bdev->channels[i].fifo_phys); bam_dma_remove()
1239 tasklet_kill(&bdev->task); bam_dma_remove()
1241 clk_disable_unprepare(bdev->bamclk); bam_dma_remove()
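
The probe above exports the controller through the generic dmaengine API; a hedged sketch of the client side (the channel name "rx" is only an example of a DT binding name):

#include <linux/dmaengine.h>

static int start_rx(struct device *dev, struct scatterlist *sgl, int nents,
		    dma_async_tx_callback done, void *ctx)
{
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
	struct dma_async_tx_descriptor *desc;

	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}
	desc->callback = done;		/* completion runs from the tasklet */
	desc->callback_param = ctx;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in bam_issue_pending() */
	return 0;
}
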
/linux-4.1.27/drivers/ide/
H A Dide-disk_ioctl.c18 int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode, ide_disk_ioctl() argument
24 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings); ide_disk_ioctl()
28 err = generic_ide_ioctl(drive, bdev, cmd, arg); ide_disk_ioctl()
H A Dide-gd.c183 static int ide_gd_open(struct block_device *bdev, fmode_t mode) ide_gd_open() argument
185 struct gendisk *disk = bdev->bd_disk; ide_gd_open()
228 check_disk_change(bdev); ide_gd_open()
241 static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode) ide_gd_unlocked_open() argument
246 ret = ide_gd_open(bdev, mode); ide_gd_unlocked_open()
275 static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo) ide_gd_getgeo() argument
277 struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); ide_gd_getgeo()
333 static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode, ide_gd_ioctl() argument
336 struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj); ide_gd_ioctl()
339 return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg); ide_gd_ioctl()
H A Dide-ioctls.c19 int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev, ide_setting_ioctl() argument
42 if (bdev != bdev->bd_contains) ide_setting_ioctl()
234 int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev, generic_ide_ioctl() argument
239 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings); generic_ide_ioctl()
246 if (bdev != bdev->bd_contains) generic_ide_ioctl()
H A Dide-floppy_ioctl.c273 int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev, ide_floppy_ioctl() argument
295 err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); ide_floppy_ioctl()
298 err = generic_ide_ioctl(drive, bdev, cmd, arg); ide_floppy_ioctl()
H A Dide-cd.c1591 static int idecd_open(struct block_device *bdev, fmode_t mode) idecd_open() argument
1597 info = ide_cd_get(bdev->bd_disk); idecd_open()
1601 rc = cdrom_open(&info->devinfo, bdev, mode); idecd_open()
1659 static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode, idecd_locked_ioctl() argument
1662 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); idecd_locked_ioctl()
1674 err = generic_ide_ioctl(info->drive, bdev, cmd, arg); idecd_locked_ioctl()
1676 err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg); idecd_locked_ioctl()
1681 static int idecd_ioctl(struct block_device *bdev, fmode_t mode, idecd_ioctl() argument
1687 ret = idecd_locked_ioctl(bdev, mode, cmd, arg); idecd_ioctl()
/linux-4.1.27/drivers/staging/comedi/drivers/
H A Dcomedi_bond.c90 struct bonded_device *bdev = *devs++; bonding_dio_insn_bits() local
92 if (base_chan < bdev->nchans) { bonding_dio_insn_bits()
101 b_chans = bdev->nchans - base_chan; bonding_dio_insn_bits()
108 ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev, bonding_dio_insn_bits()
125 base_chan -= bdev->nchans; bonding_dio_insn_bits()
139 struct bonded_device *bdev; bonding_dio_insn_config() local
146 for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++) bonding_dio_insn_config()
147 chan -= bdev->nchans; bonding_dio_insn_config()
162 ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]); bonding_dio_insn_config()
165 ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan, bonding_dio_insn_config()
194 struct bonded_device *bdev; do_dev_config() local
234 bdev = kmalloc(sizeof(*bdev), GFP_KERNEL); do_dev_config()
235 if (!bdev) do_dev_config()
238 bdev->dev = d; do_dev_config()
239 bdev->minor = minor; do_dev_config()
240 bdev->subdev = sdev; do_dev_config()
241 bdev->nchans = nchans; do_dev_config()
245 * Now put bdev pointer at end of devpriv->devs array do_dev_config()
256 kfree(bdev); do_dev_config()
260 devpriv->devs[devpriv->ndevs++] = bdev; do_dev_config()
266 bdev->minor, bdev->subdev); do_dev_config()
331 struct bonded_device *bdev; bonding_detach() local
333 bdev = devpriv->devs[devpriv->ndevs]; bonding_detach()
334 if (!bdev) bonding_detach()
336 if (!test_and_set_bit(bdev->minor, devs_closed)) bonding_detach()
337 comedi_close(bdev->dev); bonding_detach()
338 kfree(bdev); bonding_detach()
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_bo.c70 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) ttm_mem_type_debug() argument
72 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; ttm_mem_type_debug()
100 ttm_mem_type_debug(bo->bdev, mem_type); ttm_bo_mem_space_debug()
140 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_release_list() local
161 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); ttm_bo_release_list()
166 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_add_to_lru() local
175 man = &bdev->man[bo->mem.mem_type]; ttm_bo_add_to_lru()
236 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_add_ttm() local
244 if (bdev->need_dma32) ttm_bo_add_ttm()
252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
281 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_handle_move_mem() local
282 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); ttm_bo_handle_move_mem()
283 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); ttm_bo_handle_move_mem()
284 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; ttm_bo_handle_move_mem()
285 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; ttm_bo_handle_move_mem()
320 if (bdev->driver->move_notify) ttm_bo_handle_move_mem()
321 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
328 if (bdev->driver->move_notify) ttm_bo_handle_move_mem()
329 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
334 else if (bdev->driver->move) ttm_bo_handle_move_mem()
335 ret = bdev->driver->move(bo, evict, interruptible, ttm_bo_handle_move_mem()
341 if (bdev->driver->move_notify) { ttm_bo_handle_move_mem()
345 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
355 if (bdev->driver->invalidate_caches) { ttm_bo_handle_move_mem()
356 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); ttm_bo_handle_move_mem()
365 bdev->man[bo->mem.mem_type].gpu_offset; ttm_bo_handle_move_mem()
373 new_man = &bdev->man[bo->mem.mem_type]; ttm_bo_handle_move_mem()
393 if (bo->bdev->driver->move_notify) ttm_bo_cleanup_memtype_use()
394 bo->bdev->driver->move_notify(bo, NULL); ttm_bo_cleanup_memtype_use()
428 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_cleanup_refs_or_queue() local
463 list_add_tail(&bo->ddestroy, &bdev->ddestroy); ttm_bo_cleanup_refs_or_queue()
466 schedule_delayed_work(&bdev->wq, ttm_bo_cleanup_refs_or_queue()
554 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) ttm_bo_delayed_delete() argument
556 struct ttm_bo_global *glob = bdev->glob; ttm_bo_delayed_delete()
561 if (list_empty(&bdev->ddestroy)) ttm_bo_delayed_delete()
564 entry = list_first_entry(&bdev->ddestroy, ttm_bo_delayed_delete()
571 if (entry->ddestroy.next != &bdev->ddestroy) { ttm_bo_delayed_delete()
612 struct ttm_bo_device *bdev = ttm_bo_delayed_workqueue() local
615 if (ttm_bo_delayed_delete(bdev, false)) { ttm_bo_delayed_workqueue()
616 schedule_delayed_work(&bdev->wq, ttm_bo_delayed_workqueue()
625 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_release() local
626 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; ttm_bo_release()
628 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); ttm_bo_release()
645 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) ttm_bo_lock_delayed_workqueue() argument
647 return cancel_delayed_work_sync(&bdev->wq); ttm_bo_lock_delayed_workqueue()
651 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) ttm_bo_unlock_delayed_workqueue() argument
654 schedule_delayed_work(&bdev->wq, ttm_bo_unlock_delayed_workqueue()
662 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_evict() local
685 bdev->driver->evict_flags(bo, &placement); ttm_bo_evict()
710 static int ttm_mem_evict_first(struct ttm_bo_device *bdev, ttm_mem_evict_first() argument
716 struct ttm_bo_global *glob = bdev->glob; ttm_mem_evict_first()
717 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; ttm_mem_evict_first()
771 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; ttm_bo_mem_put()
789 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_mem_force_space() local
790 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; ttm_bo_mem_force_space()
799 ret = ttm_mem_evict_first(bdev, mem_type, place, ttm_bo_mem_force_space()
868 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_mem_space() local
884 man = &bdev->man[mem_type]; ttm_bo_mem_space()
929 man = &bdev->man[mem_type]; ttm_bo_mem_space()
1075 int ttm_bo_init(struct ttm_bo_device *bdev, ttm_bo_init() argument
1090 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; ttm_bo_init()
1123 bo->bdev = bdev; ttm_bo_init()
1124 bo->glob = bdev->glob; ttm_bo_init()
1155 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, ttm_bo_init()
1179 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, ttm_bo_acc_size() argument
1193 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, ttm_bo_dma_acc_size() argument
1208 int ttm_bo_create(struct ttm_bo_device *bdev, ttm_bo_create() argument
1225 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); ttm_bo_create()
1226 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, ttm_bo_create()
1236 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, ttm_bo_force_list_clean() argument
1239 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; ttm_bo_force_list_clean()
1240 struct ttm_bo_global *glob = bdev->glob; ttm_bo_force_list_clean()
1250 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); ttm_bo_force_list_clean()
1264 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ttm_bo_clean_mm() argument
1273 man = &bdev->man[mem_type]; ttm_bo_clean_mm()
1286 ttm_bo_force_list_clean(bdev, mem_type, false); ttm_bo_clean_mm()
1295 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) ttm_bo_evict_mm() argument
1297 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; ttm_bo_evict_mm()
1309 return ttm_bo_force_list_clean(bdev, mem_type, true); ttm_bo_evict_mm()
1313 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ttm_bo_init_mm() argument
1320 man = &bdev->man[type]; ttm_bo_init_mm()
1327 ret = bdev->driver->init_mem_type(bdev, type, man); ttm_bo_init_mm()
1330 man->bdev = bdev; ttm_bo_init_mm()
1410 int ttm_bo_device_release(struct ttm_bo_device *bdev) ttm_bo_device_release() argument
1415 struct ttm_bo_global *glob = bdev->glob; ttm_bo_device_release()
1418 man = &bdev->man[i]; ttm_bo_device_release()
1421 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { ttm_bo_device_release()
1431 list_del(&bdev->device_list); ttm_bo_device_release()
1434 cancel_delayed_work_sync(&bdev->wq); ttm_bo_device_release()
1436 while (ttm_bo_delayed_delete(bdev, true)) ttm_bo_device_release()
1440 if (list_empty(&bdev->ddestroy)) ttm_bo_device_release()
1443 if (list_empty(&bdev->man[0].lru)) ttm_bo_device_release()
1447 drm_vma_offset_manager_destroy(&bdev->vma_manager); ttm_bo_device_release()
1453 int ttm_bo_device_init(struct ttm_bo_device *bdev, ttm_bo_device_init() argument
1462 bdev->driver = driver; ttm_bo_device_init()
1464 memset(bdev->man, 0, sizeof(bdev->man)); ttm_bo_device_init()
1470 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); ttm_bo_device_init()
1474 drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, ttm_bo_device_init()
1476 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); ttm_bo_device_init()
1477 INIT_LIST_HEAD(&bdev->ddestroy); ttm_bo_device_init()
1478 bdev->dev_mapping = mapping; ttm_bo_device_init()
1479 bdev->glob = glob; ttm_bo_device_init()
1480 bdev->need_dma32 = need_dma32; ttm_bo_device_init()
1481 bdev->val_seq = 0; ttm_bo_device_init()
1483 list_add_tail(&bdev->device_list, &glob->device_list); ttm_bo_device_init()
1496 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ttm_mem_reg_is_pci() argument
1498 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ttm_mem_reg_is_pci()
1515 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_unmap_virtual_locked() local
1517 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); ttm_bo_unmap_virtual_locked()
1523 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_unmap_virtual() local
1524 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; ttm_bo_unmap_virtual()
1676 if (bo->bdev->driver->swap_notify) ttm_bo_swapout()
1677 bo->bdev->driver->swap_notify(bo); ttm_bo_swapout()
1693 void ttm_bo_swapout_all(struct ttm_bo_device *bdev) ttm_bo_swapout_all() argument
1695 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) ttm_bo_swapout_all()
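
Taken together, the ttm_bo.c hits show the bring-up pairing: ttm_bo_device_init() installs the mandatory TTM_PL_SYSTEM manager itself (see the ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0) call inside it), the driver then adds further memory types with ttm_bo_init_mm(), and ttm_bo_device_release() tears everything down. A hedged sketch of the driver-side sequence; the my_* names are placeholders and the argument list reflects the 4.1-era signature:

	static int my_ttm_init(struct my_device *mdev)
	{
		int ret;

		ret = ttm_bo_device_init(&mdev->ttm.bdev,
					 mdev->ttm.bo_global_ref.ref.object,
					 &my_bo_driver,
					 mdev->drm->anon_inode->i_mapping,
					 DRM_FILE_PAGE_OFFSET,
					 true /* need_dma32 */);
		if (ret)
			return ret;

		/* TTM_PL_SYSTEM is set up by ttm_bo_device_init() itself;
		 * add a VRAM manager, sized in pages, on top of it. */
		ret = ttm_bo_init_mm(&mdev->ttm.bdev, TTM_PL_VRAM,
				     mdev->vram_size >> PAGE_SHIFT);
		if (ret)
			ttm_bo_device_release(&mdev->ttm.bdev);
		return ret;
	}
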
H A Dttm_bo_util.c119 int ttm_mem_io_reserve(struct ttm_bo_device *bdev, ttm_mem_io_reserve() argument
122 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ttm_mem_io_reserve()
125 if (!bdev->driver->io_mem_reserve) ttm_mem_io_reserve()
128 return bdev->driver->io_mem_reserve(bdev, mem); ttm_mem_io_reserve()
130 if (bdev->driver->io_mem_reserve && ttm_mem_io_reserve()
133 ret = bdev->driver->io_mem_reserve(bdev, mem); ttm_mem_io_reserve()
144 void ttm_mem_io_free(struct ttm_bo_device *bdev, ttm_mem_io_free() argument
147 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ttm_mem_io_free()
152 if (bdev->driver->io_mem_reserve && ttm_mem_io_free()
154 bdev->driver->io_mem_free) ttm_mem_io_free()
155 bdev->driver->io_mem_free(bdev, mem); ttm_mem_io_free()
167 &bo->bdev->man[mem->mem_type]; ttm_mem_io_reserve_vm()
169 ret = ttm_mem_io_reserve(bo->bdev, mem); ttm_mem_io_reserve_vm()
187 ttm_mem_io_free(bo->bdev, mem); ttm_mem_io_free_vm()
191 static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, ttm_mem_reg_ioremap() argument
194 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ttm_mem_reg_ioremap()
200 ret = ttm_mem_io_reserve(bdev, mem); ttm_mem_reg_ioremap()
214 ttm_mem_io_free(bdev, mem); ttm_mem_reg_ioremap()
223 static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, ttm_mem_reg_iounmap() argument
228 man = &bdev->man[mem->mem_type]; ttm_mem_reg_iounmap()
233 ttm_mem_io_free(bdev, mem); ttm_mem_reg_iounmap()
327 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_move_memcpy() local
328 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; ttm_bo_move_memcpy()
340 ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); ttm_bo_move_memcpy()
343 ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap); ttm_bo_move_memcpy()
367 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_bo_move_memcpy()
411 ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); ttm_bo_move_memcpy()
413 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); ttm_bo_move_memcpy()
540 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_bo_kmap_ttm()
572 &bo->bdev->man[bo->mem.mem_type]; ttm_bo_kmap()
588 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); ttm_bo_kmap()
606 &bo->bdev->man[bo->mem.mem_type]; ttm_bo_kunmap()
626 ttm_mem_io_free(map->bo->bdev, &map->bo->mem); ttm_bo_kunmap()
639 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_move_accel_cleanup() local
640 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; ttm_bo_move_accel_cleanup()
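
The ttm_bo_util.c hits show the reserve/free discipline around I/O mappings: ttm_mem_reg_ioremap() reserves first and releases on failure, and ttm_mem_reg_iounmap() frees the reservation after unmapping. A minimal sketch of that pairing; my_map_mem/my_unmap_mem are hypothetical wrappers and assume the region is iomem with the usual bus.base/offset/size fields:

	static void __iomem *my_map_mem(struct ttm_bo_device *bdev,
					struct ttm_mem_reg *mem)
	{
		void __iomem *virt;

		if (ttm_mem_io_reserve(bdev, mem))
			return NULL;
		virt = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!virt)
			ttm_mem_io_free(bdev, mem);	/* balance the reservation */
		return virt;
	}

	static void my_unmap_mem(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem, void __iomem *virt)
	{
		iounmap(virt);
		ttm_mem_io_free(bdev, mem);
	}
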
H A Dttm_bo_vm.c90 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_vm_fault() local
101 &bdev->man[bo->mem.mem_type]; ttm_bo_vm_fault()
141 if (bdev->driver->fault_reserve_notify) { ttm_bo_vm_fault()
142 ret = bdev->driver->fault_reserve_notify(bo); ttm_bo_vm_fault()
204 if (ttm->bdev->driver->ttm_tt_populate(ttm)) { ttm_bo_vm_fault()
265 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); ttm_bo_vm_open()
284 static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, ttm_bo_vm_lookup() argument
291 drm_vma_offset_lock_lookup(&bdev->vma_manager); ttm_bo_vm_lookup()
293 node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); ttm_bo_vm_lookup()
300 drm_vma_offset_unlock_lookup(&bdev->vma_manager); ttm_bo_vm_lookup()
309 struct ttm_bo_device *bdev) ttm_bo_mmap()
315 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); ttm_bo_mmap()
319 driver = bo->bdev->driver; ttm_bo_mmap()
308 ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) ttm_bo_mmap() argument
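
Because ttm_bo_mmap() takes the ttm_bo_device directly, a driver's .mmap hook ends up a thin delegation, as the qxl/bochs/ast/cirrus/mgag200 hits further down also show. A sketch assuming the usual DRM private-data chain (my_device is a placeholder):

	static int my_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		struct drm_file *file_priv = filp->private_data;
		struct my_device *mdev = file_priv->minor->dev->dev_private;

		return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
	}
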
H A Dttm_tt.c187 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, ttm_tt_init() argument
191 ttm->bdev = bdev; ttm_tt_init()
192 ttm->glob = bdev->glob; ttm_tt_init()
217 int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, ttm_dma_tt_init() argument
223 ttm->bdev = bdev; ttm_dma_tt_init()
224 ttm->glob = bdev->glob; ttm_dma_tt_init()
275 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_tt_bind()
401 ttm->bdev->driver->ttm_tt_unpopulate(ttm); ttm_tt_unpopulate()
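
ttm_tt_init() is the common core behind every backend's ttm_tt_create hook; the agp, bochs, ast, cirrus and mgag200 hits all repeat the same allocate/init/free-on-failure shape. A hedged sketch, where my_tt_backend_func stands in for the driver's ttm_backend_func table:

	static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
					       unsigned long size,
					       uint32_t page_flags,
					       struct page *dummy_read_page)
	{
		struct ttm_tt *tt;

		tt = kzalloc(sizeof(*tt), GFP_KERNEL);
		if (!tt)
			return NULL;
		tt->func = &my_tt_backend_func;
		if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
			kfree(tt);	/* init failed; nothing else to unwind */
			return NULL;
		}
		return tt;
	}
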
H A Dttm_execbuf_util.c188 struct ttm_bo_device *bdev; ttm_eu_fence_buffer_objects() local
195 bdev = bo->bdev; ttm_eu_fence_buffer_objects()
196 driver = bdev->driver; ttm_eu_fence_buffer_objects()
H A Dttm_agp_backend.c113 struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, ttm_agp_tt_create() argument
128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { ttm_agp_tt_create()
/linux-4.1.27/include/scsi/
H A Dscsicam.h15 extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip);
18 extern unsigned char *scsi_bios_ptable(struct block_device *bdev);
/linux-4.1.27/drivers/gpu/drm/shmobile/
H A Dshmob_drm_backlight.c20 static int shmob_drm_backlight_update(struct backlight_device *bdev) shmob_drm_backlight_update() argument
22 struct shmob_drm_connector *scon = bl_get_data(bdev); shmob_drm_backlight_update()
25 int brightness = bdev->props.brightness; shmob_drm_backlight_update()
27 if (bdev->props.power != FB_BLANK_UNBLANK || shmob_drm_backlight_update()
28 bdev->props.state & BL_CORE_SUSPENDED) shmob_drm_backlight_update()
34 static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev) shmob_drm_backlight_get_brightness() argument
36 struct shmob_drm_connector *scon = bl_get_data(bdev); shmob_drm_backlight_get_brightness()
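
Here "bdev" is a struct backlight_device, not a block device. The update hook reads the requested level from bdev->props and forces it to zero while the display is blanked or the backlight core has suspended it; a minimal sketch, with my_set_brightness standing in for the hardware setter:

	static int my_bl_update(struct backlight_device *bdev)
	{
		int brightness = bdev->props.brightness;

		if (bdev->props.power != FB_BLANK_UNBLANK ||
		    (bdev->props.state & BL_CORE_SUSPENDED))
			brightness = 0;

		return my_set_brightness(bl_get_data(bdev), brightness);
	}
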
/linux-4.1.27/drivers/char/
H A Draw.c56 struct block_device *bdev; raw_open() local
69 bdev = raw_devices[minor].binding; raw_open()
71 if (!bdev) raw_open()
73 igrab(bdev->bd_inode); raw_open()
74 err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open); raw_open()
77 err = set_blocksize(bdev, bdev_logical_block_size(bdev)); raw_open()
81 filp->f_mapping = bdev->bd_inode->i_mapping; raw_open()
84 bdev->bd_inode->i_mapping; raw_open()
85 filp->private_data = bdev; raw_open()
90 blkdev_put(bdev, filp->f_mode | FMODE_EXCL); raw_open()
103 struct block_device *bdev; raw_release() local
106 bdev = raw_devices[minor].binding; raw_release()
108 /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ raw_release()
112 blkdev_put(bdev, filp->f_mode | FMODE_EXCL); raw_release()
122 struct block_device *bdev = filp->private_data; raw_ioctl() local
123 return blkdev_ioctl(bdev, 0, command, arg); raw_ioctl()
189 struct block_device *bdev; bind_get() local
197 bdev = rawdev->binding; bind_get()
198 *dev = bdev ? bdev->bd_dev : 0; bind_get()
/linux-4.1.27/block/partitions/
H A Dcmdline.c70 char bdev[BDEVNAME_SIZE]; cmdline_partition() local
87 bdevname(state->bdev, bdev); cmdline_partition()
88 parts = cmdline_parts_find(bdev_parts, bdev); cmdline_partition()
92 disk_size = get_capacity(state->bdev->bd_disk) << 9; cmdline_partition()
H A Dcheck.h10 struct block_device *bdev; member in struct:parsed_partitions
33 if (n >= get_capacity(state->bdev->bd_disk)) { read_part_sector()
37 return read_dev_sector(state->bdev, n, p); read_part_sector()
H A Dsgi.c49 bdevname(bdev, b), be32_to_cpu(magic));*/ sgi_partition()
60 bdevname(state->bdev, b)); sgi_partition()
H A Daix.c71 * @bdev: block device
78 static u64 last_lba(struct block_device *bdev) last_lba() argument
80 if (!bdev || !bdev->bd_inode) last_lba()
82 return (bdev->bd_inode->i_size >> 9) - 1ULL; last_lba()
92 * Description: Reads @count bytes from @state->bdev into @buffer.
100 if (!buffer || lba + count / 512 > last_lba(state->bdev)) read_lba()
H A Damiga.c46 bdevname(state->bdev, b), blk); amiga_partition()
68 bdevname(state->bdev, b), blk); amiga_partition()
89 bdevname(state->bdev, b), blk); amiga_partition()
H A Defi.c141 * @bdev: block device
148 static u64 last_lba(struct block_device *bdev) last_lba() argument
150 if (!bdev || !bdev->bd_inode) last_lba()
152 return div_u64(bdev->bd_inode->i_size, last_lba()
153 bdev_logical_block_size(bdev)) - 1ULL; last_lba()
248 * Description: Reads @count bytes from @state->bdev into @buffer.
255 struct block_device *bdev = state->bdev; read_lba() local
256 sector_t n = lba * (bdev_logical_block_size(bdev) / 512); read_lba()
258 if (!buffer || lba > last_lba(bdev)) read_lba()
319 * and fills a GPT header starting at @lba from @state->bdev.
326 unsigned ssz = bdev_logical_block_size(state->bdev); alloc_read_gpt_header()
373 bdev_logical_block_size(state->bdev)) { is_gpt_valid()
376 bdev_logical_block_size(state->bdev)); is_gpt_valid()
412 lastlba = last_lba(state->bdev); is_gpt_valid()
597 sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9; find_valid_gpt()
603 lastlba = last_lba(state->bdev); find_valid_gpt()
690 unsigned ssz = bdev_logical_block_size(state->bdev) / 512; efi_partition()
708 if (!is_pte_valid(&ptes[i], last_lba(state->bdev))) efi_partition()
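
The two last_lba() helpers above differ only in the sector size they assume: aix.c hard-codes 512-byte sectors, while efi.c divides by the device's logical block size (which matters on 4Kn drives). A combined sketch:

	/* Last addressable LBA of @bdev for a given sector size @ssz. */
	static u64 my_last_lba(struct block_device *bdev, unsigned int ssz)
	{
		if (!bdev || !bdev->bd_inode)
			return 0;
		return div_u64(i_size_read(bdev->bd_inode), ssz) - 1ULL;
	}
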
H A Dcheck.c142 check_partition(struct gendisk *hd, struct block_device *bdev) check_partition() argument
157 state->bdev = bdev; check_partition()
H A Dibm.c291 struct block_device *bdev = state->bdev; ibm_partition() local
302 blocksize = bdev_logical_block_size(bdev); ibm_partition()
305 i_size = i_size_read(bdev->bd_inode); ibm_partition()
317 if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) ibm_partition()
319 if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0) { ibm_partition()
H A Dsun.c71 bdevname(bdev, b), be16_to_cpu(label->magic)); */ sun_partition()
81 bdevname(state->bdev, b)); sun_partition()
H A Dmac.c131 note_bootable_part(state->bdev->bd_dev, found_root, mac_partition()
H A Datari.c50 hd_size = state->bdev->bd_inode->i_size >> 9; atari_partition()
H A Dldm.c373 num_sects = state->bdev->bd_inode->i_size >> 9; ldm_validate_privheads()
408 * @base: Offset, into @state->bdev, of the database
412 * @state->bdev and return the parsed information into @toc1.
484 * @base: Offset, into @bdev, of the database
487 * Find the vmdb of the LDM Database stored on @bdev and return the parsed
543 * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk
555 * Return: 'true' @state->bdev is a dynamic disk
556 * 'false' @state->bdev is not a dynamic disk, or an error occurred
1412 * @base: Offset, into @state->bdev, of the database
1496 * This determines whether the device @bdev is a dynamic disk and if so creates
1504 * Return: 1 Success, @state->bdev is a dynamic disk and we handled it
1505 * 0 Success, @state->bdev is not a dynamic disk
1507 * Or @state->bdev is a dynamic disk, but it may be corrupted
H A Dacorn.c284 nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect; adfspart_check_ADFS()
549 size = get_capacity(state->bdev->bd_disk); adfspart_check_EESOX()
/linux-4.1.27/fs/udf/
H A Dlowlevel.c32 struct block_device *bdev = sb->s_bdev; udf_get_last_session() local
37 i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info); udf_get_last_session()
52 struct block_device *bdev = sb->s_bdev; udf_get_last_block() local
59 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) || udf_get_last_block()
61 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; udf_get_last_block()
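
udf_get_last_block() above illustrates the fallback idiom for optical media: ask the drive via ioctl_by_bdev(CDROM_LAST_WRITTEN) and, if that fails or reports zero, derive the block count from the inode size and the filesystem block size. Essentially:

	static unsigned long my_last_block(struct super_block *sb)
	{
		struct block_device *bdev = sb->s_bdev;
		unsigned long lblock = 0;

		if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN,
				  (unsigned long)&lblock) || lblock == 0)
			lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
		return lblock;
	}
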
/linux-4.1.27/drivers/input/keyboard/
H A Dgpio_keys_polled.c69 struct gpio_keys_polled_dev *bdev = dev->private; gpio_keys_polled_poll() local
70 const struct gpio_keys_platform_data *pdata = bdev->pdata; gpio_keys_polled_poll()
75 struct gpio_keys_button_data *bdata = &bdev->data[i]; gpio_keys_polled_poll()
87 struct gpio_keys_polled_dev *bdev = dev->private; gpio_keys_polled_open() local
88 const struct gpio_keys_platform_data *pdata = bdev->pdata; gpio_keys_polled_open()
91 pdata->enable(bdev->dev); gpio_keys_polled_open()
96 struct gpio_keys_polled_dev *bdev = dev->private; gpio_keys_polled_close() local
97 const struct gpio_keys_platform_data *pdata = bdev->pdata; gpio_keys_polled_close()
100 pdata->disable(bdev->dev); gpio_keys_polled_close()
178 struct gpio_keys_polled_dev *bdev; gpio_keys_polled_probe() local
202 bdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); gpio_keys_polled_probe()
203 if (!bdev) { gpio_keys_polled_probe()
214 poll_dev->private = bdev; gpio_keys_polled_probe()
236 struct gpio_keys_button_data *bdata = &bdev->data[i]; gpio_keys_polled_probe()
276 bdev->poll_dev = poll_dev; gpio_keys_polled_probe()
277 bdev->dev = dev; gpio_keys_polled_probe()
278 bdev->pdata = pdata; gpio_keys_polled_probe()
279 platform_set_drvdata(pdev, bdev); gpio_keys_polled_probe()
291 &bdev->data[i]); gpio_keys_polled_probe()
/linux-4.1.27/drivers/gpu/drm/qxl/
H A Dqxl_ttm.c40 static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) qxl_get_qdev() argument
45 mman = container_of(bdev, struct qxl_mman, bdev); qxl_get_qdev()
143 r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); qxl_mmap()
155 static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) qxl_invalidate_caches() argument
160 static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, qxl_init_mem_type() argument
216 static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, qxl_ttm_io_mem_reserve() argument
219 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; qxl_ttm_io_mem_reserve()
220 struct qxl_device *qdev = qxl_get_qdev(bdev); qxl_ttm_io_mem_reserve()
249 static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev, qxl_ttm_io_mem_free() argument
316 static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev, qxl_ttm_tt_create() argument
323 qdev = qxl_get_qdev(bdev); qxl_ttm_tt_create()
329 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, qxl_ttm_tt_create()
398 r = ttm_bo_device_init(&qdev->mman.bdev, qxl_ttm_init()
409 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM, qxl_ttm_init()
415 r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0, qxl_ttm_init()
437 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM); qxl_ttm_fini()
438 ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0); qxl_ttm_fini()
439 ttm_bo_device_release(&qdev->mman.bdev); qxl_ttm_fini()
455 struct ttm_bo_global *glob = rdev->mman.bdev.glob; qxl_mm_dump_table()
480 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; qxl_ttm_debugfs_init()
482 qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv; qxl_ttm_debugfs_init()
H A Dqxl_object.c111 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, qxl_bo_create()
147 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kmap_atomic_page()
160 ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kmap_atomic_page()
189 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kunmap_atomic_page()
202 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kunmap_atomic_page()
319 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); qxl_surf_evict()
324 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM); qxl_vram_evict()
H A Dqxl_release.c437 struct ttm_bo_device *bdev; qxl_release_fence_buffer_objects() local
449 bdev = bo->bdev; qxl_release_fence_buffer_objects()
450 qdev = container_of(bdev, struct qxl_device, mman.bdev); qxl_release_fence_buffer_objects()
460 driver = bdev->driver; qxl_release_fence_buffer_objects()
/linux-4.1.27/drivers/gpu/drm/bochs/
H A Dbochs_mm.c16 return container_of(bd, struct bochs_device, ttm.bdev); bochs_bdev()
90 static int bochs_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, bochs_bo_init_mem_type() argument
134 static int bochs_ttm_io_mem_reserve(struct ttm_bo_device *bdev, bochs_ttm_io_mem_reserve() argument
137 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; bochs_ttm_io_mem_reserve()
138 struct bochs_device *bochs = bochs_bdev(bdev); bochs_ttm_io_mem_reserve()
163 static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev, bochs_ttm_io_mem_free() argument
187 static struct ttm_tt *bochs_ttm_tt_create(struct ttm_bo_device *bdev, bochs_ttm_tt_create() argument
198 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { bochs_ttm_tt_create()
219 struct ttm_bo_device *bdev = &bochs->ttm.bdev; bochs_mm_init() local
226 ret = ttm_bo_device_init(&bochs->ttm.bdev, bochs_mm_init()
237 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, bochs_mm_init()
253 ttm_bo_device_release(&bochs->ttm.bdev); bochs_mm_fini()
346 return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev); bochs_mmap()
369 bochsbo->bo.bdev = &bochs->ttm.bdev; bochs_bo_create()
370 bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; bochs_bo_create()
374 acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size, bochs_bo_create()
377 ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, bochs_bo_create()
H A Dbochs.h84 struct ttm_bo_device bdev; member in struct:bochs_device::__anon4005
H A Dbochs_fbdev.c138 drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); bochsfb_create()
/linux-4.1.27/fs/logfs/
H A Ddev_bdev.c17 static int sync_request(struct page *page, struct block_device *bdev, int rw) sync_request() argument
29 bio.bi_bdev = bdev; sync_request()
39 struct block_device *bdev = logfs_super(sb)->s_bdev; bdev_readpage() local
42 err = sync_request(page, bdev, READ); bdev_readpage()
273 struct block_device *bdev = logfs_super(sb)->s_bdev; bdev_write_sb() local
276 return sync_request(page, bdev, WRITE); bdev_write_sb()
304 struct block_device *bdev; logfs_get_sb_bdev() local
306 bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, logfs_get_sb_bdev()
308 if (IS_ERR(bdev)) logfs_get_sb_bdev()
309 return PTR_ERR(bdev); logfs_get_sb_bdev()
311 if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { logfs_get_sb_bdev()
312 int mtdnr = MINOR(bdev->bd_dev); logfs_get_sb_bdev()
313 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); logfs_get_sb_bdev()
317 p->s_bdev = bdev; logfs_get_sb_bdev()
/linux-4.1.27/drivers/gpu/drm/ast/
H A Dast_ttm.c35 return container_of(bd, struct ast_private, ttm.bdev); ast_bdev()
113 ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, ast_bo_init_mem_type() argument
156 static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev, ast_ttm_io_mem_reserve() argument
159 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; ast_ttm_io_mem_reserve()
160 struct ast_private *ast = ast_bdev(bdev); ast_ttm_io_mem_reserve()
185 static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) ast_ttm_io_mem_free() argument
211 static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev, ast_ttm_tt_create() argument
221 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { ast_ttm_tt_create()
254 struct ttm_bo_device *bdev = &ast->ttm.bdev; ast_mm_init() local
260 ret = ttm_bo_device_init(&ast->ttm.bdev, ast_mm_init()
271 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, ast_mm_init()
286 ttm_bo_device_release(&ast->ttm.bdev); ast_mm_fini()
332 astbo->bo.bdev = &ast->ttm.bdev; ast_bo_create()
336 acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size, ast_bo_create()
339 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, ast_bo_create()
434 return ttm_bo_mmap(filp, vma, &ast->ttm.bdev); ast_mmap()
/linux-4.1.27/drivers/gpu/drm/cirrus/
H A Dcirrus_ttm.c35 return container_of(bd, struct cirrus_device, ttm.bdev); cirrus_bdev()
113 cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, cirrus_bo_init_mem_type() argument
156 static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev, cirrus_ttm_io_mem_reserve() argument
159 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; cirrus_ttm_io_mem_reserve()
160 struct cirrus_device *cirrus = cirrus_bdev(bdev); cirrus_ttm_io_mem_reserve()
185 static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) cirrus_ttm_io_mem_free() argument
211 static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev, cirrus_ttm_tt_create() argument
221 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { cirrus_ttm_tt_create()
254 struct ttm_bo_device *bdev = &cirrus->ttm.bdev; cirrus_mm_init() local
260 ret = ttm_bo_device_init(&cirrus->ttm.bdev, cirrus_mm_init()
271 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, cirrus_mm_init()
290 ttm_bo_device_release(&cirrus->ttm.bdev); cirrus_mm_fini()
336 cirrusbo->bo.bdev = &cirrus->ttm.bdev; cirrus_bo_create()
340 acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size, cirrus_bo_create()
343 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, cirrus_bo_create()
418 return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev); cirrus_mmap()
/linux-4.1.27/drivers/gpu/drm/mgag200/
H A Dmgag200_ttm.c35 return container_of(bd, struct mga_device, ttm.bdev); mgag200_bdev()
113 mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, mgag200_bo_init_mem_type() argument
156 static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev, mgag200_ttm_io_mem_reserve() argument
159 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; mgag200_ttm_io_mem_reserve()
160 struct mga_device *mdev = mgag200_bdev(bdev); mgag200_ttm_io_mem_reserve()
185 static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) mgag200_ttm_io_mem_free() argument
211 static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev, mgag200_ttm_tt_create() argument
221 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { mgag200_ttm_tt_create()
254 struct ttm_bo_device *bdev = &mdev->ttm.bdev; mgag200_mm_init() local
260 ret = ttm_bo_device_init(&mdev->ttm.bdev, mgag200_mm_init()
271 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT); mgag200_mm_init()
285 ttm_bo_device_release(&mdev->ttm.bdev); mgag200_mm_fini()
332 mgabo->bo.bdev = &mdev->ttm.bdev; mgag200_bo_create()
336 acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size, mgag200_bo_create()
339 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, mgag200_bo_create()
435 return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev); mgag200_mmap()
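
bochs, ast, cirrus and mgag200 all recover their private device structure from the ttm_bo_device pointer the same way: because the ttm_bo_device is embedded in the driver struct, container_of() does it with no stored back-pointer. In sketch form:

	static struct my_device *my_bdev(struct ttm_bo_device *bd)
	{
		return container_of(bd, struct my_device, ttm.bdev);
	}
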
/linux-4.1.27/drivers/net/wireless/b43/
H A Dbus.c36 return 0; /* bcma_bus_may_powerdown(dev->bdev->bus); */ b43_bus_bcma_bus_may_powerdown()
45 return bcma_core_is_enabled(dev->bdev); b43_bus_bcma_device_is_enabled()
50 bcma_core_enable(dev->bdev, core_specific_flags); b43_bus_bcma_device_enable()
55 bcma_core_disable(dev->bdev, core_specific_flags); b43_bus_bcma_device_disable()
59 return bcma_read16(dev->bdev, offset); b43_bus_bcma_read16()
63 return bcma_read32(dev->bdev, offset); b43_bus_bcma_read32()
68 bcma_write16(dev->bdev, offset, value); b43_bus_bcma_write16()
73 bcma_write32(dev->bdev, offset, value); b43_bus_bcma_write32()
79 bcma_block_read(dev->bdev, buffer, count, offset, reg_width); b43_bus_bcma_block_read()
85 bcma_block_write(dev->bdev, buffer, count, offset, reg_width); b43_bus_bcma_block_write()
95 dev->bdev = core; b43_bus_dev_bcma_init()
241 return bcma_get_drvdata(dev->bdev); b43_bus_get_wldev()
256 bcma_set_drvdata(dev->bdev, wldev); b43_bus_set_wldev()
H A Dbus.h16 struct bcma_device *bdev; member in union:b43_bus_dev::__anon7767
70 return (dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI); b43_bus_host_is_pci()
H A Dphy_common.c352 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_phy_put_into_reset()
356 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_phy_put_into_reset()
359 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_phy_put_into_reset()
361 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_phy_put_into_reset()
392 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_phy_take_out_of_reset()
396 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_phy_take_out_of_reset()
400 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_phy_take_out_of_reset()
403 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_phy_take_out_of_reset()
587 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_phy_force_clock()
592 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_phy_force_clock()
H A Dmain.c1216 bcma_cc = &dev->dev->bdev->bus->drv_cc; b43_wireless_core_phy_pll_reset()
1243 flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_bcma_phy_reset()
1246 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags); b43_bcma_phy_reset()
1268 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_bcma_wireless_core_reset()
1271 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_bcma_wireless_core_reset()
1273 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_bcma_wireless_core_reset()
1275 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_bcma_wireless_core_reset()
1277 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_bcma_wireless_core_reset()
1279 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_bcma_wireless_core_reset()
1282 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST); b43_bcma_wireless_core_reset()
1284 bcma_core_pll_ctl(dev->dev->bdev, req, status, true); b43_bcma_wireless_core_reset()
2920 bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, mask, set); b43_gpio_init()
2947 bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, ~0, 0); b43_gpio_cleanup()
3030 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_mac_phy_clock_set()
3035 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_mac_phy_clock_set()
3943 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL); b43_switch_band()
3948 bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp); b43_switch_band()
4822 bcma_host_pci_down(dev->dev->bdev->bus); b43_wireless_core_exit()
4869 bcma_host_pci_irq_ctl(dev->dev->bdev->bus, b43_wireless_core_init()
4870 dev->dev->bdev, true); b43_wireless_core_init()
4871 bcma_host_pci_up(dev->dev->bdev->bus); b43_wireless_core_init()
5336 dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI) b43_supported_bands()
5337 dev_id = dev->dev->bdev->bus->host_pci->device; b43_supported_bands()
5433 tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST); b43_wireless_core_attach()
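
Note that in b43 "bdev" names a struct bcma_device (see the bus.h hit above), not a block device; the bus abstraction simply forwards to the bcma accessors, e.g.:

	static u16 my_bus_read16(struct b43_bus_dev *dev, u16 offset)
	{
		return bcma_read16(dev->bdev, offset);
	}

	static void my_bus_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
	{
		bcma_write16(dev->bdev, offset, value);
	}
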
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_ttm.h7 return container_of(bd, struct nouveau_drm, ttm.bdev); nouveau_bdev()
H A Dnouveau_sgdma.c92 nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev, nouveau_sgdma_create_ttm() argument
96 struct nouveau_drm *drm = nouveau_bdev(bdev); nouveau_sgdma_create_ttm()
108 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) nouveau_sgdma_create_ttm()
H A Dnouveau_ttm.c35 struct nouveau_drm *drm = nouveau_bdev(man->bdev); nouveau_vram_manager_init()
66 struct nouveau_drm *drm = nouveau_bdev(man->bdev); nouveau_vram_manager_del()
78 struct nouveau_drm *drm = nouveau_bdev(man->bdev); nouveau_vram_manager_new()
167 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_gart_manager_new()
213 struct nouveau_drm *drm = nouveau_bdev(man->bdev); nv04_gart_manager_init()
290 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); nouveau_ttm_mmap()
381 ret = ttm_bo_device_init(&drm->ttm.bdev, nouveau_ttm_init()
395 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, nouveau_ttm_init()
412 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT, nouveau_ttm_init()
428 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); nouveau_ttm_fini()
429 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); nouveau_ttm_fini()
432 ttm_bo_device_release(&drm->ttm.bdev); nouveau_ttm_fini()
H A Dnouveau_bo.c136 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_del_ttm()
151 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_fixup_align()
215 nvbo->bo.bdev = &drm->ttm.bdev; nouveau_bo_new()
230 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, nouveau_bo_new()
233 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, nouveau_bo_new()
262 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); set_placement_range()
315 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_pin()
389 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_unpin()
461 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_sync_for_device()
481 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_sync_for_cpu()
579 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, nouveau_ttm_tt_create() argument
583 struct nouveau_drm *drm = nouveau_bdev(bdev); nouveau_ttm_tt_create()
587 return ttm_agp_tt_create(bdev, dev->agp->bridge, size, nouveau_ttm_tt_create()
592 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); nouveau_ttm_tt_create()
596 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) nouveau_bo_invalidate_caches() argument
603 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, nouveau_bo_init_mem_type() argument
606 struct nouveau_drm *drm = nouveau_bdev(bdev); nouveau_bo_init_mem_type()
1065 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_move_m2mf()
1257 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_vm_bind()
1280 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_vm_cleanup()
1292 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_move()
1355 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) nouveau_ttm_io_mem_reserve() argument
1357 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; nouveau_ttm_io_mem_reserve()
1358 struct nouveau_drm *drm = nouveau_bdev(bdev); nouveau_ttm_io_mem_reserve()
1407 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) nouveau_ttm_io_mem_free() argument
1409 struct nouveau_drm *drm = nouveau_bdev(bdev); nouveau_ttm_io_mem_free()
1422 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_ttm_fault_reserve_notify()
1488 drm = nouveau_bdev(ttm->bdev); nouveau_ttm_tt_populate()
1553 drm = nouveau_bdev(ttm->bdev); nouveau_ttm_tt_unpopulate()
H A Dnouveau_drm.h125 struct ttm_bo_device bdev; member in struct:nouveau_drm::__anon4162
/linux-4.1.27/drivers/md/bcache/
H A Dsuper.c87 static const char *read_super(struct cache_sb *sb, struct block_device *bdev, read_super() argument
92 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); read_super()
142 if (sb->block_size << 9 < bdev_logical_block_size(bdev)) read_super()
182 if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets) read_super()
283 bio->bi_bdev = dc->bdev; bch_write_bdev_super()
329 bio->bi_bdev = ca->bdev; for_each_cache()
532 bio->bi_bdev = ca->bdev; prio_io()
692 bd_unlink_disk_holder(ca->bdev, d->disk); bcache_device_unlink()
703 bd_link_disk_holder(ca->bdev, d->disk); bcache_device_link()
863 sectors += bdev_sectors(dc->bdev); calc_cached_dev_sectors()
900 bd_link_disk_holder(dc->bdev, dc->disk.disk); bch_cached_dev_run()
938 pr_info("Caching disabled for %s", bdevname(dc->bdev, buf)); cached_dev_detach_finish()
970 bdevname(dc->bdev, buf); bch_cached_dev_attach()
1064 bcache_device_link(&dc->disk, c, "bdev"); bch_cached_dev_attach()
1070 bdevname(dc->bdev, buf), dc->disk.disk->disk_name, bch_cached_dev_attach()
1094 bd_unlink_disk_holder(dc->bdev, dc->disk.disk); cached_dev_free()
1100 if (!IS_ERR_OR_NULL(dc->bdev)) cached_dev_free()
1101 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); cached_dev_free()
1127 struct request_queue *q = bdev_get_queue(dc->bdev); cached_dev_init()
1154 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); cached_dev_init()
1159 dc->bdev->bd_part->nr_sects - dc->sb.data_offset); cached_dev_init()
1173 struct block_device *bdev, register_bdev()
1181 dc->bdev = bdev; register_bdev()
1182 dc->bdev->bd_holder = dc; register_bdev()
1194 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj, register_bdev()
1200 pr_info("registered backing device %s", bdevname(bdev, name)); register_bdev()
1212 pr_notice("error opening %s: %s", bdevname(bdev, name), err); register_bdev()
1832 if (!IS_ERR_OR_NULL(ca->bdev)) bch_cache_release()
1833 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); bch_cache_release()
1876 struct block_device *bdev, struct cache *ca) register_cache()
1883 ca->bdev = bdev; register_cache()
1884 ca->bdev->bd_holder = ca; register_cache()
1892 if (blk_queue_discard(bdev_get_queue(ca->bdev))) register_cache()
1899 if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { register_cache()
1914 pr_info("registered cache device %s", bdevname(bdev, name)); register_cache()
1921 pr_notice("error opening %s: %s", bdevname(bdev, name), err); register_cache()
1934 static bool bch_is_open_backing(struct block_device *bdev) { bch_is_open_backing() argument
1940 if (dc->bdev == bdev) bch_is_open_backing()
1943 if (dc->bdev == bdev) bch_is_open_backing()
1948 static bool bch_is_open_cache(struct block_device *bdev) { bch_is_open_cache() argument
1955 if (ca->bdev == bdev) bch_is_open_cache()
1960 static bool bch_is_open(struct block_device *bdev) { bch_is_open() argument
1961 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev); bch_is_open()
1971 struct block_device *bdev = NULL; register_bcache() local
1982 bdev = blkdev_get_by_path(strim(path), register_bcache()
1985 if (IS_ERR(bdev)) { register_bcache()
1986 if (bdev == ERR_PTR(-EBUSY)) { register_bcache()
1987 bdev = lookup_bdev(strim(path)); register_bcache()
1989 if (!IS_ERR(bdev) && bch_is_open(bdev)) register_bcache()
2001 if (set_blocksize(bdev, 4096)) register_bcache()
2004 err = read_super(sb, bdev, &sb_page); register_bcache()
2014 register_bdev(sb, sb_page, bdev, dc); register_bcache()
2021 if (register_cache(sb, sb_page, bdev, ca) != 0) register_bcache()
2033 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); register_bcache()
1172 register_bdev(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cached_dev *dc) register_bdev() argument
1875 register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) register_cache() argument
H A Dio.c138 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; __bch_submit_bbio()
198 bdevname(ca->bdev, buf), m); bch_count_io_errors()
202 bdevname(ca->bdev, buf), m); bch_count_io_errors()
H A Ddebug.c52 bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; bch_btree_verify()
131 bdevname(dc->bdev, name), bio_for_each_segment()
H A Dwriteback.c28 int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev), __update_writeback_rate()
188 io->bio.bi_bdev = io->dc->bdev; write_dirty()
258 &w->key, 0)->bdev; read_dirty()
H A Drequest.c908 blk_queue_discard(bdev_get_queue(dc->bdev))) cached_dev_write()
961 bio->bi_bdev = dc->bdev; cached_dev_make_request()
986 !blk_queue_discard(bdev_get_queue(dc->bdev))) cached_dev_make_request()
997 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); cached_dev_ioctl()
1004 struct request_queue *q = bdev_get_queue(dc->bdev); cached_dev_congested()
1015 q = bdev_get_queue(ca->bdev); cached_dev_congested()
1119 q = bdev_get_queue(ca->bdev); flash_dev_congested()
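
register_bcache() above follows the standard open-by-path flow: blkdev_get_by_path() with an exclusive holder, a lookup_bdev()/bch_is_open() retry on -EBUSY to distinguish "already registered by us" from a genuine conflict, then set_blocksize() before reading the superblock. A condensed, hedged sketch of the happy path only:

	static int my_register(const char *path, struct cache_sb *sb,
			       void *holder)
	{
		struct block_device *bdev;

		bdev = blkdev_get_by_path(path,
					  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
					  holder);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);	/* -EBUSY needs the lookup_bdev() check */

		if (set_blocksize(bdev, 4096)) {
			blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
			return -EINVAL;
		}
		/* ... read_super(sb, bdev, ...) and register the device ... */
		return 0;
	}
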
/linux-4.1.27/drivers/md/
H A Ddm-linear.c86 bio->bi_bdev = lc->dev->bdev; linear_map_bio()
127 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) linear_ioctl()
130 return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); linear_ioctl()
137 struct request_queue *q = bdev_get_queue(lc->dev->bdev); linear_merge()
142 bvm->bi_bdev = lc->dev->bdev; linear_merge()
H A Ddm-snap.c287 struct block_device *bdev; member in struct:origin
353 static unsigned origin_hash(struct block_device *bdev) origin_hash() argument
355 return bdev->bd_dev & ORIGIN_MASK; origin_hash()
365 if (bdev_equal(o->bdev, origin)) __lookup_origin()
373 struct list_head *sl = &_origins[origin_hash(o->bdev)]; __insert_origin()
384 if (bdev_equal(o->dev->bdev, origin)) __lookup_dm_origin()
392 struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; __insert_dm_origin()
426 o = __lookup_origin(snap->origin->bdev); __find_snapshots_sharing_cow()
433 if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) __find_snapshots_sharing_cow()
524 struct block_device *bdev = snap->origin->bdev; register_snapshot() local
539 o = __lookup_origin(bdev); register_snapshot()
548 o->bdev = bdev; register_snapshot()
566 struct block_device *bdev = s->origin->bdev; reregister_snapshot() local
571 __insert_snapshot(__lookup_origin(bdev), s); reregister_snapshot()
581 o = __lookup_origin(s->origin->bdev); unregister_snapshot()
802 cow_dev_size = get_dev_size(s->cow->bdev); init_hash_tables()
997 dest.bdev = s->origin->bdev; snapshot_merge_next_chunks()
999 dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); snapshot_merge_next_chunks()
1001 src.bdev = s->cow->bdev; snapshot_merge_next_chunks()
1557 struct block_device *bdev = s->origin->bdev; start_copy() local
1560 dev_size = get_dev_size(bdev); start_copy()
1562 src.bdev = bdev; start_copy()
1566 dest.bdev = s->cow->bdev; start_copy()
1652 bio->bi_bdev = s->cow->bdev; remap_exception()
1670 bio->bi_bdev = s->cow->bdev; snapshot_map()
1753 bio->bi_bdev = s->origin->bdev; snapshot_map()
1786 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1788 bio->bi_bdev = s->cow->bdev; snapshot_merge_map()
1808 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1822 bio->bi_bdev = s->origin->bdev; snapshot_merge_map()
1888 o = __lookup_dm_origin(s->origin->bdev); snapshot_resume()
1941 static uint32_t get_origin_minimum_chunksize(struct block_device *bdev) get_origin_minimum_chunksize() argument
1946 min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); get_origin_minimum_chunksize()
1964 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); snapshot_merge_resume()
2027 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); snapshot_iterate_devices()
2166 o = __lookup_origin(origin->bdev); do_origin()
2199 o = __lookup_origin(merging_snap->origin->bdev); origin_write_extent()
2266 bio->bi_bdev = o->dev->bdev; origin_map()
2292 o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); origin_resume()
2328 struct request_queue *q = bdev_get_queue(o->dev->bdev); origin_merge()
2333 bvm->bi_bdev = o->dev->bdev; origin_merge()
H A Dlinear.c75 subq = bdev_get_queue(dev0->rdev->bdev); linear_mergeable_bvec()
77 bvm->bi_bdev = dev0->rdev->bdev; linear_mergeable_bvec()
105 struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); linear_congested()
158 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
164 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) rdev_for_each()
276 bio->bi_bdev = tmp_dev->rdev->bdev; linear_make_request()
312 bdevname(tmp_dev->rdev->bdev, b), linear_make_request()
H A Ddm-table.c268 if (dd->dm_dev->bdev->bd_dev == dev) find_device()
282 struct block_device *bdev = dev->bdev; device_area_is_invalid() local
284 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; device_area_is_invalid()
294 q = bdev_get_queue(bdev); device_area_is_invalid()
298 dm_device_name(ti->table->md), bdevname(bdev, b), device_area_is_invalid()
311 dm_device_name(ti->table->md), bdevname(bdev, b), device_area_is_invalid()
326 limits->logical_block_size, bdevname(bdev, b)); device_area_is_invalid()
335 limits->logical_block_size, bdevname(bdev, b)); device_area_is_invalid()
345 * device and not to touch the existing bdev field in case
356 r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev, upgrade_mode()
378 struct block_device *bdev; dm_get_device() local
383 bdev = lookup_bdev(path); dm_get_device()
384 if (IS_ERR(bdev)) { dm_get_device()
389 dev = bdev->bd_dev; dm_get_device()
390 bdput(bdev); dm_get_device()
423 struct block_device *bdev = dev->bdev; dm_set_device_limits() local
424 struct request_queue *q = bdev_get_queue(bdev); dm_set_device_limits()
429 dm_device_name(ti->table->md), bdevname(bdev, b)); dm_set_device_limits()
433 if (bdev_stack_limits(limits, bdev, start) < 0) dm_set_device_limits()
437 dm_device_name(ti->table->md), bdevname(bdev, b), dm_set_device_limits()
889 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); list_for_each_entry()
904 if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { list_for_each_entry()
1040 template_disk = dd->dm_dev->bdev->bd_disk; list_for_each_entry()
1318 struct request_queue *q = bdev_get_queue(dev->bdev); device_flush_capable()
1370 struct request_queue *q = bdev_get_queue(dev->bdev); device_is_nonrot()
1378 struct request_queue *q = bdev_get_queue(dev->bdev); device_is_not_random()
1386 struct request_queue *q = bdev_get_queue(dev->bdev); queue_supports_sg_merge()
1394 struct request_queue *q = bdev_get_queue(dev->bdev); queue_supports_sg_gaps()
1419 struct request_queue *q = bdev_get_queue(dev->bdev); device_not_write_same_capable()
1446 struct request_queue *q = bdev_get_queue(dev->bdev); device_discard_capable()
1661 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); list_for_each_entry()
1669 bdevname(dd->dm_dev->bdev, b)); list_for_each_entry()
H A Ddm-snap-transient.c43 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); transient_prepare_exception()
69 *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); transient_usage()
H A Dmultipath.c100 bdevname(rdev->bdev,b), multipath_end_request()
136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; multipath_make_request()
167 struct request_queue *q = bdev_get_queue(rdev->bdev); multipath_congested()
214 bdevname(rdev->bdev, b), multipath_error()
237 bdevname(tmp->rdev->bdev,b)); print_multipath_conf()
258 q = rdev->bdev->bd_disk->queue; multipath_add_disk()
259 disk_stack_limits(mddev->gendisk, rdev->bdev, multipath_add_disk()
367 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; multipathd()
434 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
440 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { rdev_for_each()
H A Draid0.c36 struct request_queue *q = bdev_get_queue(devlist[i]->bdev); raid0_congested()
62 + k]->bdev, b)); dump_zones()
93 bdevname(rdev1->bdev, b)); rdev_for_each()
102 rdev1->bdev->bd_disk->queue)); rdev_for_each()
108 bdevname(rdev1->bdev,b), rdev_for_each()
110 bdevname(rdev2->bdev,b2), rdev_for_each()
206 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) rdev_for_each()
241 bdevname(rdev->bdev, b));
247 bdevname(rdev->bdev, b), c);
382 subq = bdev_get_queue(rdev->bdev); raid0_mergeable_bvec()
384 bvm->bi_bdev = rdev->bdev; raid0_mergeable_bvec()
443 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
445 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) rdev_for_each()
542 split->bi_bdev = tmp_dev->bdev; raid0_make_request()
H A Ddm-cache-metadata.c100 struct block_device *bdev; member in struct:dm_cache_metadata
298 sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; __write_initial_superblock()
403 if (get_disk_ro(cmd->bdev->bd_disk)) __check_incompat_features()
484 cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, __create_persistent_data_objects()
675 static struct dm_cache_metadata *metadata_open(struct block_device *bdev, metadata_open() argument
691 cmd->bdev = bdev; metadata_open()
720 static struct dm_cache_metadata *lookup(struct block_device *bdev) lookup() argument
725 if (cmd->bdev == bdev) { lookup()
733 static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, lookup_or_open() argument
741 cmd = lookup(bdev); lookup_or_open()
747 cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); lookup_or_open()
750 cmd2 = lookup(bdev); lookup_or_open()
776 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, dm_cache_metadata_open() argument
781 struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, dm_cache_metadata_open()
H A Ddm-log-writes.c205 bio->bi_bdev = lc->logdev->bdev; write_metadata()
270 bio->bi_bdev = lc->logdev->bdev; log_one_block()
292 bio->bi_bdev = lc->logdev->bdev; log_one_block()
338 return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT; logdev_last_sector()
541 bio->bi_bdev = lc->dev->bdev; normal_map_bio()
725 if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) log_writes_ioctl()
728 return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); log_writes_ioctl()
735 struct request_queue *q = bdev_get_queue(lc->dev->bdev); log_writes_merge()
740 bvm->bi_bdev = lc->dev->bdev; log_writes_merge()
780 struct request_queue *q = bdev_get_queue(lc->dev->bdev); log_writes_io_hints()
H A Dmd.c420 bi->bi_bdev = rdev->bdev; rdev_for_each_rcu()
668 if (rdev->bdev->bd_dev == dev) find_rdev()
679 if (rdev->bdev->bd_dev == dev) find_rdev_rcu()
700 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; calc_dev_sboffset()
761 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; md_super_write()
784 rdev->meta_bdev : rdev->bdev; sync_page_io()
816 bdevname(rdev->bdev,b)); read_disk_sb()
980 bdevname(rdev->bdev, b); super_90_load()
1025 b, bdevname(refdev->bdev,b2)); super_90_load()
1031 b, bdevname(refdev->bdev, b2)); super_90_load()
1281 d->major = MAJOR(rdev2->bdev->bd_dev); rdev_for_each()
1282 d->minor = MINOR(rdev2->bdev->bd_dev); rdev_for_each()
1402 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; super_1_load()
1434 bdevname(rdev->bdev,b)); super_1_load()
1439 bdevname(rdev->bdev,b)); super_1_load()
1457 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; super_1_load()
1526 bdevname(rdev->bdev,b), super_1_load()
1527 bdevname(refdev->bdev,b2)); super_1_load()
1539 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9); super_1_load()
1801 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; super_1_sync()
1836 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; super_1_rdev_size_change()
1846 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; super_1_rdev_size_change()
1937 if (rdev->bdev->bd_contains == rdev_for_each_rcu()
1938 rdev2->bdev->bd_contains) { rdev_for_each_rcu()
1975 if (blk_integrity_compare(reference->bdev->bd_disk, rdev_for_each()
1976 rdev->bdev->bd_disk) < 0) rdev_for_each()
1979 if (!reference || !bdev_get_integrity(reference->bdev))
1986 bdev_get_integrity(reference->bdev)) != 0) {
2010 bi_rdev = bdev_get_integrity(rdev->bdev); md_integrity_add_rdev()
2018 rdev->bdev->bd_disk) >= 0) md_integrity_add_rdev()
2033 if (find_rdev(mddev, rdev->bdev->bd_dev)) bind_rdev_to_array()
2074 bdevname(rdev->bdev,b); bind_rdev_to_array()
2084 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; bind_rdev_to_array()
2090 bd_link_disk_holder(rdev->bdev, mddev->gendisk); bind_rdev_to_array()
2114 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); unbind_rdev_from_array()
2116 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); unbind_rdev_from_array()
2140 struct block_device *bdev; lock_rdev() local
2143 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lock_rdev()
2145 if (IS_ERR(bdev)) { lock_rdev()
2148 return PTR_ERR(bdev); lock_rdev()
2150 rdev->bdev = bdev; lock_rdev()
2156 struct block_device *bdev = rdev->bdev; unlock_rdev() local
2157 rdev->bdev = NULL; unlock_rdev()
2158 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); unlock_rdev()
2168 bdevname(rdev->bdev,b)); export_rdev()
2172 md_autodetect_dev(rdev->bdev->bd_dev); export_rdev()
2337 bdevname(rdev->bdev, b), rdev_for_each()
2350 bdevname(rdev->bdev, b)); rdev_for_each()
2873 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - rdev_size_store()
2885 * ->bdev do not overlap. 'rcu' is sufficient to walk rdev_size_store()
2899 if (rdev->bdev == rdev2->bdev && rdev_for_each()
3130 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; md_import_device()
3134 bdevname(rdev->bdev,b)); md_import_device()
3146 bdevname(rdev->bdev,b), md_import_device()
3153 bdevname(rdev->bdev,b)); md_import_device()
3161 if (rdev->bdev) md_import_device()
3191 bdevname(rdev->bdev,b)); rdev_for_each_safe()
3205 mdname(mddev), bdevname(rdev->bdev, b), rdev_for_each_safe()
3215 bdevname(rdev->bdev,b)); rdev_for_each_safe()
3224 bdevname(rdev->bdev, b)); rdev_for_each_safe()
3804 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3805 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
5041 sync_blockdev(rdev->bdev); rdev_for_each()
5042 invalidate_bdev(rdev->bdev); rdev_for_each()
5109 rdev->bdev->bd_contains == rdev_for_each()
5110 rdev2->bdev->bd_contains) { rdev_for_each()
5116 bdevname(rdev->bdev,b), rdev_for_each()
5117 bdevname(rdev2->bdev,b2)); rdev_for_each()
5397 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) md_set_readonly() argument
5420 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || md_set_readonly()
5423 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { md_set_readonly()
5457 struct block_device *bdev) do_md_stop()
5482 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) || do_md_stop()
5486 (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) { do_md_stop()
5563 printk("<%s>", bdevname(rdev->bdev,b)); rdev_for_each()
5601 bdevname(rdev0->bdev,b)); autorun_devices()
5606 bdevname(rdev->bdev,b)); autorun_devices()
5624 bdevname(rdev0->bdev, b), rdev0->preferred_minor); autorun_devices()
5644 mdname(mddev), bdevname(rdev0->bdev,b)); autorun_devices()
5784 info.major = MAJOR(rdev->bdev->bd_dev); get_disk_info()
5785 info.minor = MINOR(rdev->bdev->bd_dev); get_disk_info()
5844 bdevname(rdev->bdev,b), add_new_disk()
5845 bdevname(rdev0->bdev,b2)); add_new_disk()
5976 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; add_new_disk()
6024 bdevname(rdev->bdev,b), mdname(mddev)); hot_remove_disk()
6061 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; hot_add_disk()
6068 bdevname(rdev->bdev,b), mdname(mddev)); hot_add_disk()
6517 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) md_getgeo() argument
6519 struct mddev *mddev = bdev->bd_disk->private_data; md_getgeo()
6553 static int md_ioctl(struct block_device *bdev, fmode_t mode, md_ioctl() argument
6596 mddev = bdev->bd_disk->private_data; md_ioctl()
6651 sync_blockdev(bdev); md_ioctl()
6723 err = do_md_stop(mddev, 0, bdev); md_ioctl()
6727 err = md_set_readonly(mddev, bdev); md_ioctl()
6759 /* if the bdev is going readonly the value of mddev->ro md_ioctl()
6852 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, md_compat_ioctl() argument
6867 return md_ioctl(bdev, mode, cmd, arg); md_compat_ioctl()
6871 static int md_open(struct block_device *bdev, fmode_t mode) md_open() argument
6877 struct mddev *mddev = mddev_find(bdev->bd_dev); md_open()
6883 if (mddev->gendisk != bdev->bd_disk) { md_open()
6888 /* Wait until bdev->bd_disk is definitely gone */ md_open()
6893 BUG_ON(mddev != bdev->bd_disk->private_data); md_open()
6903 check_disk_change(bdev); md_open()
7073 bdevname(rdev->bdev,b)); status_unused()
7269 bdevname(rdev->bdev,b), rdev->desc_nr); rdev_for_each_rcu()
7457 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; rdev_for_each_rcu()
5456 do_md_stop(struct mddev *mddev, int mode, struct block_device *bdev) do_md_stop() argument
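
md_getgeo() and md_open() above show the block_device_operations idiom of recovering the driver object through bdev->bd_disk->private_data. A sketch; my_dev and my_dev_capacity() are hypothetical, and the geometry is synthetic, as is usual for drivers that only exist to satisfy partitioning tools:

	static int my_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	{
		struct my_dev *d = bdev->bd_disk->private_data;
		sector_t capacity = my_dev_capacity(d);	/* hypothetical helper */

		geo->heads = 2;
		geo->sectors = 4;
		geo->cylinders = capacity / (geo->heads * geo->sectors);
		geo->start = 0;
		return 0;
	}
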
H A Ddm-flakey.c249 bio->bi_bdev = fc->dev->bdev; flakey_map_bio()
384 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) flakey_ioctl()
387 return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); flakey_ioctl()
394 struct request_queue *q = bdev_get_queue(fc->dev->bdev); flakey_merge()
399 bvm->bi_bdev = fc->dev->bdev; flakey_merge()
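flakey_map_bio() above shows the core device-mapper remap idiom: point bio->bi_bdev at the underlying device and let dm resubmit the bio. A skeletal target map function built on the same pattern (struct passthrough_ctx is invented for illustration; sketch only):

    #include <linux/device-mapper.h>

    struct passthrough_ctx {
            struct dm_dev *dev;     /* set up in the target's ctr */
    };

    static int passthrough_map(struct dm_target *ti, struct bio *bio)
    {
            struct passthrough_ctx *pc = ti->private;

            bio->bi_bdev = pc->dev->bdev;   /* remap to the backing bdev */
            return DM_MAPIO_REMAPPED;       /* dm core submits the bio */
    }

The same one-line remap recurs below in dm-stripe, dm-switch, dm-thin and dm-delay.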
H A D dm-stripe.c
269 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; stripe_map_range()
290 bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; stripe_map()
304 bio->bi_bdev = sc->stripe[stripe].dev->bdev; stripe_map()
425 q = bdev_get_queue(sc->stripe[stripe].dev->bdev); stripe_merge()
429 bvm->bi_bdev = sc->stripe[stripe].dev->bdev; stripe_merge()
H A D dm-mpath.c
167 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); list_for_each_entry_safe()
390 struct block_device *bdev; __multipath_map() local
419 bdev = pgpath->path.dev->bdev; __multipath_map()
425 clone->q = bdev_get_queue(bdev); __multipath_map()
426 clone->rq_disk = bdev->bd_disk; __multipath_map()
430 *__clone = blk_get_request(bdev_get_queue(bdev), __multipath_map()
438 (*__clone)->rq_disk = bdev->bd_disk; __multipath_map()
580 q = bdev_get_queue(p->path.dev->bdev); parse_path()
1240 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), activate_path()
1556 struct block_device *bdev; multipath_ioctl() local
1561 bdev = NULL; multipath_ioctl()
1573 bdev = pgpath->path.dev->bdev; multipath_ioctl()
1579 else if (!bdev) multipath_ioctl()
1587 if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) { multipath_ioctl()
1605 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); multipath_ioctl()
1630 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); __pgpath_busy()
H A D dm-switch.c
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; switch_map()
518 struct block_device *bdev; switch_ioctl() local
525 bdev = sctx->path_list[path_nr].dmdev->bdev; switch_ioctl()
531 if (ti->len + sctx->path_list[path_nr].start != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) switch_ioctl()
534 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); switch_ioctl()
H A D dm-era-target.c
261 struct block_device *bdev; member in struct:era_metadata
613 md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE, create_persistent_data_objects()
774 static struct era_metadata *metadata_open(struct block_device *bdev, metadata_open() argument
784 md->bdev = bdev; metadata_open()
1198 bio->bi_bdev = era->origin_dev->bdev; remap_to_origin()
1382 struct request_queue *q = bdev_get_queue(dev->bdev); dev_is_congested()
1483 md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true); era_ctr()
1629 format_dev_t(buf, era->metadata_dev->bdev->bd_dev); era_status()
1631 format_dev_t(buf, era->origin_dev->bdev->bd_dev); era_status()
1666 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; get_dev_size()
1680 struct request_queue *q = bdev_get_queue(era->origin_dev->bdev); era_merge()
1685 bvm->bi_bdev = era->origin_dev->bdev; era_merge()
H A D dm-exception-store.c
172 (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) || dm_exception_store_set_chunk_size()
174 (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) { dm_exception_store_set_chunk_size()
H A D dm-exception-store.h
191 static inline sector_t get_dev_size(struct block_device *bdev) get_dev_size() argument
193 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; get_dev_size()
H A D dm-verity.c
551 bio->bi_bdev = v->data_dev->bdev; verity_map()
644 ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT) verity_ioctl()
647 return r ? : __blkdev_driver_ioctl(v->data_dev->bdev, v->data_dev->mode, verity_ioctl()
655 struct request_queue *q = bdev_get_queue(v->data_dev->bdev); verity_merge()
660 bvm->bi_bdev = v->data_dev->bdev; verity_merge()
789 num < bdev_logical_block_size(v->data_dev->bdev) || verity_ctr()
799 num < bdev_logical_block_size(v->hash_dev->bdev) || verity_ctr()
947 v->bufio = dm_bufio_client_create(v->hash_dev->bdev, verity_ctr()
H A D dm.c
204 struct block_device *bdev; member in struct:mapped_device
442 static int dm_blk_open(struct block_device *bdev, fmode_t mode) dm_blk_open() argument
448 md = bdev->bd_disk->private_data; dm_blk_open()
549 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) dm_blk_getgeo() argument
551 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_getgeo()
556 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, dm_blk_ioctl() argument
559 struct mapped_device *md = bdev->bd_disk->private_data; dm_blk_ioctl()
745 struct block_device *bdev; open_table_device() local
749 BUG_ON(td->dm_dev.bdev); open_table_device()
751 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); open_table_device()
752 if (IS_ERR(bdev)) open_table_device()
753 return PTR_ERR(bdev); open_table_device()
755 r = bd_link_disk_holder(bdev, dm_disk(md)); open_table_device()
757 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); open_table_device()
761 td->dm_dev.bdev = bdev; open_table_device()
770 if (!td->dm_dev.bdev) close_table_device()
773 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); close_table_device()
774 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); close_table_device()
775 td->dm_dev.bdev = NULL; close_table_device()
783 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) find_table_device()
804 td->dm_dev.bdev = NULL; dm_get_table_device()
2328 md->bdev = bdget_disk(md->disk, 0); alloc_dev()
2329 if (!md->bdev) alloc_dev()
2333 md->flush_bio.bi_bdev = md->bdev; alloc_dev()
2397 bdput(md->bdev); free_dev()
2470 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); __set_size()
2500 struct block_device *bdev = dev->bdev; dm_device_merge_is_compulsory() local
2501 struct request_queue *q = bdev_get_queue(bdev); dm_device_merge_is_compulsory()
3103 md->frozen_sb = freeze_bdev(md->bdev); lock_fs()
3120 thaw_bdev(md->bdev, md->frozen_sb); unlock_fs()
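open_table_device()/close_table_device() above pair blkdev_get_by_dev() with bd_link_disk_holder() so the claimed device appears under the dm disk's holders/ directory in sysfs, and undo both on failure. A condensed sketch (names prefixed example_ are invented):

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/err.h>

    static struct block_device *example_open(dev_t dev, fmode_t mode,
                                             void *holder, struct gendisk *disk)
    {
            struct block_device *bdev;
            int r;

            bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, holder);
            if (IS_ERR(bdev))
                    return bdev;

            r = bd_link_disk_holder(bdev, disk);   /* sysfs holders/ link */
            if (r) {
                    blkdev_put(bdev, mode | FMODE_EXCL);
                    return ERR_PTR(r);
            }
            return bdev;
    }

    static void example_close(struct block_device *bdev, fmode_t mode,
                              struct gendisk *disk)
    {
            bd_unlink_disk_holder(bdev, disk);
            blkdev_put(bdev, mode | FMODE_EXCL);
    }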
H A D faulty.c
219 b->bi_bdev = conf->rdev->bdev; make_request()
224 bio->bi_bdev = conf->rdev->bdev; make_request()
323 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
H A D raid10.c
409 bdevname(rdev->bdev, b), raid10_end_read_request()
735 bdev_get_queue(rdev->bdev); raid10_mergeable_bvec()
739 bvm->bi_bdev = rdev->bdev; raid10_mergeable_bvec()
747 bdev_get_queue(rdev->bdev); raid10_mergeable_bvec()
751 bvm->bi_bdev = rdev->bdev; raid10_mergeable_bvec()
928 struct request_queue *q = bdev_get_queue(rdev->bdev); raid10_congested()
1246 read_bio->bi_bdev = rdev->bdev; __make_request()
1455 mbio->bi_bdev = rdev->bdev; __make_request()
1498 mbio->bi_bdev = rdev->bdev; __make_request()
1688 mdname(mddev), bdevname(rdev->bdev, b), error()
1712 bdevname(tmp->rdev->bdev,b)); print_conf()
1780 struct request_queue *q = bdev_get_queue(rdev->bdev); raid10_add_disk()
1816 disk_stack_limits(mddev->gendisk, rdev->bdev, raid10_add_disk()
1824 disk_stack_limits(mddev->gendisk, rdev->bdev, raid10_add_disk()
1850 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) raid10_add_disk()
2116 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); sync_request_write()
2119 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; sync_request_write()
2140 md_sync_acct(conf->mirrors[d].replacement->bdev, sync_request_write()
2273 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio)); recovery_request_write()
2278 md_sync_acct(conf->mirrors[d].replacement->bdev, recovery_request_write()
2375 bdevname(rdev->bdev, b); fix_read_error()
2482 bdevname(rdev->bdev, b)); fix_read_error()
2486 bdevname(rdev->bdev, b)); fix_read_error()
2521 bdevname(rdev->bdev, b)); fix_read_error()
2525 bdevname(rdev->bdev, b)); fix_read_error()
2535 bdevname(rdev->bdev, b)); fix_read_error()
2576 bdev_logical_block_size(rdev->bdev) >> 9); narrow_write_error()
2592 wbio->bi_bdev = rdev->bdev; narrow_write_error()
2657 bdevname(rdev->bdev, b), handle_read_error()
2666 bio->bi_bdev = rdev->bdev; handle_read_error()
3128 bio->bi_bdev = rdev->bdev; sync_request()
3153 bio->bi_bdev = rdev->bdev; sync_request()
3182 bio->bi_bdev = rdev->bdev; sync_request()
3302 bio->bi_bdev = conf->mirrors[d].rdev->bdev; sync_request()
3324 bio->bi_bdev = conf->mirrors[d].replacement->bdev; sync_request()
3655 q = bdev_get_queue(rdev->bdev); rdev_for_each()
3667 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
3672 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) rdev_for_each()
4388 read_bio->bi_bdev = rdev->bdev; reshape_request()
4422 b->bi_bdev = rdev2->bdev; reshape_request()
H A D dm-log.c
304 .bdev = lc->header_location.bdev, flush_header()
437 lc->header_location.bdev = lc->log_dev->bdev; create_log_context()
446 bdev)); create_log_context()
448 if (buf_size > i_size_read(dev->bdev->bd_inode)) { create_log_context()
H A D dm-snap-persistent.c
232 .bdev = dm_snap_cow(ps->store->snap)->bdev, chunk_io()
322 bdev) >> 9); read_header()
499 client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev, read_exceptions()
580 *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev); persistent_usage()
677 sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); persistent_prepare_exception()
H A D raid1.c
356 bdevname(conf->mirrors[mirror].rdev->bdev, raid1_end_read_request()
615 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); read_balance()
626 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; read_balance()
727 bdev_get_queue(rdev->bdev); raid1_mergeable_bvec()
731 bvm->bi_bdev = rdev->bdev; raid1_mergeable_bvec()
756 struct request_queue *q = bdev_get_queue(rdev->bdev); raid1_congested()
1198 read_bio->bi_bdev = mirror->rdev->bdev; make_request()
1409 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; make_request()
1513 mdname(mddev), bdevname(rdev->bdev, b), error()
1537 bdevname(rdev->bdev,b)); print_conf()
1619 struct request_queue *q = bdev_get_queue(rdev->bdev); raid1_add_disk()
1637 disk_stack_limits(mddev->gendisk, rdev->bdev, raid1_add_disk()
1677 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) raid1_add_disk()
1983 b->bi_bdev = conf->mirrors[i].rdev->bdev; process_checks()
2076 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio)); sync_request_write()
2179 bdevname(rdev->bdev, b)); fix_read_error()
2215 bdev_logical_block_size(rdev->bdev) >> 9); narrow_write_error()
2250 wbio->bi_bdev = rdev->bdev; narrow_write_error()
2372 bdevname(rdev->bdev, b)); handle_read_error()
2374 bio->bi_bdev = rdev->bdev; handle_read_error()
2617 bio->bi_bdev = rdev->bdev; sync_request()
2811 q = bdev_get_queue(rdev->bdev); rdev_for_each()
2922 disk_stack_limits(mddev->gendisk, rdev->bdev, rdev_for_each()
2924 if (blk_queue_discard(bdev_get_queue(rdev->bdev))) rdev_for_each()
H A D dm-bufio.h
24 dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
H A D dm-raid1.c
268 io[i].bdev = m->dev->bdev; mirror_flush()
338 from.bdev = m->dev->bdev; recover()
357 dest->bdev = m->dev->bdev; recover()
452 bio->bi_bdev = m->dev->bdev; map_bio()
459 io->bdev = m->dev->bdev; map_region()
H A D dm-thin.c
568 bio->bi_bdev = tc->pool_dev->bdev; remap()
580 bio->bi_bdev = tc->origin_dev->bdev; remap_to_origin()
971 to.bdev = tc->pool_dev->bdev; ll_zero()
1035 from.bdev = origin->bdev; schedule_copy()
1039 to.bdev = tc->pool_dev->bdev; schedule_copy()
2386 q = bdev_get_queue(pt->data_dev->bdev); pool_is_congested()
2410 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); data_dev_supports_discard()
2427 struct block_device *data_bdev = pt->data_dev->bdev; disable_passdown_if_not_supported()
2776 static sector_t get_dev_size(struct block_device *bdev) get_dev_size() argument
2778 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; get_dev_size()
2781 static void warn_if_metadata_device_too_big(struct block_device *bdev) warn_if_metadata_device_too_big() argument
2783 sector_t metadata_dev_size = get_dev_size(bdev); warn_if_metadata_device_too_big()
2788 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); warn_if_metadata_device_too_big()
2791 static sector_t get_metadata_dev_size(struct block_device *bdev) get_metadata_dev_size() argument
2793 sector_t metadata_dev_size = get_dev_size(bdev); get_metadata_dev_size()
2801 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) get_metadata_dev_size_in_blocks() argument
2803 sector_t metadata_dev_size = get_metadata_dev_size(bdev); get_metadata_dev_size_in_blocks()
2823 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; calc_metadata_threshold()
2883 warn_if_metadata_device_too_big(metadata_dev->bdev); pool_ctr()
2912 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, pool_ctr()
2996 bio->bi_bdev = pt->data_dev->bdev; pool_map()
3555 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev), pool_status()
3556 format_dev_t(buf2, pt->data_dev->bdev->bd_dev), pool_status()
3580 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); pool_merge()
3585 bvm->bi_bdev = pt->data_dev->bdev; pool_merge()
3601 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; set_discard_limits()
3788 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev); thin_ctr()
3943 tc->origin_size = get_dev_size(tc->origin_dev->bdev); thin_preresume()
3992 format_dev_t(buf, tc->pool_dev->bdev->bd_dev), thin_status()
3995 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev)); thin_status()
4010 struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev); thin_merge()
4015 bvm->bi_bdev = tc->pool_dev->bdev; thin_merge()
H A D dm-delay.c
278 bio->bi_bdev = dc->dev_write->bdev; delay_map()
286 bio->bi_bdev = dc->dev_read->bdev; delay_map()
H A D dm-bufio.c
92 struct block_device *bdev; member in struct:dm_bufio_client
562 .bdev = b->c->bdev, use_dmio()
605 b->bio.bi_bdev = b->c->bdev; use_inline_bio()
1305 .bdev = c->bdev, dm_bufio_issue_flush()
1426 return i_size_read(c->bdev->bd_inode) >> dm_bufio_get_device_size()
1574 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size, dm_bufio_client_create() argument
1593 c->bdev = bdev; dm_bufio_client_create()
H A D dm-thin-metadata.c
145 struct block_device *bdev; member in struct:dm_pool_metadata
478 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; __write_initial_superblock()
588 if (get_disk_ro(pmd->bdev->bd_disk)) __check_incompat_features()
685 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, __create_persistent_data_objects()
823 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, dm_pool_metadata_open() argument
841 pmd->bdev = bdev; dm_pool_metadata_open()
H A D dm-io.c
289 struct request_queue *q = bdev_get_queue(where->bdev); do_region()
317 num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), do_region()
322 bio->bi_bdev = where->bdev; do_region()
H A D dm-cache-metadata.h
56 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
H A D dm-thin-metadata.h
44 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
/linux-4.1.27/include/linux/
H A D blktrace_api.h
34 dev_t dev, struct block_device *bdev,
63 struct block_device *bdev,
73 # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
75 # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
77 # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
H A D blkdev.h
863 static inline struct request_queue *bdev_get_queue(struct block_device *bdev) bdev_get_queue() argument
865 return bdev->bd_disk->queue; /* this is never NULL */ bdev_get_queue()
1025 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1027 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1047 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1161 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1163 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1165 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1234 static inline unsigned short bdev_logical_block_size(struct block_device *bdev) bdev_logical_block_size() argument
1236 return queue_logical_block_size(bdev_get_queue(bdev)); bdev_logical_block_size()
1244 static inline unsigned int bdev_physical_block_size(struct block_device *bdev) bdev_physical_block_size() argument
1246 return queue_physical_block_size(bdev_get_queue(bdev)); bdev_physical_block_size()
1254 static inline int bdev_io_min(struct block_device *bdev) bdev_io_min() argument
1256 return queue_io_min(bdev_get_queue(bdev)); bdev_io_min()
1264 static inline int bdev_io_opt(struct block_device *bdev) bdev_io_opt() argument
1266 return queue_io_opt(bdev_get_queue(bdev)); bdev_io_opt()
1285 static inline int bdev_alignment_offset(struct block_device *bdev) bdev_alignment_offset() argument
1287 struct request_queue *q = bdev_get_queue(bdev); bdev_alignment_offset()
1292 if (bdev != bdev->bd_contains) bdev_alignment_offset()
1293 return bdev->bd_part->alignment_offset; bdev_alignment_offset()
1329 static inline int bdev_discard_alignment(struct block_device *bdev) bdev_discard_alignment() argument
1331 struct request_queue *q = bdev_get_queue(bdev); bdev_discard_alignment()
1333 if (bdev != bdev->bd_contains) bdev_discard_alignment()
1334 return bdev->bd_part->discard_alignment; bdev_discard_alignment()
1347 static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) bdev_discard_zeroes_data() argument
1349 return queue_discard_zeroes_data(bdev_get_queue(bdev)); bdev_discard_zeroes_data()
1352 static inline unsigned int bdev_write_same(struct block_device *bdev) bdev_write_same() argument
1354 struct request_queue *q = bdev_get_queue(bdev); bdev_write_same()
1385 static inline unsigned int block_size(struct block_device *bdev) block_size() argument
1387 return bdev->bd_block_size; block_size()
1503 struct blk_integrity *bdev_get_integrity(struct block_device *bdev) bdev_get_integrity() argument
1505 return bdev->bd_disk->integrity; bdev_get_integrity()
1665 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, blkdev_issue_flush() argument
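Nearly every helper above is a thin wrapper around bdev_get_queue(); callers mix the bdev_* accessors with queue-flag tests such as blk_queue_discard() (see the raid1/raid10 hits elsewhere in these results). A sketch that simply reports the derived limits (example_report is invented):

    #include <linux/blkdev.h>

    static void example_report(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            pr_info("lbs=%u pbs=%u io_min=%d io_opt=%d discard=%d\n",
                    (unsigned int)bdev_logical_block_size(bdev),
                    bdev_physical_block_size(bdev),
                    bdev_io_min(bdev),
                    bdev_io_opt(bdev),
                    blk_queue_discard(q) ? 1 : 0);
    }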
H A D cmdline-parser.h
38 const char *bdev);
H A D buffer_head.h
171 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
176 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
178 struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
196 void write_boundary_block(struct block_device *bdev,
361 static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, getblk_unmovable() argument
365 return __getblk_gfp(bdev, block, size, 0); getblk_unmovable()
368 static inline struct buffer_head *__getblk(struct block_device *bdev, __getblk() argument
372 return __getblk_gfp(bdev, block, size, __GFP_MOVABLE); __getblk()
377 * @bdev: the block_device to read from
386 __bread(struct block_device *bdev, sector_t block, unsigned size) __bread() argument
388 return __bread_gfp(bdev, block, size, __GFP_MOVABLE); __bread()
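__bread() above reads one block through the buffer cache with __GFP_MOVABLE pages; the caller owns a reference it must drop. Minimal use, assuming the usual brelse() pairing (example_read_block is invented):

    #include <linux/buffer_head.h>
    #include <linux/errno.h>

    static int example_read_block(struct block_device *bdev, sector_t block,
                                  unsigned size)
    {
            struct buffer_head *bh = __bread(bdev, block, size);

            if (!bh)
                    return -EIO;
            /* bh->b_data now holds the block's contents */
            brelse(bh);     /* drop the buffer-cache reference */
            return 0;
    }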
H A D dm-io.h
18 struct block_device *bdev; member in struct:dm_io_region
H A D genhd.h
181 * Protected with matching bdev lock but stat and other
424 extern void set_device_ro(struct block_device *bdev, int flag);
441 static inline sector_t get_start_sect(struct block_device *bdev) get_start_sect() argument
443 return bdev->bd_part->start_sect; get_start_sect()
607 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
608 extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
668 * bd_mutex or gendisk bdev bd_mutex, should be done using this
H A D cdrom.h
97 extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
100 extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
/linux-4.1.27/drivers/mtd/
H A D mtdsuper.c
126 struct block_device *bdev; mount_mtd() local
179 bdev = lookup_bdev(dev_name); mount_mtd()
180 if (IS_ERR(bdev)) { mount_mtd()
181 ret = PTR_ERR(bdev); mount_mtd()
189 major = MAJOR(bdev->bd_dev); mount_mtd()
190 mtdnr = MINOR(bdev->bd_dev); mount_mtd()
191 bdput(bdev); mount_mtd()
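mount_mtd() above resolves a path to a block_device only to recover its dev_t, then immediately drops the reference with bdput(). The same lookup in isolation (example_devt_from_path is invented):

    #include <linux/fs.h>
    #include <linux/err.h>

    static int example_devt_from_path(const char *dev_name, dev_t *out)
    {
            struct block_device *bdev = lookup_bdev(dev_name);

            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);
            *out = bdev->bd_dev;    /* split with MAJOR()/MINOR() as above */
            bdput(bdev);
            return 0;
    }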
H A D mtd_blkdevs.c
191 static int blktrans_open(struct block_device *bdev, fmode_t mode) blktrans_open() argument
193 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); blktrans_open()
268 static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) blktrans_getgeo() argument
270 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); blktrans_getgeo()
288 static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, blktrans_ioctl() argument
291 struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); blktrans_ioctl()
/linux-4.1.27/fs/nfs/blocklayout/
H A D dev.c
24 if (dev->bdev) bl_free_device()
25 blkdev_put(dev->bdev, FMODE_READ); bl_free_device()
125 map->bdev = dev->bdev; bl_map_simple()
198 d->bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); bl_parse_simple()
199 if (IS_ERR(d->bdev)) { bl_parse_simple()
201 MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev)); bl_parse_simple()
202 return PTR_ERR(d->bdev); bl_parse_simple()
206 d->len = i_size_read(d->bdev->bd_inode); bl_parse_simple()
210 d->bdev->bd_disk->disk_name); bl_parse_simple()
H A D blocklayout.h
100 struct block_device *bdev; member in struct:pnfs_block_dev_map
113 struct block_device *bdev; member in struct:pnfs_block_dev
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A D lloop.c
469 struct block_device *bdev, struct file *file) loop_set_fd()
504 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); loop_set_fd()
507 lo->lo_device = bdev; loop_set_fd()
532 bd_set_size(bdev, size << 9); loop_set_fd()
534 set_blocksize(bdev, lo->lo_blocksize); loop_set_fd()
546 static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev, loop_clr_fd() argument
572 invalidate_bdev(bdev); loop_clr_fd()
574 bd_set_size(bdev, 0); loop_clr_fd()
583 static int lo_open(struct block_device *bdev, fmode_t mode) lo_open() argument
585 struct lloop_device *lo = bdev->bd_disk->private_data; lo_open()
604 static int lo_ioctl(struct block_device *bdev, fmode_t mode, lo_ioctl() argument
607 struct lloop_device *lo = bdev->bd_disk->private_data; lo_ioctl()
614 err = loop_clr_fd(lo, bdev, 2); lo_ioctl()
616 blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */ lo_ioctl()
667 struct block_device *bdev = NULL; lloop_ioctl() local
711 bdev = blkdev_get_by_dev(dev, file->f_mode, NULL); lloop_ioctl()
712 if (IS_ERR(bdev)) { lloop_ioctl()
713 err = PTR_ERR(bdev); lloop_ioctl()
718 err = loop_set_fd(lo, NULL, bdev, file); lloop_ioctl()
721 blkdev_put(bdev, 0); lloop_ioctl()
748 bdev = lo->lo_device; lloop_ioctl()
749 err = loop_clr_fd(lo, bdev, 1); lloop_ioctl()
751 blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */ lloop_ioctl()
468 loop_set_fd(struct lloop_device *lo, struct file *unused, struct block_device *bdev, struct file *file) loop_set_fd() argument
/linux-4.1.27/drivers/gpu/drm/radeon/
H A D radeon_ttm.c
53 static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) radeon_get_rdev() argument
58 mman = container_of(bdev, struct radeon_mman, bdev); radeon_get_rdev()
122 static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) radeon_invalidate_caches() argument
127 static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, radeon_init_mem_type() argument
132 rdev = radeon_get_rdev(bdev); radeon_init_mem_type()
264 rdev = radeon_get_rdev(bo->bdev); radeon_move_blit()
321 rdev = radeon_get_rdev(bo->bdev); radeon_move_vram_ram()
368 rdev = radeon_get_rdev(bo->bdev); radeon_move_ram_vram()
405 rdev = radeon_get_rdev(bo->bdev); radeon_bo_move()
449 static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) radeon_ttm_io_mem_reserve() argument
451 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; radeon_ttm_io_mem_reserve()
452 struct radeon_device *rdev = radeon_get_rdev(bdev); radeon_ttm_io_mem_reserve()
512 static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) radeon_ttm_io_mem_free() argument
532 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_pin_userptr()
594 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_unpin_userptr()
677 static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, radeon_ttm_tt_create() argument
684 rdev = radeon_get_rdev(bdev); radeon_ttm_tt_create()
687 return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, radeon_ttm_tt_create()
698 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { radeon_ttm_tt_create()
740 rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_populate()
791 rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_unpopulate()
874 r = ttm_bo_device_init(&rdev->mman.bdev, radeon_ttm_init()
885 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, radeon_ttm_init()
911 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, radeon_ttm_init()
943 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); radeon_ttm_fini()
944 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); radeon_ttm_fini()
945 ttm_bo_device_release(&rdev->mman.bdev); radeon_ttm_fini()
961 man = &rdev->mman.bdev.man[TTM_PL_VRAM]; radeon_ttm_set_active_vram_size()
979 rdev = radeon_get_rdev(bo->bdev); radeon_ttm_fault()
1001 r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); radeon_mmap()
1022 struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv; radeon_mm_dump_table()
1024 struct ttm_bo_global *glob = rdev->mman.bdev.glob; radeon_mm_dump_table()
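radeon_get_rdev() above recovers the driver object from an embedded ttm_bo_device with container_of(); this only works because the struct embeds the member by value, not by pointer. The pattern in isolation (struct example_mman is invented; the ttm header path is assumed from the include/drm/ttm listing later in these results):

    #include <linux/kernel.h>
    #include <drm/ttm/ttm_bo_driver.h>

    struct example_mman {
            int other_state;
            struct ttm_bo_device bdev;      /* embedded, not a pointer */
    };

    static struct example_mman *example_from_bdev(struct ttm_bo_device *bdev)
    {
            /* subtract offsetof(struct example_mman, bdev) */
            return container_of(bdev, struct example_mman, bdev);
    }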
/linux-4.1.27/kernel/trace/
H A D blktrace.c
417 struct block_device *bdev) blk_trace_setup_lba()
421 if (bdev) blk_trace_setup_lba()
422 part = bdev->bd_part; blk_trace_setup_lba()
437 struct block_device *bdev, do_blk_trace_setup()
512 blk_trace_setup_lba(bt, bdev); do_blk_trace_setup()
540 struct block_device *bdev, blk_trace_setup()
550 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); blk_trace_setup()
564 dev_t dev, struct block_device *bdev, compat_blk_trace_setup()
583 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); compat_blk_trace_setup()
639 * @bdev: the block device
644 int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) blk_trace_ioctl() argument
650 q = bdev_get_queue(bdev); blk_trace_ioctl()
654 mutex_lock(&bdev->bd_mutex); blk_trace_ioctl()
658 bdevname(bdev, b); blk_trace_ioctl()
659 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); blk_trace_ioctl()
663 bdevname(bdev, b); blk_trace_ioctl()
664 ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); blk_trace_ioctl()
680 mutex_unlock(&bdev->bd_mutex); blk_trace_ioctl()
1488 struct block_device *bdev) blk_trace_setup_queue()
1501 bt->dev = bdev->bd_dev; blk_trace_setup_queue()
1504 blk_trace_setup_lba(bt, bdev); blk_trace_setup_queue()
1629 static struct request_queue *blk_trace_get_queue(struct block_device *bdev) blk_trace_get_queue() argument
1631 if (bdev->bd_disk == NULL) blk_trace_get_queue()
1634 return bdev_get_queue(bdev); blk_trace_get_queue()
1643 struct block_device *bdev; sysfs_blk_trace_attr_show() local
1646 bdev = bdget(part_devt(p)); sysfs_blk_trace_attr_show()
1647 if (bdev == NULL) sysfs_blk_trace_attr_show()
1650 q = blk_trace_get_queue(bdev); sysfs_blk_trace_attr_show()
1654 mutex_lock(&bdev->bd_mutex); sysfs_blk_trace_attr_show()
1673 mutex_unlock(&bdev->bd_mutex); sysfs_blk_trace_attr_show()
1675 bdput(bdev); sysfs_blk_trace_attr_show()
1684 struct block_device *bdev; sysfs_blk_trace_attr_store() local
1707 bdev = bdget(part_devt(p)); sysfs_blk_trace_attr_store()
1708 if (bdev == NULL) sysfs_blk_trace_attr_store()
1711 q = blk_trace_get_queue(bdev); sysfs_blk_trace_attr_store()
1715 mutex_lock(&bdev->bd_mutex); sysfs_blk_trace_attr_store()
1719 ret = blk_trace_setup_queue(q, bdev); sysfs_blk_trace_attr_store()
1727 ret = blk_trace_setup_queue(q, bdev); sysfs_blk_trace_attr_store()
1741 mutex_unlock(&bdev->bd_mutex); sysfs_blk_trace_attr_store()
1743 bdput(bdev); sysfs_blk_trace_attr_store()
416 blk_trace_setup_lba(struct blk_trace *bt, struct block_device *bdev) blk_trace_setup_lba() argument
436 do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, struct blk_user_trace_setup *buts) do_blk_trace_setup() argument
539 blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) blk_trace_setup() argument
563 compat_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct block_device *bdev, char __user *arg) compat_blk_trace_setup() argument
1487 blk_trace_setup_queue(struct request_queue *q, struct block_device *bdev) blk_trace_setup_queue() argument
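blk_trace_ioctl() above brackets setup and teardown with bdev->bd_mutex so concurrent BLKTRACE* ioctls on one device serialize, after checking that the bdev still has a queue. A skeleton of that locking shape (sketch only; the body under the lock is elided):

    #include <linux/blkdev.h>
    #include <linux/mutex.h>
    #include <linux/errno.h>

    static int example_trace_op(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);
            int ret = 0;

            if (!q)
                    return -ENXIO;
            mutex_lock(&bdev->bd_mutex);
            /* ... setup or teardown under the lock ... */
            mutex_unlock(&bdev->bd_mutex);
            return ret;
    }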
/linux-4.1.27/drivers/block/xen-blkback/
H A D xenbus.c
90 if (!blkif->irq || !blkif->vbd.bdev) xen_update_blkif_status()
108 err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping); xen_update_blkif_status()
113 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); xen_update_blkif_status()
408 if (vbd->bdev) xen_vbd_free()
409 blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE); xen_vbd_free()
410 vbd->bdev = NULL; xen_vbd_free()
418 struct block_device *bdev; xen_vbd_create() local
428 bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? xen_vbd_create()
431 if (IS_ERR(bdev)) { xen_vbd_create()
437 vbd->bdev = bdev; xen_vbd_create()
438 if (vbd->bdev->bd_disk == NULL) { xen_vbd_create()
446 if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) xen_vbd_create()
448 if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) xen_vbd_create()
451 q = bdev_get_queue(bdev); xen_vbd_create()
509 struct block_device *bdev = be->blkif->vbd.bdev; xen_blkbk_discard() local
510 struct request_queue *q = bdev_get_queue(bdev); xen_blkbk_discard()
831 bdev_logical_block_size(be->blkif->vbd.bdev)); connect()
838 bdev_physical_block_size(be->blkif->vbd.bdev)); connect()
H A D common.h
229 struct block_device *bdev; member in struct:xen_vbd
362 #define vbd_sz(_v) ((_v)->bdev->bd_part ? \
363 (_v)->bdev->bd_part->nr_sects : \
364 get_capacity((_v)->bdev->bd_disk))
376 struct block_device *bdev; member in struct:phys_req
H A D blkback.c
497 req->bdev = vbd->bdev; xen_vbd_translate()
977 struct block_device *bdev = blkif->vbd.bdev; dispatch_discard_io() local
999 err = blkdev_issue_discard(bdev, req->u.discard.sector_number, dispatch_discard_io()
1287 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev dispatch_rw_block_io()
1292 ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { dispatch_rw_block_io()
1334 bio->bi_bdev = preq.bdev; dispatch_rw_block_io()
1352 bio->bi_bdev = preq.bdev; dispatch_rw_block_io()
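The vbd_sz() macro quoted above (common.h lines 362-364) prefers the partition's nr_sects and falls back to whole-disk capacity when the bdev is not a partition. The same logic written as a function (example_bdev_sectors is invented):

    #include <linux/genhd.h>

    static sector_t example_bdev_sectors(struct block_device *bdev)
    {
            return bdev->bd_part ? bdev->bd_part->nr_sects
                                 : get_capacity(bdev->bd_disk);
    }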
/linux-4.1.27/drivers/block/
H A D brd.c
328 struct block_device *bdev = bio->bi_bdev; brd_make_request() local
329 struct brd_device *brd = bdev->bd_disk->private_data; brd_make_request()
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) brd_make_request()
363 static int brd_rw_page(struct block_device *bdev, sector_t sector, brd_rw_page() argument
366 struct brd_device *brd = bdev->bd_disk->private_data; brd_rw_page()
373 static long brd_direct_access(struct block_device *bdev, sector_t sector, brd_direct_access() argument
376 struct brd_device *brd = bdev->bd_disk->private_data; brd_direct_access()
397 static int brd_ioctl(struct block_device *bdev, fmode_t mode, brd_ioctl() argument
401 struct brd_device *brd = bdev->bd_disk->private_data; brd_ioctl()
411 mutex_lock(&bdev->bd_mutex); brd_ioctl()
413 if (bdev->bd_openers <= 1) { brd_ioctl()
421 kill_bdev(bdev); brd_ioctl()
425 mutex_unlock(&bdev->bd_mutex); brd_ioctl()
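brd_make_request()/brd_ioctl() above show the standard route from a block_device to driver state: bdev->bd_disk->private_data, set when the gendisk was created. A skeletal block_device_operations ioctl using the same route (struct example_dev is invented):

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    struct example_dev {
            int state;              /* driver-private data */
    };

    static int example_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long arg)
    {
            struct example_dev *d = bdev->bd_disk->private_data;

            (void)d;                /* dispatch on cmd here */
            return -ENOTTY;         /* unrecognized command */
    }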
H A D nbd.c
580 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, __nbd_ioctl() argument
592 fsync_bdev(bdev); __nbd_ioctl()
614 kill_bdev(bdev); __nbd_ioctl()
629 bdev->bd_invalidated = 1; __nbd_ioctl()
639 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl()
640 set_blocksize(bdev, nbd->blksize); __nbd_ioctl()
646 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl()
647 set_blocksize(bdev, nbd->blksize); __nbd_ioctl()
661 bdev->bd_inode->i_size = nbd->bytesize; __nbd_ioctl()
662 set_blocksize(bdev, nbd->blksize); __nbd_ioctl()
679 set_device_ro(bdev, true); __nbd_ioctl()
706 kill_bdev(bdev); __nbd_ioctl()
708 set_device_ro(bdev, false); __nbd_ioctl()
713 bdev->bd_inode->i_size = 0; __nbd_ioctl()
716 ioctl_by_bdev(bdev, BLKRRPART, 0); __nbd_ioctl()
739 static int nbd_ioctl(struct block_device *bdev, fmode_t mode, nbd_ioctl() argument
742 struct nbd_device *nbd = bdev->bd_disk->private_data; nbd_ioctl()
751 error = __nbd_ioctl(bdev, nbd, cmd, arg); nbd_ioctl()
H A D pmem.c
62 struct block_device *bdev = bio->bi_bdev; pmem_make_request() local
63 struct pmem_device *pmem = bdev->bd_disk->private_data; pmem_make_request()
70 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) { pmem_make_request()
89 static int pmem_rw_page(struct block_device *bdev, sector_t sector, pmem_rw_page() argument
92 struct pmem_device *pmem = bdev->bd_disk->private_data; pmem_rw_page()
100 static long pmem_direct_access(struct block_device *bdev, sector_t sector, pmem_direct_access() argument
103 struct pmem_device *pmem = bdev->bd_disk->private_data; pmem_direct_access()
H A D loop.c
172 struct block_device *bdev = lo->lo_device; figure_loop_size() local
181 bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); figure_loop_size()
183 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); figure_loop_size()
485 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, loop_change_fd() argument
525 ioctl_by_bdev(bdev, BLKRRPART, 0); loop_change_fd()
670 struct block_device *bdev, unsigned int arg) loop_set_fd()
697 if (f->f_mapping->host->i_bdev == bdev) loop_set_fd()
735 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); loop_set_fd()
738 lo->lo_device = bdev; loop_set_fd()
751 bd_set_size(bdev, size << 9); loop_set_fd()
754 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); loop_set_fd()
756 set_blocksize(bdev, lo_blocksize); loop_set_fd()
762 ioctl_by_bdev(bdev, BLKRRPART, 0); loop_set_fd()
765 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). loop_set_fd()
767 bdgrab(bdev); loop_set_fd()
819 struct block_device *bdev = lo->lo_device; loop_clr_fd() local
859 if (bdev) { loop_clr_fd()
860 bdput(bdev); loop_clr_fd()
861 invalidate_bdev(bdev); loop_clr_fd()
865 if (bdev) { loop_clr_fd()
866 bd_set_size(bdev, 0); loop_clr_fd()
868 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); loop_clr_fd()
874 if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) loop_clr_fd()
875 ioctl_by_bdev(bdev, BLKRRPART, 0); loop_clr_fd()
1106 static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) loop_set_capacity() argument
1114 static int lo_ioctl(struct block_device *bdev, fmode_t mode, lo_ioctl() argument
1117 struct loop_device *lo = bdev->bd_disk->private_data; lo_ioctl()
1123 err = loop_set_fd(lo, mode, bdev, arg); lo_ioctl()
1126 err = loop_change_fd(lo, bdev, arg); lo_ioctl()
1155 err = loop_set_capacity(lo, bdev); lo_ioctl()
1285 static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, lo_compat_ioctl() argument
1288 struct loop_device *lo = bdev->bd_disk->private_data; lo_compat_ioctl()
1311 err = lo_ioctl(bdev, mode, cmd, arg); lo_compat_ioctl()
1321 static int lo_open(struct block_device *bdev, fmode_t mode) lo_open() argument
1327 lo = bdev->bd_disk->private_data; lo_open()
669 loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) loop_set_fd() argument
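figure_loop_size()/loop_set_fd() above pair three calls whenever the backing size changes: set_capacity() on the gendisk, bd_set_size() on the bdev inode, and a KOBJ_CHANGE uevent so userspace re-reads the size. Condensed sketch (example_resize is invented; set_capacity() is the standard gendisk helper, not itself among the hits):

    #include <linux/fs.h>
    #include <linux/genhd.h>
    #include <linux/kobject.h>

    static void example_resize(struct gendisk *disk, struct block_device *bdev,
                               loff_t bytes)
    {
            set_capacity(disk, bytes >> 9);         /* capacity in sectors */
            bd_set_size(bdev, bytes);               /* bdev inode i_size */
            kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
    }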
H A D pktcdvd.c
364 MAJOR(pd->bdev->bd_dev), class_pktcdvd_show_map()
365 MINOR(pd->bdev->bd_dev)); class_pktcdvd_show_map()
701 struct request_queue *q = bdev_get_queue(pd->bdev); pkt_generic_packet()
725 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); pkt_generic_packet()
1064 bio->bi_bdev = pd->bdev; pkt_gather_data()
1158 pkt->bio->bi_bdev = pd->bdev; pkt_start_recovery()
1304 pkt->w_bio->bi_bdev = pd->bdev; pkt_start_write()
2199 bdget(pd->bdev->bd_dev); pkt_open_dev()
2200 if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) pkt_open_dev()
2209 set_capacity(pd->bdev->bd_disk, lba << 2); pkt_open_dev()
2210 bd_set_size(pd->bdev, (loff_t)lba << 11); pkt_open_dev()
2212 q = bdev_get_queue(pd->bdev); pkt_open_dev()
2244 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); pkt_open_dev()
2261 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); pkt_release_dev()
2273 static int pkt_open(struct block_device *bdev, fmode_t mode) pkt_open() argument
2280 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); pkt_open()
2302 set_blocksize(bdev, CD_FRAMESIZE); pkt_open()
2352 cloned_bio->bi_bdev = pd->bdev; pkt_make_request_read()
2545 bdevname(pd->bdev, bdev_buf)); pkt_seq_show()
2616 struct block_device *bdev; pkt_new_dev() local
2626 if (pd2->bdev->bd_dev == dev) { pkt_new_dev()
2628 bdevname(pd2->bdev, b)); pkt_new_dev()
2637 bdev = bdget(dev); pkt_new_dev()
2638 if (!bdev) pkt_new_dev()
2640 ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL); pkt_new_dev()
2647 pd->bdev = bdev; pkt_new_dev()
2648 set_blocksize(bdev, CD_FRAMESIZE); pkt_new_dev()
2661 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); pkt_new_dev()
2665 blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); pkt_new_dev()
2671 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) pkt_ioctl() argument
2673 struct pktcdvd_device *pd = bdev->bd_disk->private_data; pkt_ioctl()
2677 cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); pkt_ioctl()
2697 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); pkt_ioctl()
2717 if (!pd->bdev) pkt_check_events()
2719 attached_disk = pd->bdev->bd_disk; pkt_check_events()
2804 disk->events = pd->bdev->bd_disk->events; pkt_setup_dev()
2805 disk->async_events = pd->bdev->bd_disk->async_events; pkt_setup_dev()
2867 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); pkt_remove_dev()
2895 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); pkt_get_status()
H A D xen-blkfront.c
349 static int blkif_ioctl(struct block_device *bdev, fmode_t mode, blkif_ioctl() argument
352 struct blkfront_info *info = bdev->bd_disk->private_data; blkif_ioctl()
1628 struct block_device *bdev = NULL; blkfront_closing() local
1638 bdev = bdget_disk(info->gd, 0); blkfront_closing()
1642 if (!bdev) { blkfront_closing()
1647 mutex_lock(&bdev->bd_mutex); blkfront_closing()
1649 if (bdev->bd_openers) { blkfront_closing()
1658 mutex_unlock(&bdev->bd_mutex); blkfront_closing()
1659 bdput(bdev); blkfront_closing()
1937 struct block_device *bdev = NULL; blkfront_remove() local
1948 bdev = bdget_disk(disk, 0); blkfront_remove()
1953 if (!bdev) { blkfront_remove()
1960 * state. See if it's safe to remove the disk. If the bdev blkfront_remove()
1964 mutex_lock(&bdev->bd_mutex); blkfront_remove()
1969 xbdev->nodename, bdev->bd_openers); blkfront_remove()
1971 if (info && !bdev->bd_openers) { blkfront_remove()
1977 mutex_unlock(&bdev->bd_mutex); blkfront_remove()
1978 bdput(bdev); blkfront_remove()
1990 static int blkif_open(struct block_device *bdev, fmode_t mode) blkif_open() argument
1992 struct gendisk *disk = bdev->bd_disk; blkif_open()
2021 struct block_device *bdev; blkif_release() local
2026 bdev = bdget_disk(disk, 0); blkif_release()
2028 if (!bdev) { blkif_release()
2032 if (bdev->bd_openers) blkif_release()
2037 * deferred this request, because the bdev was still open. blkif_release()
2045 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); blkif_release()
2054 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); blkif_release()
2061 bdput(bdev); blkif_release()
H A D swim.c
617 static int floppy_open(struct block_device *bdev, fmode_t mode) floppy_open() argument
619 struct floppy_state *fs = bdev->bd_disk->private_data; floppy_open()
647 check_disk_change(bdev); floppy_open()
665 static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) floppy_unlocked_open() argument
670 ret = floppy_open(bdev, mode); floppy_unlocked_open()
692 static int floppy_ioctl(struct block_device *bdev, fmode_t mode, floppy_ioctl() argument
695 struct floppy_state *fs = bdev->bd_disk->private_data; floppy_ioctl()
724 static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) floppy_getgeo() argument
726 struct floppy_state *fs = bdev->bd_disk->private_data; floppy_getgeo()
H A D cpqarray.c
162 static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
164 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
165 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
823 static int ida_open(struct block_device *bdev, fmode_t mode) ida_open() argument
825 drv_info_t *drv = get_drv(bdev->bd_disk); ida_open()
826 ctlr_info_t *host = get_host(bdev->bd_disk); ida_open()
828 DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name)); ida_open()
845 static int ida_unlocked_open(struct block_device *bdev, fmode_t mode) ida_unlocked_open() argument
850 ret = ida_open(bdev, mode); ida_unlocked_open()
1123 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo) ida_getgeo() argument
1125 drv_info_t *drv = get_drv(bdev->bd_disk); ida_getgeo()
1144 static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) ida_locked_ioctl() argument
1146 drv_info_t *drv = get_drv(bdev->bd_disk); ida_locked_ioctl()
1147 ctlr_info_t *host = get_host(bdev->bd_disk); ida_locked_ioctl()
1182 if (MINOR(bdev->bd_dev) != 0) ida_locked_ioctl()
1212 static int ida_ioctl(struct block_device *bdev, fmode_t mode, ida_ioctl() argument
1218 ret = ida_locked_ioctl(bdev, mode, cmd, param); ida_ioctl()
H A D swim3.c
252 static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
254 static int floppy_open(struct block_device *bdev, fmode_t mode);
904 static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, floppy_locked_ioctl() argument
907 struct floppy_state *fs = bdev->bd_disk->private_data; floppy_locked_ioctl()
932 static int floppy_ioctl(struct block_device *bdev, fmode_t mode, floppy_ioctl() argument
938 ret = floppy_locked_ioctl(bdev, mode, cmd, param); floppy_ioctl()
944 static int floppy_open(struct block_device *bdev, fmode_t mode) floppy_open() argument
946 struct floppy_state *fs = bdev->bd_disk->private_data; floppy_open()
984 check_disk_change(bdev); floppy_open()
1013 static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) floppy_unlocked_open() argument
1018 ret = floppy_open(bdev, mode); floppy_unlocked_open()
H A D z2ram.c
148 static int z2_open(struct block_device *bdev, fmode_t mode) z2_open() argument
157 device = MINOR(bdev->bd_dev); z2_open()
H A D ataflop.c
366 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
371 static int floppy_open(struct block_device *bdev, fmode_t mode);
1509 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, fd_locked_ioctl() argument
1512 struct gendisk *disk = bdev->bd_disk; fd_locked_ioctl()
1687 check_disk_change(bdev); fd_locked_ioctl()
1694 static int fd_ioctl(struct block_device *bdev, fmode_t mode, fd_ioctl() argument
1700 ret = fd_locked_ioctl(bdev, mode, cmd, arg); fd_ioctl()
1841 static int floppy_open(struct block_device *bdev, fmode_t mode) floppy_open() argument
1843 struct atari_floppy_struct *p = bdev->bd_disk->private_data; floppy_open()
1844 int type = MINOR(bdev->bd_dev) >> 2; floppy_open()
1864 check_disk_change(bdev); floppy_open()
1878 static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) floppy_unlocked_open() argument
1883 ret = floppy_open(bdev, mode); floppy_unlocked_open()
H A D amiflop.c
1453 static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) fd_getgeo() argument
1455 int drive = MINOR(bdev->bd_dev) & 3; fd_getgeo()
1463 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, fd_locked_ioctl() argument
1466 struct amiga_floppy_struct *p = bdev->bd_disk->private_data; fd_locked_ioctl()
1478 fsync_bdev(bdev); fd_locked_ioctl()
1507 invalidate_bdev(bdev); fd_locked_ioctl()
1540 static int fd_ioctl(struct block_device *bdev, fmode_t mode, fd_ioctl() argument
1546 ret = fd_locked_ioctl(bdev, mode, cmd, param); fd_ioctl()
1587 static int floppy_open(struct block_device *bdev, fmode_t mode) floppy_open() argument
1589 int drive = MINOR(bdev->bd_dev) & 3; floppy_open()
1590 int system = (MINOR(bdev->bd_dev) & 4) >> 2; floppy_open()
1603 check_disk_change(bdev); floppy_open()
H A D cciss.c
168 static int cciss_open(struct block_device *bdev, fmode_t mode);
169 static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
171 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
173 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
1078 static int cciss_open(struct block_device *bdev, fmode_t mode) cciss_open() argument
1080 ctlr_info_t *h = get_host(bdev->bd_disk); cciss_open()
1081 drive_info_struct *drv = get_drv(bdev->bd_disk); cciss_open()
1083 dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name); cciss_open()
1095 if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */ cciss_open()
1097 if (MINOR(bdev->bd_dev) & 0x0f) { cciss_open()
1113 static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode) cciss_unlocked_open() argument
1118 ret = cciss_open(bdev, mode); cciss_unlocked_open()
1143 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
1145 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
1148 static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, cciss_compat_ioctl() argument
1167 return cciss_ioctl(bdev, mode, cmd, arg); cciss_compat_ioctl()
1170 return cciss_ioctl32_passthru(bdev, mode, cmd, arg); cciss_compat_ioctl()
1172 return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg); cciss_compat_ioctl()
1179 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, cciss_ioctl32_passthru() argument
1208 err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); cciss_ioctl32_passthru()
1219 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, cciss_ioctl32_big_passthru() argument
1250 err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); cciss_ioctl32_big_passthru()
1262 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo) cciss_getgeo() argument
1264 drive_info_struct *drv = get_drv(bdev->bd_disk); cciss_getgeo()
1700 static int cciss_ioctl(struct block_device *bdev, fmode_t mode, cciss_ioctl() argument
1703 struct gendisk *disk = bdev->bd_disk; cciss_ioctl()
1750 return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); cciss_ioctl()
H A D floppy.c
3221 static int invalidate_drive(struct block_device *bdev) invalidate_drive() argument
3224 set_bit((long)bdev->bd_disk->private_data, &fake_change); invalidate_drive()
3226 check_disk_change(bdev); invalidate_drive()
3231 int drive, int type, struct block_device *bdev) set_geometry()
3257 struct block_device *bdev = opened_bdev[cnt]; set_geometry() local
3258 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) set_geometry()
3260 __invalidate_device(bdev, true); set_geometry()
3293 invalidate_drive(bdev); set_geometry()
3364 static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) fd_getgeo() argument
3366 int drive = (long)bdev->bd_disk->private_data; fd_getgeo()
3381 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, fd_locked_ioctl() argument
3384 int drive = (long)bdev->bd_disk->private_data; fd_locked_ioctl()
3453 return invalidate_drive(bdev); fd_locked_ioctl()
3456 return set_geometry(cmd, &inparam.g, drive, type, bdev); fd_locked_ioctl()
3489 return invalidate_drive(bdev); fd_locked_ioctl()
3557 static int fd_ioctl(struct block_device *bdev, fmode_t mode, fd_ioctl() argument
3563 ret = fd_locked_ioctl(bdev, mode, cmd, param); fd_ioctl()
3642 static int floppy_open(struct block_device *bdev, fmode_t mode) floppy_open() argument
3644 int drive = (long)bdev->bd_disk->private_data; floppy_open()
3653 if (opened_bdev[drive] && opened_bdev[drive] != bdev) floppy_open()
3663 opened_bdev[drive] = bdev; floppy_open()
3697 new_dev = MINOR(bdev->bd_dev); floppy_open()
3712 check_disk_change(bdev); floppy_open()
3786 static int __floppy_read_block_0(struct block_device *bdev, int drive) __floppy_read_block_0() argument
3800 size = bdev->bd_block_size; __floppy_read_block_0()
3813 bio.bi_bdev = bdev; __floppy_read_block_0()
3230 set_geometry(unsigned int cmd, struct floppy_struct *g, int drive, int type, struct block_device *bdev) set_geometry() argument
/linux-4.1.27/drivers/platform/x86/
H A D apple-gmux.c
36 struct backlight_device *bdev; member in struct:apple_gmux_data
442 struct backlight_device *bdev; gmux_probe() local
536 bdev = backlight_device_register("gmux_backlight", &pnp->dev, gmux_probe()
538 if (IS_ERR(bdev)) { gmux_probe()
539 ret = PTR_ERR(bdev); gmux_probe()
543 gmux_data->bdev = bdev; gmux_probe()
544 bdev->props.brightness = gmux_get_brightness(bdev); gmux_probe()
545 backlight_update_status(bdev); gmux_probe()
612 backlight_device_unregister(bdev); gmux_probe()
642 backlight_device_unregister(gmux_data->bdev); gmux_remove()
/linux-4.1.27/include/drm/ttm/
H A D ttm_bo_driver.h
96 * @bdev: Pointer to a struct ttm_bo_device.
103 * @bdev: Pointer to the current struct ttm_bo_device.
115 struct ttm_bo_device *bdev; member in struct:ttm_tt
260 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
261 * static information. bdev::driver::io_mem_free is never used.
271 struct ttm_bo_device *bdev; member in struct:ttm_mem_type_manager
321 * @bdev: pointer to a struct ttm_bo_device:
331 struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
359 * @bdev: the buffer object device.
368 int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
369 int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
435 int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
436 void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
569 * @bdev: pointer to a struct ttm_bo_device:
579 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
582 extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
666 * @bdev: Pointer to a struct ttm_bo_device.
672 extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
708 extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
713 * @bdev: A pointer to a struct ttm_bo_device to initialize.
725 extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
942 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
944 void ttm_mem_io_free(struct ttm_bo_device *bdev,
1040 * @bdev: Pointer to a struct ttm_bo_device.
1051 extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
H A D ttm_bo_api.h
154 * @bdev: Pointer to the buffer object device structure.
201 struct ttm_bo_device *bdev; member in struct:ttm_buffer_object
230 * Members protected by the bdev::lru_lock.
394 extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
401 extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
433 * @bdev: Pointer to a ttm_bo_device struct.
439 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
442 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
449 * @bdev: Pointer to a ttm_bo_device struct.
481 extern int ttm_bo_init(struct ttm_bo_device *bdev,
497 * @bdev: Pointer to a ttm_bo_device struct.
519 extern int ttm_bo_create(struct ttm_bo_device *bdev,
531 * @bdev: Pointer to a ttm_bo_device struct.
544 extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
549 * @bdev: Pointer to a ttm_bo_device struct.
573 extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
578 * @bdev: Pointer to a ttm_bo_device struct.
594 extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
664 * @bdev: Pointer to the ttm_bo_device with the address space manager.
671 struct ttm_bo_device *bdev);
676 * @bdev: Pointer to the struct ttm_bo_device.
694 extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
698 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
/linux-4.1.27/fs/f2fs/
H A D gc.h
106 struct block_device *bdev = sbi->sb->s_bdev; is_idle() local
107 struct request_queue *q = bdev_get_queue(bdev); is_idle()
/linux-4.1.27/kernel/power/
H A D block_io.c
28 static int submit(int rw, struct block_device *bdev, sector_t sector, submit() argument
36 bio->bi_bdev = bdev; submit()
/linux-4.1.27/fs/btrfs/
H A D volumes.c
113 static void btrfs_kobject_uevent(struct block_device *bdev, btrfs_kobject_uevent() argument
118 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); btrfs_kobject_uevent()
122 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), btrfs_kobject_uevent()
123 &disk_to_dev(bdev->bd_disk)->kobj); btrfs_kobject_uevent()
189 int flush, struct block_device **bdev, btrfs_get_bdev_and_sb()
194 *bdev = blkdev_get_by_path(device_path, flags, holder); btrfs_get_bdev_and_sb()
196 if (IS_ERR(*bdev)) { btrfs_get_bdev_and_sb()
197 ret = PTR_ERR(*bdev); btrfs_get_bdev_and_sb()
203 filemap_write_and_wait((*bdev)->bd_inode->i_mapping); btrfs_get_bdev_and_sb()
204 ret = set_blocksize(*bdev, 4096); btrfs_get_bdev_and_sb()
206 blkdev_put(*bdev, flags); btrfs_get_bdev_and_sb()
209 invalidate_bdev(*bdev); btrfs_get_bdev_and_sb()
210 *bh = btrfs_read_dev_super(*bdev); btrfs_get_bdev_and_sb()
213 blkdev_put(*bdev, flags); btrfs_get_bdev_and_sb()
220 *bdev = NULL; btrfs_get_bdev_and_sb()
275 bdi = blk_get_backing_dev_info(device->bdev); run_scheduled_bios()
519 * in case of 2a the stale bdev has to be updated as well. device_list_add()
645 if (device->bdev) { btrfs_close_extra_devices()
646 blkdev_put(device->bdev, device->mode); btrfs_close_extra_devices()
647 device->bdev = NULL; btrfs_close_extra_devices()
667 fs_devices->latest_bdev = latest_dev->bdev; btrfs_close_extra_devices()
678 if (device->bdev) __free_device()
679 blkdev_put(device->bdev, device->mode); __free_device()
707 if (device->bdev) __btrfs_close_devices()
777 struct block_device *bdev; __btrfs_open_devices() local
790 if (device->bdev) list_for_each_entry()
797 &bdev, &bh)) list_for_each_entry()
817 device->writeable = !bdev_read_only(bdev); list_for_each_entry()
821 q = bdev_get_queue(bdev); list_for_each_entry()
825 device->bdev = bdev; list_for_each_entry()
829 if (!blk_queue_nonrot(bdev_get_queue(bdev))) list_for_each_entry()
844 blkdev_put(bdev, flags); list_for_each_entry()
853 fs_devices->latest_bdev = latest_dev->bdev;
884 struct block_device *bdev; btrfs_scan_one_device() local
904 bdev = blkdev_get_by_path(path, flags, holder); btrfs_scan_one_device()
906 if (IS_ERR(bdev)) { btrfs_scan_one_device()
907 ret = PTR_ERR(bdev); btrfs_scan_one_device()
912 if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode)) btrfs_scan_one_device()
925 page = read_cache_page_gfp(bdev->bd_inode->i_mapping, btrfs_scan_one_device()
965 blkdev_put(bdev, flags); btrfs_scan_one_device()
1069 map = (struct map_lookup *)em->bdev; list_for_each_entry()
1550 struct block_device *bdev; btrfs_rm_device() local
1614 !tmp->bdev) { list_for_each_entry()
1619 bdev = NULL;
1630 &bdev, &bh);
1702 if (device->bdev == root->fs_info->sb->s_bdev)
1703 root->fs_info->sb->s_bdev = next_device->bdev;
1704 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1705 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1707 if (device->bdev) {
1759 i_size_read(bdev->bd_inode))
1763 bh = __bread(bdev, bytenr / 4096,
1783 if (bdev) {
1785 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1793 if (bdev)
1794 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1836 if (srcdev->bdev) btrfs_rm_dev_replace_remove_srcdev()
1879 if (tgtdev->bdev) { btrfs_destroy_dev_replace_tgtdev()
1887 if (tgtdev->bdev == fs_info->sb->s_bdev) btrfs_destroy_dev_replace_tgtdev()
1888 fs_info->sb->s_bdev = next_device->bdev; btrfs_destroy_dev_replace_tgtdev()
1889 if (tgtdev->bdev == fs_info->fs_devices->latest_bdev) btrfs_destroy_dev_replace_tgtdev()
1890 fs_info->fs_devices->latest_bdev = next_device->bdev; btrfs_destroy_dev_replace_tgtdev()
1906 struct block_device *bdev; btrfs_find_device_by_path() local
1911 root->fs_info->bdev_holder, 0, &bdev, &bh); btrfs_find_device_by_path()
1922 blkdev_put(bdev, FMODE_READ); btrfs_find_device_by_path()
1941 if (tmp->in_fs_metadata && !tmp->bdev) { list_for_each_entry()
2101 struct block_device *bdev; btrfs_init_new_device() local
2112 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, btrfs_init_new_device()
2114 if (IS_ERR(bdev)) btrfs_init_new_device()
2115 return PTR_ERR(bdev); btrfs_init_new_device()
2123 filemap_write_and_wait(bdev->bd_inode->i_mapping); btrfs_init_new_device()
2129 if (device->bdev == bdev) { list_for_each_entry()
2161 q = bdev_get_queue(bdev);
2169 device->total_bytes = i_size_read(bdev->bd_inode);
2173 device->bdev = bdev;
2178 set_blocksize(device->bdev, 4096);
2203 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2296 blkdev_put(bdev, FMODE_EXCL);
2310 struct block_device *bdev; btrfs_init_dev_replace_tgtdev() local
2323 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, btrfs_init_dev_replace_tgtdev()
2325 if (IS_ERR(bdev)) { btrfs_init_dev_replace_tgtdev()
2327 return PTR_ERR(bdev); btrfs_init_dev_replace_tgtdev()
2330 filemap_write_and_wait(bdev->bd_inode->i_mapping); btrfs_init_dev_replace_tgtdev()
2334 if (device->bdev == bdev) { list_for_each_entry()
2342 if (i_size_read(bdev->bd_inode) <
2364 q = bdev_get_queue(bdev);
2380 device->bdev = bdev;
2385 set_blocksize(device->bdev, 4096);
2396 blkdev_put(bdev, FMODE_EXCL);
2612 map = (struct map_lookup *)em->bdev; btrfs_remove_chunk()
4455 em->bdev = (struct block_device *)map; __btrfs_alloc_chunk()
4550 map = (struct map_lookup *)em->bdev; btrfs_finish_chunk_alloc()
4692 map = (struct map_lookup *)em->bdev; btrfs_chunk_readonly()
4772 map = (struct map_lookup *)em->bdev; btrfs_num_copies()
4808 map = (struct map_lookup *)em->bdev; btrfs_full_stripe_len()
4829 map = (struct map_lookup *)em->bdev; btrfs_is_parity_mirror()
4857 if (map->stripes[optimal].dev->bdev && find_live_mirror()
4861 if (map->stripes[i].dev->bdev && find_live_mirror()
4990 map = (struct map_lookup *)em->bdev; __btrfs_map_block()
5532 map = (struct map_lookup *)em->bdev; btrfs_rmap_block()
5611 if (dev->bdev) { btrfs_end_bio()
5674 if (device->missing || !device->bdev) { btrfs_schedule_bio()
5720 static int bio_size_ok(struct block_device *bdev, struct bio *bio, bio_size_ok() argument
5724 struct request_queue *q = bdev_get_queue(bdev); bio_size_ok()
5727 .bi_bdev = bdev, bio_size_ok()
5766 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, submit_stripe_bio()
5771 bio->bi_bdev = dev->bdev; submit_stripe_bio()
5787 int nr_vecs = bio_get_nr_vecs(dev->bdev); breakup_stripe_bio()
5791 bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS); breakup_stripe_bio()
5882 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) { btrfs_map_bio()
5891 if (!bio_size_ok(dev->bdev, first_bio, btrfs_map_bio()
6047 em->bdev = (struct block_device *)map; read_one_chunk()
6206 if (!device->bdev && !btrfs_test_opt(root, DEGRADED)) read_one_dev()
6209 if(!device->bdev && !device->missing) { read_one_dev()
6213 * device->bdev is NULL, and so we have to set read_one_dev()
6611 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", btrfs_dev_stat_print_on_error()
6631 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n", btrfs_dev_stat_print_on_load()
6680 bh = btrfs_read_dev_super(device->bdev); btrfs_scratch_superblock()
6731 map = (struct map_lookup *)em->bdev; btrfs_update_commit_device_bytes_used()
188 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, int flush, struct block_device **bdev, struct buffer_head **bh) btrfs_get_bdev_and_sb() argument
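
The btrfs hits above mostly orbit one idiom: open the device exclusively with blkdev_get_by_path(), read a block, and release it with a matching blkdev_put(). A minimal sketch of that idiom against the 4.1 API follows; read_first_block() and its holder argument are illustrative names, not btrfs code.

    #include <linux/fs.h>
    #include <linux/blkdev.h>
    #include <linux/buffer_head.h>
    #include <linux/err.h>

    /* Sketch only: read_first_block() is a made-up name, not btrfs code. */
    static int read_first_block(const char *path, void *holder)
    {
            struct block_device *bdev;
            struct buffer_head *bh;
            const fmode_t mode = FMODE_READ | FMODE_EXCL;

            bdev = blkdev_get_by_path(path, mode, holder);
            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);

            set_blocksize(bdev, 4096);
            bh = __bread(bdev, 0, 4096);    /* block 0, 4 KiB */
            if (!bh) {
                    blkdev_put(bdev, mode);
                    return -EIO;
            }
            /* ... inspect bh->b_data here ... */
            brelse(bh);
            blkdev_put(bdev, mode);
            return 0;
    }

Note how every blkdev_get_by_path() is paired with a blkdev_put() carrying the same mode flags, exactly as in the error paths listed above.
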
check-integrity.c:199 struct block_device *bdev; member in struct:btrfsic_dev_state
276 struct block_device *bdev,
298 struct block_device *bdev,
387 struct block_device *bdev);
473 ds->bdev = NULL; btrfsic_dev_state_init()
514 ((unsigned int)((uintptr_t)b->dev_state->bdev))) & btrfsic_block_hashtable_add()
526 struct block_device *bdev, btrfsic_block_hashtable_lookup()
532 ((unsigned int)((uintptr_t)bdev))) & btrfsic_block_hashtable_lookup()
541 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr) btrfsic_block_hashtable_lookup()
564 ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ btrfsic_block_link_hashtable_add()
565 ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev))) btrfsic_block_link_hashtable_add()
600 if (l->block_ref_to->dev_state->bdev == bdev_ref_to && btrfsic_block_link_hashtable_lookup()
602 l->block_ref_from->dev_state->bdev == bdev_ref_from && btrfsic_block_link_hashtable_lookup()
624 (((unsigned int)((uintptr_t)ds->bdev)) & btrfsic_dev_state_hashtable_add()
636 struct block_device *bdev, btrfsic_dev_state_hashtable_lookup()
640 (((unsigned int)((uintptr_t)bdev)) & btrfsic_dev_state_hashtable_lookup()
649 if (ds->bdev == bdev) btrfsic_dev_state_hashtable_lookup()
677 if (!device->bdev || !device->name) list_for_each_entry()
680 dev_state = btrfsic_dev_state_lookup(device->bdev); list_for_each_entry()
755 tmp_next_block_ctx.dev->bdev,
761 tmp_next_block_ctx.dev->bdev,
764 bdev,
804 struct block_device *const superblock_bdev = device->bdev; btrfsic_process_superblock_dev_mirror()
848 printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)" btrfsic_process_superblock_dev_mirror()
1350 next_block_ctx->dev->bdev, btrfsic_create_link_to_next_block()
1352 block_ctx->dev->bdev, btrfsic_create_link_to_next_block()
1594 block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev); btrfsic_map_block()
1684 bio->bi_bdev = block_ctx->dev->bdev; btrfsic_read_block()
1830 struct block_device *bdev = dev_state->bdev; btrfsic_process_written_block() local
1844 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, btrfsic_process_written_block()
2601 state->latest_superblock->dev_state->bdev == btrfsic_is_block_ref_by_superblock()
2602 l->block_ref_from->dev_state->bdev) btrfsic_is_block_ref_by_superblock()
2653 state->latest_superblock->dev_state->bdev == block->dev_state->bdev) btrfsic_get_block_type()
2744 l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev, btrfsic_block_link_lookup_or_add()
2746 from_block->dev_state->bdev, btrfsic_block_link_lookup_or_add()
2792 block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev, btrfsic_block_lookup_or_add()
2803 dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev); btrfsic_block_lookup_or_add()
2861 if (dev_state->bdev == block_ctx.dev->bdev && btrfsic_cmp_log_and_dev_bytenr()
2872 " buffer->log_bytenr=%llu, submit_bio(bdev=%s," btrfsic_cmp_log_and_dev_bytenr()
2891 struct block_device *bdev) btrfsic_dev_state_lookup()
2895 ds = btrfsic_dev_state_hashtable_lookup(bdev, btrfsic_dev_state_lookup()
2922 " size=%zu, data=%p, bdev=%p)\n", btrfsic_submit_bh()
2932 "submit_bh(rw=0x%x FLUSH, bdev=%p)\n", btrfsic_submit_bh()
3030 "submit_bio(rw=0x%x FLUSH, bdev=%p)\n", __btrfsic_submit_bio()
3125 if (!device->bdev || !device->name) list_for_each_entry()
3135 ds->bdev = device->bdev; list_for_each_entry()
3137 bdevname(ds->bdev, ds->name); list_for_each_entry()
3183 if (!device->bdev || !device->name) list_for_each_entry()
3187 device->bdev, list_for_each_entry()
525 btrfsic_block_hashtable_lookup( struct block_device *bdev, u64 dev_bytenr, struct btrfsic_block_hashtable *h) btrfsic_block_hashtable_lookup() argument
635 btrfsic_dev_state_hashtable_lookup( struct block_device *bdev, struct btrfsic_dev_state_hashtable *h) btrfsic_dev_state_hashtable_lookup() argument
2890 btrfsic_dev_state_lookup( struct block_device *bdev) btrfsic_dev_state_lookup() argument
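
The check-integrity lookups above all hash a block_device pointer into a fixed-size table. A stripped-down sketch of that indexing follows; TABLE_SIZE is an illustrative power of two, not the driver's real constant.

    #include <linux/types.h>

    struct block_device;            /* opaque here; only the pointer is hashed */

    #define TABLE_SIZE 256u         /* illustrative power of two */

    /* Index = low bits of the pointer value, as in the lookups above. */
    static unsigned int dev_state_hash(const struct block_device *bdev)
    {
            return ((unsigned int)(uintptr_t)bdev) & (TABLE_SIZE - 1);
    }
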
extent_map.h:35 struct block_device *bdev; member in struct:extent_map
compression.c:97 static struct bio *compressed_bio_alloc(struct block_device *bdev, compressed_bio_alloc() argument
102 nr_vecs = bio_get_nr_vecs(bdev); compressed_bio_alloc()
103 return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags); compressed_bio_alloc()
343 struct block_device *bdev; btrfs_submit_compressed_write() local
362 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; btrfs_submit_compressed_write()
364 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); btrfs_submit_compressed_write()
412 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); btrfs_submit_compressed_write()
579 struct block_device *bdev; btrfs_submit_compressed_read() local
630 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; btrfs_submit_compressed_read()
654 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); btrfs_submit_compressed_read()
705 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, btrfs_submit_compressed_read()
dev-replace.c:559 if (fs_info->sb->s_bdev == src_device->bdev) btrfs_dev_replace_finishing()
560 fs_info->sb->s_bdev = tgt_device->bdev; btrfs_dev_replace_finishing()
561 if (fs_info->fs_devices->latest_bdev == src_device->bdev) btrfs_dev_replace_finishing()
562 fs_info->fs_devices->latest_bdev = tgt_device->bdev; btrfs_dev_replace_finishing()
616 map = (struct map_lookup *)em->bdev; btrfs_dev_replace_update_device_in_mapping_tree()
782 if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) { btrfs_resume_dev_replace_async()
sysfs.c:614 if (one_device && one_device->bdev) { btrfs_kobj_rm_device()
615 disk = one_device->bdev->bd_part; btrfs_kobj_rm_device()
643 if (!dev->bdev) btrfs_kobj_add_device()
649 disk = dev->bdev->bd_part; btrfs_kobj_add_device()
extent_map.c:79 kfree(em->bdev); free_extent_map()
211 prev->bdev == next->bdev && mergable_maps()
disk-io.h:62 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
/linux-4.1.27/drivers/mtd/devices/
block2mtd.c:227 struct block_device *bdev = ERR_PTR(-ENODEV); add_device() local
239 bdev = blkdev_get_by_path(devname, mode, dev); add_device()
246 for (i = 0; IS_ERR(bdev) && i <= timeout; i++) { add_device()
261 bdev = blkdev_get_by_dev(devt, mode, dev); add_device()
265 if (IS_ERR(bdev)) { add_device()
269 dev->blkdev = bdev; add_device()
271 if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { add_device()
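
block2mtd's add_device() above retries the open because the backing device may appear late during boot. A hedged sketch of that retry loop, with illustrative names and a one-second step:

    #include <linux/fs.h>
    #include <linux/blkdev.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    /* Illustrative names; mirrors the loop shape in add_device() above. */
    static struct block_device *open_bdev_retry(const char *devname,
                                                fmode_t mode, void *holder,
                                                int timeout_s)
    {
            struct block_device *bdev = ERR_PTR(-ENODEV);
            int i;

            for (i = 0; IS_ERR(bdev) && i <= timeout_s; i++) {
                    if (i)
                            msleep(1000);   /* wait a second between tries */
                    bdev = blkdev_get_by_path(devname, mode, holder);
            }
            return bdev;
    }
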
/linux-4.1.27/drivers/scsi/
scsicam.c:39 struct block_device *bdev = dev->bd_contains; scsi_bios_ptable() local
41 void *data = read_dev_sector(bdev, 0, &sect); scsi_bios_ptable()
56 * @bdev: which device
67 int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) scsicam_bios_param() argument
73 p = scsi_bios_ptable(bdev); scsicam_bios_param()
sr.c:520 static int sr_block_open(struct block_device *bdev, fmode_t mode) sr_block_open() argument
526 cd = scsi_cd_get(bdev->bd_disk); sr_block_open()
528 ret = cdrom_open(&cd->cdi, bdev, mode); sr_block_open()
545 static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, sr_block_ioctl() argument
548 struct scsi_cd *cd = scsi_cd(bdev->bd_disk); sr_block_ioctl()
571 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); sr_block_ioctl()
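
sr_block_open()/sr_block_ioctl() above show the usual block_device_operations shape: the driver object is recovered from bdev->bd_disk->private_data. A generic sketch, with hypothetical "mydev" names:

    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <linux/genhd.h>

    struct mydev { int users; };    /* hypothetical driver object */

    static int mydev_open(struct block_device *bdev, fmode_t mode)
    {
            struct mydev *d = bdev->bd_disk->private_data;

            if (!d)
                    return -ENXIO;
            d->users++;             /* real drivers take a lock around this */
            return 0;
    }

    static const struct block_device_operations mydev_fops = {
            .owner = THIS_MODULE,
            .open  = mydev_open,
    };

The same pattern recurs in the aoe, rsxx, paride, mmc, ubi and gdrom hits further down.
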
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_buffer.c:679 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, vmw_ttm_tt_create() argument
691 vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); vmw_ttm_tt_create()
695 ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, vmw_ttm_tt_create()
698 ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags, vmw_ttm_tt_create()
709 static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) vmw_invalidate_caches() argument
714 static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, vmw_init_mem_type() argument
767 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) vmw_ttm_io_mem_reserve() argument
769 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; vmw_ttm_io_mem_reserve()
770 struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); vmw_ttm_io_mem_reserve()
795 static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) vmw_ttm_io_mem_free() argument
vmwgfx_drv.c:310 ret = ttm_bo_create(&dev_priv->bdev, vmw_dummy_query_bo_create()
718 ret = ttm_bo_device_init(&dev_priv->bdev, vmw_driver_load()
792 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, vmw_driver_load()
801 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, vmw_driver_load()
810 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, vmw_driver_load()
844 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); vmw_driver_load()
846 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); vmw_driver_load()
847 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); vmw_driver_load()
864 (void)ttm_bo_device_release(&dev_priv->bdev); vmw_driver_load()
894 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); vmw_driver_unload()
896 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); vmw_driver_unload()
897 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); vmw_driver_unload()
910 (void)ttm_bo_device_release(&dev_priv->bdev); vmw_driver_unload()
1167 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); vmw_master_set()
1221 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); vmw_master_drop()
1263 ttm_bo_swapout_all(&dev_priv->bdev); vmwgfx_pm_notifier()
vmwgfx_gmrid_manager.c:120 container_of(man->bdev, struct vmw_private, bdev); vmw_gmrid_man_init()
vmwgfx_ttm_glue.c:43 return ttm_bo_mmap(filp, vma, &dev_priv->bdev); vmw_mmap()
/linux-4.1.27/drivers/block/aoe/
aoeblk.c:234 aoeblk_open(struct block_device *bdev, fmode_t mode) aoeblk_open() argument
236 struct aoedev *d = bdev->bd_disk->private_data; aoeblk_open()
297 aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) aoeblk_getgeo() argument
299 struct aoedev *d = bdev->bd_disk->private_data; aoeblk_getgeo()
313 aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg) aoeblk_ioctl() argument
320 d = bdev->bd_disk->private_data; aoeblk_ioctl()
/linux-4.1.27/drivers/block/rsxx/
dev.c:68 static int rsxx_blkdev_ioctl(struct block_device *bdev, rsxx_blkdev_ioctl() argument
73 struct rsxx_cardinfo *card = bdev->bd_disk->private_data; rsxx_blkdev_ioctl()
85 static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) rsxx_getgeo() argument
87 struct rsxx_cardinfo *card = bdev->bd_disk->private_data; rsxx_getgeo()
/linux-4.1.27/fs/gfs2/
ops_fstype.c:1230 struct block_device *bdev = ptr; test_gfs2_super() local
1231 return (bdev == s->s_bdev); test_gfs2_super()
1251 struct block_device *bdev; gfs2_mount() local
1261 bdev = blkdev_get_by_path(dev_name, mode, fs_type); gfs2_mount()
1262 if (IS_ERR(bdev)) gfs2_mount()
1263 return ERR_CAST(bdev); gfs2_mount()
1270 mutex_lock(&bdev->bd_fsfreeze_mutex); gfs2_mount()
1271 if (bdev->bd_fsfreeze_count > 0) { gfs2_mount()
1272 mutex_unlock(&bdev->bd_fsfreeze_mutex); gfs2_mount()
1276 s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev); gfs2_mount()
1277 mutex_unlock(&bdev->bd_fsfreeze_mutex); gfs2_mount()
1291 blkdev_put(bdev, mode); gfs2_mount()
1317 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); gfs2_mount()
1318 sb_set_blocksize(s, block_size(bdev)); gfs2_mount()
1323 bdev->bd_super = s; gfs2_mount()
1336 blkdev_put(bdev, mode); gfs2_mount()
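
gfs2_mount() above refuses to mount a frozen device by testing bd_fsfreeze_count under bd_fsfreeze_mutex (the real code also holds the mutex across sget()). The check in isolation, as a sketch:

    #include <linux/fs.h>

    /* Check only; gfs2 keeps the mutex held across sget() as well. */
    static int bdev_frozen(struct block_device *bdev)
    {
            int frozen;

            mutex_lock(&bdev->bd_fsfreeze_mutex);
            frozen = bdev->bd_fsfreeze_count > 0;
            mutex_unlock(&bdev->bd_fsfreeze_mutex);
            return frozen;          /* nonzero: refuse to mount */
    }
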
/linux-4.1.27/mm/
swapfile.c:136 err = blkdev_issue_discard(si->bdev, start_block, discard_swap()
147 err = blkdev_issue_discard(si->bdev, start_block, discard_swap()
186 if (blkdev_issue_discard(si->bdev, start_block, discard_swap_cluster()
818 struct gendisk *disk = p->bdev->bd_disk; swap_entry_free()
820 disk->fops->swap_slot_free_notify(p->bdev, swap_entry_free()
990 struct block_device *bdev = NULL; swap_type_of() local
994 bdev = bdget(device); swap_type_of()
1003 if (!bdev) { swap_type_of()
1005 *bdev_p = bdgrab(sis->bdev); swap_type_of()
1010 if (bdev == sis->bdev) { swap_type_of()
1015 *bdev_p = bdgrab(sis->bdev); swap_type_of()
1018 bdput(bdev); swap_type_of()
1024 if (bdev) swap_type_of()
1025 bdput(bdev); swap_type_of()
1036 struct block_device *bdev; swapdev_block() local
1042 return map_swap_entry(swp_entry(type, offset), &bdev); swapdev_block()
1577 * into the bdev, not sector offset.
1579 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) map_swap_entry() argument
1587 *bdev = sis->bdev; map_swap_entry()
1608 * Returns the page offset into bdev for the specified page's swap entry.
1610 sector_t map_swap_page(struct page *page, struct block_device **bdev) map_swap_page() argument
1614 return map_swap_entry(entry, bdev); map_swap_page()
1927 struct block_device *bdev = I_BDEV(inode); SYSCALL_DEFINE1() local
1928 set_blocksize(bdev, old_block_size); SYSCALL_DEFINE1()
1929 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); SYSCALL_DEFINE1()
2144 p->bdev = bdgrab(I_BDEV(inode)); claim_swapfile()
2145 error = blkdev_get(p->bdev, claim_swapfile()
2149 p->bdev = NULL; claim_swapfile()
2152 p->old_block_size = block_size(p->bdev); claim_swapfile()
2153 error = set_blocksize(p->bdev, PAGE_SIZE); claim_swapfile()
2158 p->bdev = inode->i_sb->s_bdev; claim_swapfile()
2337 struct request_queue *q = bdev_get_queue(si->bdev); swap_discardable()
2435 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { SYSCALL_DEFINE2()
2475 if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
2533 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2534 set_blocksize(p->bdev, p->old_block_size);
2535 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
page_io.c:116 struct gendisk *disk = sis->bdev->bd_disk; end_swap_bio_read()
125 disk->fops->swap_slot_free_notify(sis->bdev, end_swap_bio_read()
304 ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc); __swap_writepage()
352 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); swap_readpage()
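
discard_swap() and discard_swap_cluster() above both reduce to blkdev_issue_discard() on a sector range (512-byte units). In isolation, with an illustrative wrapper name:

    #include <linux/blkdev.h>

    /* Wrapper name is illustrative; start/nr_sects are 512-byte sectors. */
    static int discard_sector_range(struct block_device *bdev,
                                    sector_t start, sector_t nr_sects)
    {
            return blkdev_issue_discard(bdev, start, nr_sects,
                                        GFP_KERNEL, 0);
    }
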
/linux-4.1.27/drivers/net/ethernet/amd/
declance.c:1021 static int dec_lance_probe(struct device *bdev, const int type) dec_lance_probe() argument
1037 if (bdev) dec_lance_probe()
1038 snprintf(name, sizeof(name), "%s", dev_name(bdev)); dec_lance_probe()
1110 dev_set_drvdata(bdev, dev); dec_lance_probe()
1112 start = to_tc_dev(bdev)->resource.start; dec_lance_probe()
1113 len = to_tc_dev(bdev)->resource.end - start + 1; dec_lance_probe()
1114 if (!request_mem_region(start, len, dev_name(bdev))) { dec_lance_probe()
1117 dev_name(bdev)); dec_lance_probe()
1125 dev->irq = to_tc_dev(bdev)->interrupt; dec_lance_probe()
1261 if (!bdev) { dec_lance_probe()
1270 if (bdev) dec_lance_probe()
1280 static void __exit dec_lance_remove(struct device *bdev) dec_lance_remove() argument
1282 struct net_device *dev = dev_get_drvdata(bdev); dec_lance_remove()
1286 start = to_tc_dev(bdev)->resource.start; dec_lance_remove()
1287 len = to_tc_dev(bdev)->resource.end - start + 1; dec_lance_remove()
/linux-4.1.27/drivers/net/fddi/
defxx.c:376 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_write_long() local
377 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_port_write_long()
400 struct device __maybe_unused *bdev = bp->bus_dev; dfx_port_read_long() local
401 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_port_read_long()
424 * bdev - pointer to device information
434 static void dfx_get_bars(struct device *bdev, dfx_get_bars() argument
437 int dfx_bus_pci = dev_is_pci(bdev); dfx_get_bars()
438 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_get_bars()
439 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_get_bars()
445 bar_start[0] = pci_resource_start(to_pci_dev(bdev), num); dfx_get_bars()
446 bar_len[0] = pci_resource_len(to_pci_dev(bdev), num); dfx_get_bars()
451 unsigned long base_addr = to_eisa_device(bdev)->base_addr; dfx_get_bars()
481 bar_start[0] = to_tc_dev(bdev)->resource.start + dfx_get_bars()
510 * bdev - pointer to device information
526 static int dfx_register(struct device *bdev) dfx_register() argument
529 int dfx_bus_pci = dev_is_pci(bdev); dfx_register()
530 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_register()
531 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_register()
533 const char *print_name = dev_name(bdev); dfx_register()
556 err = pci_enable_device(to_pci_dev(bdev)); dfx_register()
564 SET_NETDEV_DEV(dev, bdev); dfx_register()
567 bp->bus_dev = bdev; dfx_register()
568 dev_set_drvdata(bdev, dev); dfx_register()
570 dfx_get_bars(bdev, bar_start, bar_len); dfx_register()
632 pci_set_master(to_pci_dev(bdev)); dfx_register()
655 dma_free_coherent(bdev, alloc_size, dfx_register()
678 pci_disable_device(to_pci_dev(bdev)); dfx_register()
720 struct device *bdev = bp->bus_dev; dfx_bus_init() local
721 int dfx_bus_pci = dev_is_pci(bdev); dfx_bus_init()
722 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_bus_init()
723 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_bus_init()
735 dev->irq = to_tc_dev(bdev)->interrupt; dfx_bus_init()
737 unsigned long base_addr = to_eisa_device(bdev)->base_addr; dfx_bus_init()
829 struct pci_dev *pdev = to_pci_dev(bdev); dfx_bus_init()
880 struct device *bdev = bp->bus_dev; dfx_bus_uninit() local
881 int dfx_bus_pci = dev_is_pci(bdev); dfx_bus_uninit()
882 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_bus_uninit()
890 unsigned long base_addr = to_eisa_device(bdev)->base_addr; dfx_bus_uninit()
942 struct device __maybe_unused *bdev = bp->bus_dev; dfx_bus_config_check() local
943 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_bus_config_check()
959 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) { dfx_bus_config_check()
1040 struct device *bdev = bp->bus_dev; dfx_driver_init() local
1041 int dfx_bus_pci = dev_is_pci(bdev); dfx_driver_init()
1042 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_driver_init()
1043 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_driver_init()
1949 struct device *bdev = bp->bus_dev; dfx_interrupt() local
1950 int dfx_bus_pci = dev_is_pci(bdev); dfx_interrupt()
1951 int dfx_bus_eisa = DFX_BUS_EISA(bdev); dfx_interrupt()
1952 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_interrupt()
1982 unsigned long base_addr = to_eisa_device(bdev)->base_addr; dfx_interrupt()
3679 * bdev - pointer to device information
3693 static void dfx_unregister(struct device *bdev) dfx_unregister() argument
3695 struct net_device *dev = dev_get_drvdata(bdev); dfx_unregister()
3697 int dfx_bus_pci = dev_is_pci(bdev); dfx_unregister()
3698 int dfx_bus_tc = DFX_BUS_TC(bdev); dfx_unregister()
3714 dma_free_coherent(bdev, alloc_size, dfx_unregister()
3719 dfx_get_bars(bdev, bar_start, bar_len); dfx_unregister()
3731 pci_disable_device(to_pci_dev(bdev)); dfx_unregister()
/linux-4.1.27/drivers/block/drbd/
drbd_int.h:599 /* cached value of bdev->disk_conf->meta_dev_idx (see below) */
1122 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1473 extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
1497 struct drbd_backing_dev *bdev, sector_t sector, int rw);
1500 struct drbd_backing_dev *bdev, unsigned int *done);
1616 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1808 * @bdev: Meta data block device.
1813 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev) drbd_md_first_sector() argument
1815 switch (bdev->md.meta_dev_idx) { drbd_md_first_sector()
1818 return bdev->md.md_offset + bdev->md.bm_offset; drbd_md_first_sector()
1821 return bdev->md.md_offset; drbd_md_first_sector()
1827 * @bdev: Meta data block device.
1829 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) drbd_md_last_sector() argument
1831 switch (bdev->md.meta_dev_idx) { drbd_md_last_sector()
1834 return bdev->md.md_offset + MD_4kB_SECT -1; drbd_md_last_sector()
1837 return bdev->md.md_offset + bdev->md.md_size_sect -1; drbd_md_last_sector()
1842 static inline sector_t drbd_get_capacity(struct block_device *bdev) drbd_get_capacity() argument
1844 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ drbd_get_capacity()
1845 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; drbd_get_capacity()
1850 * @bdev: Meta data block device.
1856 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev) drbd_get_max_capacity() argument
1860 switch (bdev->md.meta_dev_idx) { drbd_get_max_capacity()
1863 s = drbd_get_capacity(bdev->backing_bdev) drbd_get_max_capacity()
1865 drbd_md_first_sector(bdev)) drbd_get_max_capacity()
1870 drbd_get_capacity(bdev->backing_bdev)); drbd_get_max_capacity()
1873 BM_EXT_TO_SECT(bdev->md.md_size_sect drbd_get_max_capacity()
1874 - bdev->md.bm_offset)); drbd_get_max_capacity()
1878 drbd_get_capacity(bdev->backing_bdev)); drbd_get_max_capacity()
1885 * @bdev: Meta data block device.
1887 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev) drbd_md_ss() argument
1889 const int meta_dev_idx = bdev->md.meta_dev_idx; drbd_md_ss()
1898 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8; drbd_md_ss()
1901 return MD_128MB_SECT * bdev->md.meta_dev_idx; drbd_md_ss()
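
drbd_get_capacity() above is worth isolating: in 4.1 a bdev's capacity in 512-byte sectors is simply its backing inode's size shifted right by 9, which is the computation drbd's metadata layout code builds on.

    #include <linux/fs.h>

    /* Same computation as drbd_get_capacity() above. */
    static inline sector_t bdev_capacity_sectors(struct block_device *bdev)
    {
            return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
    }
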
drbd_actlog.c:118 void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, wait_until_done_or_force_detached() argument
124 dt = rcu_dereference(bdev->disk_conf)->disk_timeout; wait_until_done_or_force_detached()
139 struct drbd_backing_dev *bdev, _drbd_md_sync_page_io()
155 bio->bi_bdev = bdev->md_bdev; _drbd_md_sync_page_io()
181 wait_until_done_or_force_detached(device, bdev, &device->md_io.done); _drbd_md_sync_page_io()
190 int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, drbd_md_sync_page_io() argument
196 BUG_ON(!bdev->md_bdev); drbd_md_sync_page_io()
203 if (sector < drbd_md_first_sector(bdev) || drbd_md_sync_page_io()
204 sector + 7 > drbd_md_last_sector(bdev)) drbd_md_sync_page_io()
209 err = _drbd_md_sync_page_io(device, bdev, sector, rw); drbd_md_sync_page_io()
138 _drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev, sector_t sector, int rw) _drbd_md_sync_page_io() argument
drbd_nl.c:773 struct drbd_backing_dev *bdev) drbd_md_set_sector_offsets()
776 unsigned int al_size_sect = bdev->md.al_size_4k * 8; drbd_md_set_sector_offsets()
778 bdev->md.md_offset = drbd_md_ss(bdev); drbd_md_set_sector_offsets()
780 switch (bdev->md.meta_dev_idx) { drbd_md_set_sector_offsets()
783 bdev->md.md_size_sect = MD_128MB_SECT; drbd_md_set_sector_offsets()
784 bdev->md.al_offset = MD_4kB_SECT; drbd_md_set_sector_offsets()
785 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; drbd_md_set_sector_offsets()
789 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); drbd_md_set_sector_offsets()
790 bdev->md.al_offset = MD_4kB_SECT; drbd_md_set_sector_offsets()
791 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect; drbd_md_set_sector_offsets()
796 bdev->md.al_offset = -al_size_sect; drbd_md_set_sector_offsets()
798 md_size_sect = drbd_get_capacity(bdev->backing_bdev); drbd_md_set_sector_offsets()
807 bdev->md.md_size_sect = md_size_sect; drbd_md_set_sector_offsets()
809 bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT; drbd_md_set_sector_offsets()
1026 drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev, drbd_new_dev_size() argument
1030 sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */ drbd_new_dev_size()
1034 m_size = drbd_get_max_capacity(bdev); drbd_new_dev_size()
1125 static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev, drbd_setup_queue_param() argument
1133 if (bdev) { drbd_setup_queue_param()
1134 b = bdev->backing_bdev->bd_disk->queue; drbd_setup_queue_param()
1182 void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev) drbd_reconsider_max_bio_size() argument
1190 if (bdev) { drbd_reconsider_max_bio_size()
1191 local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9; drbd_reconsider_max_bio_size()
1225 drbd_setup_queue_param(device, bdev, new); drbd_reconsider_max_bio_size()
1279 static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev) drbd_al_extents_max() argument
1299 unsigned int al_size_4k = bdev->md.al_size_4k; drbd_al_extents_max()
1465 struct block_device *bdev; drbd_adm_attach() local
1559 bdev = blkdev_get_by_path(new_disk_conf->backing_dev, drbd_adm_attach()
1561 if (IS_ERR(bdev)) { drbd_adm_attach()
1563 PTR_ERR(bdev)); drbd_adm_attach()
1567 nbc->backing_bdev = bdev; drbd_adm_attach()
1577 bdev = blkdev_get_by_path(new_disk_conf->meta_dev, drbd_adm_attach()
1581 if (IS_ERR(bdev)) { drbd_adm_attach()
1583 PTR_ERR(bdev)); drbd_adm_attach()
1587 nbc->md_bdev = bdev; drbd_adm_attach()
772 drbd_md_set_sector_offsets(struct drbd_device *device, struct drbd_backing_dev *bdev) drbd_md_set_sector_offsets() argument
drbd_main.c:64 static int drbd_open(struct block_device *bdev, fmode_t mode);
1857 static int drbd_open(struct block_device *bdev, fmode_t mode) drbd_open() argument
1859 struct drbd_device *device = bdev->bd_disk->private_data; drbd_open()
3173 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev) check_offsets_and_sizes() argument
3175 sector_t capacity = drbd_get_capacity(bdev->md_bdev); check_offsets_and_sizes()
3176 struct drbd_md *in_core = &bdev->md; check_offsets_and_sizes()
3215 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev)) check_offsets_and_sizes()
3256 * @bdev: Device from which the meta data should be read in.
3262 * even before @bdev is assigned to @device->ldev.
3264 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) drbd_md_read() argument
3279 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx; drbd_md_read()
3280 bdev->md.md_offset = drbd_md_ss(bdev); drbd_md_read()
3282 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { drbd_md_read()
3317 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect); drbd_md_read()
3319 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); drbd_md_read()
3320 bdev->md.flags = be32_to_cpu(buffer->flags); drbd_md_read()
3321 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); drbd_md_read()
3323 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect); drbd_md_read()
3324 bdev->md.al_offset = be32_to_cpu(buffer->al_offset); drbd_md_read()
3325 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset); drbd_md_read()
3327 if (check_activity_log_stripe_size(device, buffer, &bdev->md)) drbd_md_read()
3329 if (check_offsets_and_sizes(device, bdev)) drbd_md_read()
3332 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { drbd_md_read()
3334 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); drbd_md_read()
3337 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { drbd_md_read()
3339 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); drbd_md_read()
3634 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) drbd_md_test_flag() argument
3636 return (bdev->md.flags & flag) != 0; drbd_md_test_flag()
/linux-4.1.27/drivers/block/zram/
zram_drv.c:882 struct block_device *bdev; reset_store() local
885 bdev = bdget_disk(zram->disk, 0); reset_store()
887 if (!bdev) reset_store()
890 mutex_lock(&bdev->bd_mutex); reset_store()
892 if (bdev->bd_openers) { reset_store()
907 fsync_bdev(bdev); reset_store()
910 mutex_unlock(&bdev->bd_mutex); reset_store()
912 bdput(bdev); reset_store()
917 mutex_unlock(&bdev->bd_mutex); reset_store()
918 bdput(bdev); reset_store()
1001 static void zram_slot_free_notify(struct block_device *bdev, zram_slot_free_notify() argument
1007 zram = bdev->bd_disk->private_data; zram_slot_free_notify()
1016 static int zram_rw_page(struct block_device *bdev, sector_t sector, zram_rw_page() argument
1024 zram = bdev->bd_disk->private_data; zram_rw_page()
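
reset_store() above is the canonical "reset only if unused" dance: pin the whole disk with bdget_disk(), refuse if bd_openers is nonzero, flush with fsync_bdev(), then drop the reference with bdput(). A sketch with the device-specific reset work elided:

    #include <linux/fs.h>
    #include <linux/genhd.h>

    /* Sketch; the actual reset work is elided. */
    static int reset_if_unused(struct gendisk *disk)
    {
            struct block_device *bdev = bdget_disk(disk, 0);

            if (!bdev)
                    return -ENOMEM;
            mutex_lock(&bdev->bd_mutex);
            if (bdev->bd_openers) {         /* still open somewhere: refuse */
                    mutex_unlock(&bdev->bd_mutex);
                    bdput(bdev);
                    return -EBUSY;
            }
            fsync_bdev(bdev);               /* flush dirty pages first */
            /* ... perform the reset here ... */
            mutex_unlock(&bdev->bd_mutex);
            bdput(bdev);
            return 0;
    }
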
/linux-4.1.27/fs/nilfs2/
super.c:1212 struct block_device *bdev; member in struct:nilfs_super_data
1285 sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); nilfs_mount()
1286 if (IS_ERR(sd.bdev)) nilfs_mount()
1287 return ERR_CAST(sd.bdev); nilfs_mount()
1301 mutex_lock(&sd.bdev->bd_fsfreeze_mutex); nilfs_mount()
1302 if (sd.bdev->bd_fsfreeze_count > 0) { nilfs_mount()
1303 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); nilfs_mount()
1308 sd.bdev); nilfs_mount()
1309 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); nilfs_mount()
1322 strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id)); nilfs_mount()
1323 sb_set_blocksize(s, block_size(sd.bdev)); nilfs_mount()
1360 blkdev_put(sd.bdev, mode); nilfs_mount()
1369 blkdev_put(sd.bdev, mode); nilfs_mount()
/linux-4.1.27/arch/m68k/emu/
nfblock.c:82 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) nfhd_getgeo() argument
84 struct nfhd_device *dev = bdev->bd_disk->private_data; nfhd_getgeo()
/linux-4.1.27/arch/xtensa/platforms/iss/
simdisk.c:130 static int simdisk_open(struct block_device *bdev, fmode_t mode) simdisk_open() argument
132 struct simdisk *dev = bdev->bd_disk->private_data; simdisk_open()
136 check_disk_change(bdev); simdisk_open()
/linux-4.1.27/fs/ext4/
readpage.c:150 struct block_device *bdev = inode->i_sb->s_bdev; ext4_mpage_readpages() local
287 min_t(int, nr_pages, bio_get_nr_vecs(bdev))); ext4_mpage_readpages()
293 bio->bi_bdev = bdev; ext4_mpage_readpages()
/linux-4.1.27/fs/quota/
quota.c:730 struct block_device *bdev; quotactl_block() local
736 bdev = lookup_bdev(tmp->name); quotactl_block()
738 if (IS_ERR(bdev)) quotactl_block()
739 return ERR_CAST(bdev); quotactl_block()
741 sb = get_super_thawed(bdev); quotactl_block()
743 sb = get_super(bdev); quotactl_block()
744 bdput(bdev); quotactl_block()
/linux-4.1.27/drivers/target/
target_core_iblock.c:421 struct block_device *bdev = priv; iblock_do_unmap() local
424 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); iblock_do_unmap()
436 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; iblock_execute_unmap() local
438 return sbc_execute_unmap(cmd, iblock_do_unmap, bdev); iblock_execute_unmap()
444 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; iblock_execute_write_same_unmap() local
449 ret = iblock_do_unmap(cmd, bdev, lba, nolb); iblock_execute_write_same_unmap()
/linux-4.1.27/drivers/block/paride/
pcd.c:227 static int pcd_block_open(struct block_device *bdev, fmode_t mode) pcd_block_open() argument
229 struct pcd_unit *cd = bdev->bd_disk->private_data; pcd_block_open()
233 ret = cdrom_open(&cd->info, bdev, mode); pcd_block_open()
247 static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode, pcd_block_ioctl() argument
250 struct pcd_unit *cd = bdev->bd_disk->private_data; pcd_block_ioctl()
254 ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); pcd_block_ioctl()
pf.c:208 static int pf_open(struct block_device *bdev, fmode_t mode);
210 static int pf_ioctl(struct block_device *bdev, fmode_t mode,
212 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
304 static int pf_open(struct block_device *bdev, fmode_t mode) pf_open() argument
306 struct pf_unit *pf = bdev->bd_disk->private_data; pf_open()
329 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) pf_getgeo() argument
331 struct pf_unit *pf = bdev->bd_disk->private_data; pf_getgeo()
347 static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) pf_ioctl() argument
349 struct pf_unit *pf = bdev->bd_disk->private_data; pf_ioctl()
pd.c:739 static int pd_open(struct block_device *bdev, fmode_t mode) pd_open() argument
741 struct pd_unit *disk = bdev->bd_disk->private_data; pd_open()
754 static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo) pd_getgeo() argument
756 struct pd_unit *disk = bdev->bd_disk->private_data; pd_getgeo()
771 static int pd_ioctl(struct block_device *bdev, fmode_t mode, pd_ioctl() argument
774 struct pd_unit *disk = bdev->bd_disk->private_data; pd_ioctl()
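
pf_getgeo()/pd_getgeo() above fabricate a CHS geometry from the capacity, as most block drivers do. A generic sketch; mydev_getgeo() is a hypothetical name and the 16-head/63-sector split is convention, not a requirement:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/hdreg.h>
    #include <linux/kernel.h>

    static int mydev_getgeo(struct block_device *bdev,
                            struct hd_geometry *geo)
    {
            sector_t cap = get_capacity(bdev->bd_disk);

            geo->heads = 16;
            geo->sectors = 63;
            sector_div(cap, 16 * 63);       /* cap becomes the cylinder count */
            geo->cylinders = cap;
            geo->start = 0;
            return 0;
    }
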
/linux-4.1.27/include/trace/events/
bcache.h:416 __entry->dev = ca->bdev->bd_dev;
436 __entry->dev = ca->bdev->bd_dev;
456 __entry->dev = ca->bdev->bd_dev;
/linux-4.1.27/arch/um/drivers/
ubd_kern.c:89 static int ubd_open(struct block_device *bdev, fmode_t mode);
91 static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
93 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
1113 static int ubd_open(struct block_device *bdev, fmode_t mode) ubd_open() argument
1115 struct gendisk *disk = bdev->bd_disk; ubd_open()
1331 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo) ubd_getgeo() argument
1333 struct ubd *ubd_dev = bdev->bd_disk->private_data; ubd_getgeo()
1341 static int ubd_ioctl(struct block_device *bdev, fmode_t mode, ubd_ioctl() argument
1344 struct ubd *ubd_dev = bdev->bd_disk->private_data; ubd_ioctl()
/linux-4.1.27/drivers/md/persistent-data/
dm-block-manager.h:35 struct block_device *bdev, unsigned block_size,
dm-block-manager.c:370 struct dm_block_manager *dm_block_manager_create(struct block_device *bdev, dm_block_manager_create() argument
384 bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread, dm_block_manager_create()
/linux-4.1.27/fs/jfs/
jfs_logmgr.c:1082 struct block_device *bdev; lmLogOpen() local
1094 if (log->bdev->bd_dev == sbi->logdev) { lmLogOpen()
1125 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, lmLogOpen()
1127 if (IS_ERR(bdev)) { lmLogOpen()
1128 rc = PTR_ERR(bdev); lmLogOpen()
1132 log->bdev = bdev; lmLogOpen()
1166 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); lmLogOpen()
1187 log->bdev = sb->s_bdev; open_inline_log()
1462 struct block_device *bdev; lmLogClose() local
1508 bdev = log->bdev; lmLogClose()
1511 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); lmLogClose()
2001 bio->bi_bdev = log->bdev; lbmRead()
2147 bio->bi_bdev = log->bdev; lbmStartIO()
/linux-4.1.27/fs/ext3/
super.c:359 struct block_device *bdev; ext3_blkdev_get() local
362 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); ext3_blkdev_get()
363 if (IS_ERR(bdev)) ext3_blkdev_get()
365 return bdev; ext3_blkdev_get()
369 __bdevname(dev, b), PTR_ERR(bdev)); ext3_blkdev_get()
377 static void ext3_blkdev_put(struct block_device *bdev) ext3_blkdev_put() argument
379 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); ext3_blkdev_put()
384 struct block_device *bdev; ext3_blkdev_remove() local
385 bdev = sbi->journal_bdev; ext3_blkdev_remove()
386 if (bdev) { ext3_blkdev_remove()
387 ext3_blkdev_put(bdev); ext3_blkdev_remove()
2238 struct block_device *bdev; ext3_get_dev_journal() local
2240 bdev = ext3_blkdev_get(j_dev, sb); ext3_get_dev_journal()
2241 if (bdev == NULL) ext3_get_dev_journal()
2245 hblock = bdev_logical_block_size(bdev); ext3_get_dev_journal()
2254 set_blocksize(bdev, blocksize); ext3_get_dev_journal()
2255 if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_get_dev_journal()
2281 journal = journal_init_dev(bdev, sb->s_bdev, ext3_get_dev_journal()
2302 EXT3_SB(sb)->journal_bdev = bdev; ext3_get_dev_journal()
2308 ext3_blkdev_put(bdev); ext3_get_dev_journal()
2742 * around from a previously readonly bdev mount, ext3_remount()
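
ext3_get_dev_journal() above checks the device's logical block size before calling set_blocksize(): a filesystem block smaller than a hardware sector cannot work. The check as a sketch (ext3 additionally re-reads the journal superblock afterwards):

    #include <linux/fs.h>
    #include <linux/blkdev.h>

    static int journal_set_blocksize(struct block_device *bdev,
                                     int blocksize)
    {
            int hblock = bdev_logical_block_size(bdev);

            if (blocksize < hblock)
                    return -EINVAL; /* hw sector larger than fs block */
            return set_blocksize(bdev, blocksize);
    }
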
/linux-4.1.27/drivers/staging/i2o/
i2o_block.c:567 * @bdev: block device being opened
575 static int i2o_block_open(struct block_device *bdev, fmode_t mode) i2o_block_open() argument
577 struct i2o_block_device *dev = bdev->bd_disk->private_data; i2o_block_open()
634 static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) i2o_block_getgeo() argument
636 i2o_block_biosparam(get_capacity(bdev->bd_disk), i2o_block_getgeo()
643 * @bdev: block device being opened
652 static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, i2o_block_ioctl() argument
655 struct gendisk *disk = bdev->bd_disk; i2o_block_ioctl()
/linux-4.1.27/fs/xfs/
xfs_discard.c:44 struct block_device *bdev = mp->m_ddev_targp->bt_bdev; xfs_trim_extents() local
126 error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); xfs_trim_extents()
/linux-4.1.27/drivers/mmc/card/
block.c:294 static int mmc_blk_open(struct block_device *bdev, fmode_t mode) mmc_blk_open() argument
296 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); mmc_blk_open()
302 check_disk_change(bdev); mmc_blk_open()
325 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) mmc_blk_getgeo() argument
327 geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); mmc_blk_getgeo()
447 static int mmc_blk_ioctl_cmd(struct block_device *bdev, mmc_blk_ioctl_cmd() argument
466 if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) mmc_blk_ioctl_cmd()
473 md = mmc_blk_get(bdev->bd_disk); mmc_blk_ioctl_cmd()
618 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, mmc_blk_ioctl() argument
623 ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); mmc_blk_ioctl()
628 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, mmc_blk_compat_ioctl() argument
631 return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); mmc_blk_compat_ioctl()
/linux-4.1.27/drivers/mtd/ubi/
block.c:224 static int ubiblock_open(struct block_device *bdev, fmode_t mode) ubiblock_open() argument
226 struct ubiblock *dev = bdev->bd_disk->private_data; ubiblock_open()
280 static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo) ubiblock_getgeo() argument
285 geo->sectors = get_capacity(bdev->bd_disk); ubiblock_getgeo()
/linux-4.1.27/drivers/cdrom/
gdrom.c:497 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) gdrom_bdops_open() argument
501 ret = cdrom_open(gd.cd_info, bdev, mode); gdrom_bdops_open()
519 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, gdrom_bdops_ioctl() argument
525 ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); gdrom_bdops_ioctl()
/linux-4.1.27/fs/jbd/
revoke.c:337 struct block_device *bdev; journal_revoke() local
350 bdev = journal->j_fs_dev; journal_revoke()
354 bh = __find_get_block(bdev, blocknr, journal->j_blocksize); journal_revoke()
364 bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); journal_revoke()
/linux-4.1.27/fs/jbd2/
revoke.c:334 struct block_device *bdev; jbd2_journal_revoke() local
347 bdev = journal->j_fs_dev; jbd2_journal_revoke()
351 bh = __find_get_block(bdev, blocknr, journal->j_blocksize); jbd2_journal_revoke()
361 bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); jbd2_journal_revoke()
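
Both revoke paths above probe the buffer cache with __find_get_block(), which returns a referenced buffer_head only if (bdev, block) is already cached and never issues I/O, unlike __bread(). A sketch of that probe:

    #include <linux/types.h>
    #include <linux/buffer_head.h>

    static bool block_is_cached(struct block_device *bdev,
                                sector_t blocknr, unsigned size)
    {
            struct buffer_head *bh = __find_get_block(bdev, blocknr, size);

            if (!bh)
                    return false;
            brelse(bh);     /* drop the reference the lookup took */
            return true;
    }
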
