/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
H A D | en_port.c | 143 ret += be64_to_cpu(*curr); en_stats_adder() 207 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + mlx4_en_DUMP_ETH_STATS() 211 be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) + mlx4_en_DUMP_ETH_STATS() 212 be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) + mlx4_en_DUMP_ETH_STATS() 246 be64_to_cpu(mlx4_en_stats->RInRangeLengthErr); mlx4_en_DUMP_ETH_STATS() 248 be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr); mlx4_en_DUMP_ETH_STATS() 260 priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0); mlx4_en_DUMP_ETH_STATS() 261 priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0); mlx4_en_DUMP_ETH_STATS() 262 priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1); mlx4_en_DUMP_ETH_STATS() 263 priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1); mlx4_en_DUMP_ETH_STATS() 264 priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2); mlx4_en_DUMP_ETH_STATS() 265 priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2); mlx4_en_DUMP_ETH_STATS() 266 priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3); mlx4_en_DUMP_ETH_STATS() 267 priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3); mlx4_en_DUMP_ETH_STATS() 268 priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4); mlx4_en_DUMP_ETH_STATS() 269 priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4); mlx4_en_DUMP_ETH_STATS() 270 priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5); mlx4_en_DUMP_ETH_STATS() 271 priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5); mlx4_en_DUMP_ETH_STATS() 272 priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6); mlx4_en_DUMP_ETH_STATS() 273 priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6); mlx4_en_DUMP_ETH_STATS() 274 priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7); mlx4_en_DUMP_ETH_STATS() 275 priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7); mlx4_en_DUMP_ETH_STATS() 276 priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan); mlx4_en_DUMP_ETH_STATS() 277 priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan); mlx4_en_DUMP_ETH_STATS() 278 priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0); mlx4_en_DUMP_ETH_STATS() 279 priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0); mlx4_en_DUMP_ETH_STATS() 280 priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1); mlx4_en_DUMP_ETH_STATS() 281 priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1); mlx4_en_DUMP_ETH_STATS() 282 priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2); mlx4_en_DUMP_ETH_STATS() 283 priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2); mlx4_en_DUMP_ETH_STATS() 284 priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3); mlx4_en_DUMP_ETH_STATS() 285 priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3); mlx4_en_DUMP_ETH_STATS() 286 priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4); mlx4_en_DUMP_ETH_STATS() 287 priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4); mlx4_en_DUMP_ETH_STATS() 288 priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5); mlx4_en_DUMP_ETH_STATS() 289 priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5); mlx4_en_DUMP_ETH_STATS() 290 priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6); 
mlx4_en_DUMP_ETH_STATS() 291 priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6); mlx4_en_DUMP_ETH_STATS() 292 priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7); mlx4_en_DUMP_ETH_STATS() 293 priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7); mlx4_en_DUMP_ETH_STATS() 294 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); mlx4_en_DUMP_ETH_STATS() 295 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); mlx4_en_DUMP_ETH_STATS() 319 be64_to_cpu(flowstats[i].rx_pause); mlx4_en_DUMP_ETH_STATS() 321 be64_to_cpu(flowstats[i].rx_pause_duration); mlx4_en_DUMP_ETH_STATS() 323 be64_to_cpu(flowstats[i].rx_pause_transition); mlx4_en_DUMP_ETH_STATS() 325 be64_to_cpu(flowstats[i].tx_pause); mlx4_en_DUMP_ETH_STATS() 327 be64_to_cpu(flowstats[i].tx_pause_duration); mlx4_en_DUMP_ETH_STATS() 329 be64_to_cpu(flowstats[i].tx_pause_transition); mlx4_en_DUMP_ETH_STATS() 334 be64_to_cpu(flowstats[0].rx_pause); mlx4_en_DUMP_ETH_STATS() 336 be64_to_cpu(flowstats[0].rx_pause_duration); mlx4_en_DUMP_ETH_STATS() 338 be64_to_cpu(flowstats[0].rx_pause_transition); mlx4_en_DUMP_ETH_STATS() 340 be64_to_cpu(flowstats[0].tx_pause); mlx4_en_DUMP_ETH_STATS() 342 be64_to_cpu(flowstats[0].tx_pause_duration); mlx4_en_DUMP_ETH_STATS() 344 be64_to_cpu(flowstats[0].tx_pause_transition); mlx4_en_DUMP_ETH_STATS()
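The mlx4 hits above all unpack a firmware statistics mailbox whose fields are stored big-endian (__be64), so every counter is passed through be64_to_cpu() before it lands in the host-order software stats. A minimal sketch of that pattern, using a hypothetical cut-down mailbox rather than the driver's real mailbox layout:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* hypothetical, simplified stand-in for the firmware stats mailbox */
    struct hw_stats_mbox {
            __be64 rtot_prio[8];    /* RX packets per priority, big-endian */
            __be64 roct_prio[8];    /* RX octets per priority, big-endian  */
    };

    static void copy_rx_prio_stats(u64 dst[8][2], const struct hw_stats_mbox *mbox)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    dst[i][0] = be64_to_cpu(mbox->rtot_prio[i]);    /* packets */
                    dst[i][1] = be64_to_cpu(mbox->roct_prio[i]);    /* octets  */
            }
    }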
|
H A D | en_dcb_nl.c | 457 be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds); mlx4_en_dcbnl_ieee_getqcnstats()
|
H A D | en_rx.c | 563 dma = be64_to_cpu(rx_desc->data[nr].addr); mlx4_en_complete_rx_desc() 614 dma = be64_to_cpu(rx_desc->data[0].addr); mlx4_en_rx_skb() 814 dma = be64_to_cpu(rx_desc->data[0].addr); mlx4_en_process_rx_cq()
|
H A D | en_tx.c | 315 (dma_addr_t)be64_to_cpu(data->addr), mlx4_en_free_tx_desc() 342 (dma_addr_t)be64_to_cpu(data->addr), mlx4_en_free_tx_desc() 1011 dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr), mlx4_en_xmit()
|
H A D | port.c | 112 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) find_index() 152 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { mlx4_find_cached_mac() 181 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { __mlx4_register_mac()
|
H A D | cmd.c | 540 be64_to_cpu(vhcr->out_param); mlx4_slave_cmd() 559 be64_to_cpu(vhcr->out_param); mlx4_slave_cmd() 1682 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param); mlx4_master_process_vhcr() 1683 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param); mlx4_master_process_vhcr()
|
H A D | resource_tracker.c | 2437 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; mr_get_mtt_addr() 3020 int start = be64_to_cpu(page_list[0]); mlx4_WRITE_MTT_wrapper() 3036 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); mlx4_WRITE_MTT_wrapper() 3038 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, mlx4_WRITE_MTT_wrapper() 4046 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); mlx4_UPDATE_QP_wrapper()
|
/linux-4.1.27/include/linux/ |
H A D | libfdt_env.h | 10 #define fdt64_to_cpu(x) be64_to_cpu(x)
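The three libfdt_env.h copies indexed here (include/linux/, the powerpc boot wrapper and the arm decompressor) all define fdt64_to_cpu() as be64_to_cpu(), because flattened device tree data is big-endian by specification regardless of the CPU. A hedged sketch of pulling a 64-bit value out of raw FDT property bytes (the helper name and the memcpy-for-alignment step are illustrative, not quoted from libfdt):

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    /* illustrative: a 64-bit quantity as it sits inside an FDT property */
    static u64 fdt_prop_to_u64(const void *prop_data)
    {
            __be64 raw;

            /* FDT properties are only guaranteed 4-byte aligned, so copy first */
            memcpy(&raw, prop_data, sizeof(raw));
            return be64_to_cpu(raw);        /* i.e. fdt64_to_cpu(raw) */
    }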
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | libfdt_env.h | 17 #define fdt64_to_cpu(x) be64_to_cpu(x)
|
H A D | of.h | 32 #define be64_to_cpu(x) swab64(x) macro 39 #define be64_to_cpu(x) (x) macro
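The two of.h hits are definitions rather than uses: the powerpc boot wrapper carries its own be64_to_cpu(), byte-swapping on little-endian builds and collapsing to a no-op on big-endian ones. Presumably the two lines sit behind an endianness guard roughly like the following (the exact #ifdef is an assumption, not quoted from the file):

    #ifdef __LITTLE_ENDIAN__
    #define be64_to_cpu(x) swab64(x)    /* device tree / firmware data is big-endian, so swap */
    #else
    #define be64_to_cpu(x) (x)          /* already big-endian: identity */
    #endif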
|
/linux-4.1.27/arch/arm/boot/compressed/ |
H A D | libfdt_env.h | 12 #define fdt64_to_cpu(x) be64_to_cpu(x)
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
H A D | pci.c | 163 be64_to_cpu(data->errorClass), pnv_pci_dump_p7ioc_diag_data() 164 be64_to_cpu(data->correlator)); pnv_pci_dump_p7ioc_diag_data() 167 be64_to_cpu(data->p7iocPlssr), pnv_pci_dump_p7ioc_diag_data() 168 be64_to_cpu(data->p7iocCsr)); pnv_pci_dump_p7ioc_diag_data() 171 be64_to_cpu(data->lemFir), pnv_pci_dump_p7ioc_diag_data() 172 be64_to_cpu(data->lemErrorMask), pnv_pci_dump_p7ioc_diag_data() 173 be64_to_cpu(data->lemWOF)); pnv_pci_dump_p7ioc_diag_data() 176 be64_to_cpu(data->phbErrorStatus), pnv_pci_dump_p7ioc_diag_data() 177 be64_to_cpu(data->phbFirstErrorStatus), pnv_pci_dump_p7ioc_diag_data() 178 be64_to_cpu(data->phbErrorLog0), pnv_pci_dump_p7ioc_diag_data() 179 be64_to_cpu(data->phbErrorLog1)); pnv_pci_dump_p7ioc_diag_data() 182 be64_to_cpu(data->mmioErrorStatus), pnv_pci_dump_p7ioc_diag_data() 183 be64_to_cpu(data->mmioFirstErrorStatus), pnv_pci_dump_p7ioc_diag_data() 184 be64_to_cpu(data->mmioErrorLog0), pnv_pci_dump_p7ioc_diag_data() 185 be64_to_cpu(data->mmioErrorLog1)); pnv_pci_dump_p7ioc_diag_data() 188 be64_to_cpu(data->dma0ErrorStatus), pnv_pci_dump_p7ioc_diag_data() 189 be64_to_cpu(data->dma0FirstErrorStatus), pnv_pci_dump_p7ioc_diag_data() 190 be64_to_cpu(data->dma0ErrorLog0), pnv_pci_dump_p7ioc_diag_data() 191 be64_to_cpu(data->dma0ErrorLog1)); pnv_pci_dump_p7ioc_diag_data() 194 be64_to_cpu(data->dma1ErrorStatus), pnv_pci_dump_p7ioc_diag_data() 195 be64_to_cpu(data->dma1FirstErrorStatus), pnv_pci_dump_p7ioc_diag_data() 196 be64_to_cpu(data->dma1ErrorLog0), pnv_pci_dump_p7ioc_diag_data() 197 be64_to_cpu(data->dma1ErrorLog1)); pnv_pci_dump_p7ioc_diag_data() 205 i, be64_to_cpu(data->pestA[i]), pnv_pci_dump_p7ioc_diag_data() 206 be64_to_cpu(data->pestB[i])); pnv_pci_dump_p7ioc_diag_data() 254 be64_to_cpu(data->errorClass), pnv_pci_dump_phb3_diag_data() 255 be64_to_cpu(data->correlator)); pnv_pci_dump_phb3_diag_data() 258 be64_to_cpu(data->nFir), pnv_pci_dump_phb3_diag_data() 259 be64_to_cpu(data->nFirMask), pnv_pci_dump_phb3_diag_data() 260 be64_to_cpu(data->nFirWOF)); pnv_pci_dump_phb3_diag_data() 263 be64_to_cpu(data->phbPlssr), pnv_pci_dump_phb3_diag_data() 264 be64_to_cpu(data->phbCsr)); pnv_pci_dump_phb3_diag_data() 267 be64_to_cpu(data->lemFir), pnv_pci_dump_phb3_diag_data() 268 be64_to_cpu(data->lemErrorMask), pnv_pci_dump_phb3_diag_data() 269 be64_to_cpu(data->lemWOF)); pnv_pci_dump_phb3_diag_data() 272 be64_to_cpu(data->phbErrorStatus), pnv_pci_dump_phb3_diag_data() 273 be64_to_cpu(data->phbFirstErrorStatus), pnv_pci_dump_phb3_diag_data() 274 be64_to_cpu(data->phbErrorLog0), pnv_pci_dump_phb3_diag_data() 275 be64_to_cpu(data->phbErrorLog1)); pnv_pci_dump_phb3_diag_data() 278 be64_to_cpu(data->mmioErrorStatus), pnv_pci_dump_phb3_diag_data() 279 be64_to_cpu(data->mmioFirstErrorStatus), pnv_pci_dump_phb3_diag_data() 280 be64_to_cpu(data->mmioErrorLog0), pnv_pci_dump_phb3_diag_data() 281 be64_to_cpu(data->mmioErrorLog1)); pnv_pci_dump_phb3_diag_data() 284 be64_to_cpu(data->dma0ErrorStatus), pnv_pci_dump_phb3_diag_data() 285 be64_to_cpu(data->dma0FirstErrorStatus), pnv_pci_dump_phb3_diag_data() 286 be64_to_cpu(data->dma0ErrorLog0), pnv_pci_dump_phb3_diag_data() 287 be64_to_cpu(data->dma0ErrorLog1)); pnv_pci_dump_phb3_diag_data() 290 be64_to_cpu(data->dma1ErrorStatus), pnv_pci_dump_phb3_diag_data() 291 be64_to_cpu(data->dma1FirstErrorStatus), pnv_pci_dump_phb3_diag_data() 292 be64_to_cpu(data->dma1ErrorLog0), pnv_pci_dump_phb3_diag_data() 293 be64_to_cpu(data->dma1ErrorLog1)); pnv_pci_dump_phb3_diag_data() 296 if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && 
pnv_pci_dump_phb3_diag_data() 297 (be64_to_cpu(data->pestB[i]) >> 63) == 0) pnv_pci_dump_phb3_diag_data() 301 i, be64_to_cpu(data->pestA[i]), pnv_pci_dump_phb3_diag_data() 302 be64_to_cpu(data->pestB[i])); pnv_pci_dump_phb3_diag_data()
|
H A D | opal-memory-errors.c | 51 paddr_start = be64_to_cpu(merr_evt->u.resilience.physical_address_start); handle_memory_error_event() 52 paddr_end = be64_to_cpu(merr_evt->u.resilience.physical_address_end); handle_memory_error_event() 55 paddr_start = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start); handle_memory_error_event() 56 paddr_end = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end); handle_memory_error_event()
|
H A D | opal-power.c | 28 type = be64_to_cpu(power_msg->params[0]); opal_power_control_event()
|
H A D | opal-msglog.c | 53 conbuf = phys_to_virt(be64_to_cpu(mc->obuf_phys)); opal_msglog_read() 115 if (be64_to_cpu(mc->magic) != MEMCONS_MAGIC) { opal_msglog_init()
|
H A D | opal.c | 296 opal_do_notifier(be64_to_cpu(evt)); opal_notifier_enable() 406 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) opal_get_chars() 411 return be64_to_cpu(len); opal_get_chars() 437 len = be64_to_cpu(olen); opal_put_chars() 456 len = be64_to_cpu(olen); opal_put_chars() 477 (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT)); opal_put_chars() 576 opal_do_notifier(be64_to_cpu(evt)); opal_handle_hmi_exception() 619 opal_do_notifier(be64_to_cpu(events)); opal_interrupt() 660 bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0])); opal_export_symmap() 661 bin_attr_symbol_map.size = be64_to_cpu(syms[1]); opal_export_symmap() 937 uint64_t next = be64_to_cpu(sg->next); opal_free_sg_list()
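OPAL firmware is big-endian, so the powernv call sites in this block read event masks, lengths and message parameters back as __be64 and convert them exactly once on the kernel side. A rough sketch of the shape of such a call; opal_example_read() is a hypothetical wrapper standing in for the real OPAL entry points:

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <asm/byteorder.h>

    extern s64 opal_example_read(__be64 *out);      /* hypothetical OPAL wrapper */

    static int read_fw_counter(u64 *val)
    {
            __be64 raw;
            s64 rc = opal_example_read(&raw);

            if (rc)
                    return -EIO;
            *val = be64_to_cpu(raw);        /* firmware fills the buffer big-endian */
            return 0;
    }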
|
H A D | opal-rtc.c | 61 h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_get_boot_time()
|
H A D | eeh-powernv.c | 1150 be64_to_cpu(data->gemXfir), pnv_eeh_dump_hub_diag_common() 1151 be64_to_cpu(data->gemRfir), pnv_eeh_dump_hub_diag_common() 1152 be64_to_cpu(data->gemRirqfir), pnv_eeh_dump_hub_diag_common() 1153 be64_to_cpu(data->gemMask), pnv_eeh_dump_hub_diag_common() 1154 be64_to_cpu(data->gemRwof)); pnv_eeh_dump_hub_diag_common() 1160 be64_to_cpu(data->lemFir), pnv_eeh_dump_hub_diag_common() 1161 be64_to_cpu(data->lemErrMask), pnv_eeh_dump_hub_diag_common() 1162 be64_to_cpu(data->lemAction0), pnv_eeh_dump_hub_diag_common() 1163 be64_to_cpu(data->lemAction1), pnv_eeh_dump_hub_diag_common() 1164 be64_to_cpu(data->lemWof)); pnv_eeh_dump_hub_diag_common() 1186 be64_to_cpu(data->rgc.rgcStatus), pnv_eeh_get_and_dump_hub_diag() 1187 be64_to_cpu(data->rgc.rgcLdcp)); pnv_eeh_get_and_dump_hub_diag() 1196 be64_to_cpu(data->bi.biLdcp0), pnv_eeh_get_and_dump_hub_diag() 1197 be64_to_cpu(data->bi.biLdcp1), pnv_eeh_get_and_dump_hub_diag() 1198 be64_to_cpu(data->bi.biLdcp2), pnv_eeh_get_and_dump_hub_diag() 1199 be64_to_cpu(data->bi.biFenceStatus)); pnv_eeh_get_and_dump_hub_diag() 1207 be64_to_cpu(data->ci.ciPortStatus), pnv_eeh_get_and_dump_hub_diag() 1208 be64_to_cpu(data->ci.ciPortLdcp)); pnv_eeh_get_and_dump_hub_diag() 1351 be16_to_cpu(severity), be64_to_cpu(frozen_pe_no), pnv_eeh_next_error() 1399 be64_to_cpu(frozen_pe_no), pe)) { pnv_eeh_next_error()
|
H A D | opal-hmi.c | 93 printk("%s HMER: %016llx\n", level, be64_to_cpu(hmi_evt->hmer)); print_hmi_event_info() 97 be64_to_cpu(hmi_evt->tfmr)); print_hmi_event_info()
|
H A D | opal-sensor.c | 58 ret = opal_error_code(be64_to_cpu(msg.params[1])); opal_get_sensor_data()
|
H A D | opal-elog.c | 256 elog_size = be64_to_cpu(size); elog_work_fn() 257 log_id = be64_to_cpu(id); elog_work_fn() 258 elog_type = be64_to_cpu(type); elog_work_fn()
|
H A D | opal-xscom.c | 106 *value = be64_to_cpu(v); opal_scom_read()
|
H A D | opal-sysparam.c | 68 ret = be64_to_cpu(msg.params[1]); opal_get_sys_param() 102 ret = be64_to_cpu(msg.params[1]); opal_set_sys_param()
|
H A D | opal-async.c | 137 token = be64_to_cpu(comp_msg->params[0]); opal_async_comp_event()
|
/linux-4.1.27/fs/ocfs2/dlm/ |
H A D | dlmast.c | 106 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast() 107 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast() 114 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast() 115 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_ast() 125 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_ast() 126 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_ast() 172 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_queue_bast() 173 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); __dlm_queue_bast() 231 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_local_ast() 232 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); dlm_do_local_ast() 252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_remote_ast() 253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); dlm_do_remote_ast() 276 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_do_local_bast() 277 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_do_local_bast() 338 dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_proxy_ast_handler() 339 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), dlm_proxy_ast_handler() 349 dlm_get_lock_cookie_node(be64_to_cpu(cookie)), dlm_proxy_ast_handler() 350 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), dlm_proxy_ast_handler() 398 dlm_get_lock_cookie_node(be64_to_cpu(cookie)), 399 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), 414 dlm_get_lock_cookie_node(be64_to_cpu(cookie)), 415 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
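Throughout these ocfs2 DLM files the 64-bit lock cookie travels over the wire in big-endian form and is converted with be64_to_cpu() before dlm_get_lock_cookie_node()/_seq() split it up for logging. The sketch below only illustrates the convert-then-split idea; the bit widths are assumptions, not the DLM's real cookie layout:

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <asm/byteorder.h>

    /* assumed split: node in the top byte, sequence in the rest */
    static void log_lock_cookie(__be64 wire_cookie)
    {
            u64 cookie = be64_to_cpu(wire_cookie);
            u32 node   = cookie >> 56;
            u64 seq    = cookie & ((1ULL << 56) - 1);

            pr_debug("lock cookie %u:%llu\n", node, (unsigned long long)seq);
    }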
|
H A D | dlmconvert.c | 285 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote() 286 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote() 297 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote() 298 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmconvert_remote() 506 dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)), dlm_convert_lock_handler() 507 dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie))); dlm_convert_lock_handler()
|
H A D | dlmthread.c | 360 dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 361 dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 422 dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 423 dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), dlm_shuffle_lists() 533 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts() 534 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts() 592 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts() 593 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_flush_asts()
|
H A D | dlmrecovery.c | 1118 u64 mig_cookie = be64_to_cpu(mres->mig_cookie); dlm_send_mig_lockres_msg() 1216 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_prepare_lvb_for_migration() 1217 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlm_prepare_lvb_for_migration() 1847 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1848 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1859 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1860 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1867 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1868 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1879 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1880 dlm_get_lock_cookie_seq(be64_to_cpu(c)), 1903 be64_to_cpu(ml->cookie), NULL); 1991 dlm_get_lock_cookie_node(be64_to_cpu(c)), list_for_each_entry() 1992 dlm_get_lock_cookie_seq(be64_to_cpu(c))); list_for_each_entry() 1997 dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)), list_for_each_entry() 1998 dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)), list_for_each_entry()
|
H A D | dlmunlock.c | 252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlmunlock_common() 253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dlmunlock_common() 513 dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)), 514 dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
|
H A D | dlmdebug.c | 82 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), __dlm_print_lock() 83 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), __dlm_print_lock() 264 (unsigned int)be64_to_cpu(inode_blkno_be)); stringify_lockname() 510 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dump_lock() 511 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), dump_lock()
|
H A D | dlmlock.c | 495 be64_to_cpu(create->cookie), NULL); dlm_create_lock_handler()
|
/linux-4.1.27/fs/omfs/ |
H A D | file.c | 70 next = be64_to_cpu(oe->e_next); omfs_shrink_inode() 76 start = be64_to_cpu(entry->e_cluster); omfs_shrink_inode() 77 count = be64_to_cpu(entry->e_blocks); omfs_shrink_inode() 145 new_block = be64_to_cpu(entry->e_cluster) + omfs_grow_extent() 146 be64_to_cpu(entry->e_blocks); omfs_grow_extent() 151 be64_to_cpu(~terminator->e_blocks) + 1)); omfs_grow_extent() 176 be64_to_cpu(~terminator->e_blocks) + (u64) new_count)); omfs_grow_extent() 198 be64_to_cpu(ent->e_blocks)); find_block() 208 be64_to_cpu(ent->e_cluster)) + find_block() 247 next = be64_to_cpu(oe->e_next); omfs_get_block()
|
H A D | dir.c | 60 block = be64_to_cpu(oi->i_sibling); omfs_scan_list() 78 block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs])); omfs_find_entry() 128 block = be64_to_cpu(*entry); omfs_add_link() 175 block = be64_to_cpu(*entry); omfs_delete_entry() 305 ino_t ino = be64_to_cpu(oi->i_head.h_self); omfs_lookup() 320 u64 ino = be64_to_cpu(header->h_self); omfs_is_bad() 350 fsblock = be64_to_cpu(oi->i_sibling); omfs_fill_chain() 434 __u64 fsblock = be64_to_cpu(*p++); omfs_readdir()
|
H A D | inode.c | 222 if (ino != be64_to_cpu(oi->i_head.h_self)) omfs_iget() 228 ctime = be64_to_cpu(oi->i_ctime); omfs_iget() 251 inode->i_size = be64_to_cpu(oi->i_size); omfs_iget() 469 sbi->s_num_blocks = be64_to_cpu(omfs_sb->s_num_blocks); omfs_fill_super() 472 sbi->s_root_ino = be64_to_cpu(omfs_sb->s_root_block); omfs_fill_super() 508 bh2 = omfs_bread(sb, be64_to_cpu(omfs_sb->s_root_block)); omfs_fill_super() 514 sbi->s_bitmap_ino = be64_to_cpu(omfs_rb->r_bitmap); omfs_fill_super() 517 if (sbi->s_num_blocks != be64_to_cpu(omfs_rb->r_num_blocks)) { omfs_fill_super() 521 (unsigned long long)be64_to_cpu(omfs_rb->r_num_blocks)); omfs_fill_super() 546 root = omfs_iget(sb, be64_to_cpu(omfs_rb->r_root_dir)); omfs_fill_super()
|
/linux-4.1.27/arch/powerpc/kvm/ |
H A D | book3s_hv_rm_mmu.c | 258 if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 && kvmppc_do_h_enter() 276 pte = be64_to_cpu(hpte[0]); kvmppc_do_h_enter() 295 pte = be64_to_cpu(hpte[0]); kvmppc_do_h_enter() 413 pte = be64_to_cpu(hpte[0]); kvmppc_do_h_remove() 425 rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index); kvmppc_do_h_remove() 437 be64_to_cpu(hpte[1])); kvmppc_do_h_remove() 499 hp0 = be64_to_cpu(hp[0]); kvmppc_h_bulk_remove() 535 tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]), kvmppc_h_bulk_remove() 536 be64_to_cpu(hp[1]), pte_index); kvmppc_h_bulk_remove() 556 be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); kvmppc_h_bulk_remove() 582 pte = be64_to_cpu(hpte[0]); kvmppc_h_protect() 611 pte = be64_to_cpu(hpte[1]); kvmppc_h_protect() 648 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; kvmppc_h_read() 649 r = be64_to_cpu(hpte[1]); kvmppc_h_read() 670 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), kvmppc_invalidate_hpte() 682 rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]), kvmppc_clear_ref_hpte() 684 rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8; kvmppc_clear_ref_hpte() 746 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; kvmppc_hv_find_lock_hpte() 755 v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; kvmppc_hv_find_lock_hpte() 756 r = be64_to_cpu(hpte[i+1]); kvmppc_hv_find_lock_hpte() 812 v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; kvmppc_hpte_hv_fault() 813 r = be64_to_cpu(hpte[1]); kvmppc_hpte_hv_fault()
|
H A D | book3s_64_mmu_hv.c | 339 v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; kvmppc_mmu_book3s_64_hv_xlate() 468 hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; kvmppc_book3s_hv_page_fault() 469 hpte[1] = be64_to_cpu(hptep[1]); kvmppc_book3s_hv_page_fault() 585 if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] || kvmppc_book3s_hv_page_fault() 586 be64_to_cpu(hptep[1]) != hpte[1] || kvmppc_book3s_hv_page_fault() 607 if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) { kvmppc_book3s_hv_page_fault() 613 r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); kvmppc_book3s_hv_page_fault() 641 __unlock_hpte(hptep, be64_to_cpu(hptep[0])); kvmppc_book3s_hv_page_fault() 737 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) kvm_unmap_rmapp() 756 psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel); kvm_unmap_rmapp() 757 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && kvm_unmap_rmapp() 762 rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); kvm_unmap_rmapp() 770 __unlock_hpte(hptep, be64_to_cpu(hptep[0])); kvm_unmap_rmapp() 835 if (!(be64_to_cpu(hptep[1]) & HPTE_R_R)) kvm_age_rmapp() 841 while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) kvm_age_rmapp() 847 if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && kvm_age_rmapp() 848 (be64_to_cpu(hptep[1]) & HPTE_R_R)) { kvm_age_rmapp() 856 __unlock_hpte(hptep, be64_to_cpu(hptep[0])); kvm_age_rmapp() 888 if (be64_to_cpu(hp[1]) & HPTE_R_R) kvm_test_age_rmapp() 958 hptep1 = be64_to_cpu(hptep[1]); kvm_test_clear_dirty_npages() 973 __unlock_hpte(hptep, be64_to_cpu(hptep[0])); kvm_test_clear_dirty_npages() 980 v = be64_to_cpu(hptep[0]); kvm_test_clear_dirty_npages() 981 r = be64_to_cpu(hptep[1]); kvm_test_clear_dirty_npages() 1151 if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && hpte_dirty() 1152 (be64_to_cpu(hptp[1]) & rcbits_unset)) hpte_dirty() 1173 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { record_hpte() 1176 !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) record_hpte() 1188 v = be64_to_cpu(hptp[0]); record_hpte() 1196 if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) { record_hpte() 1197 revp->guest_rpte |= (be64_to_cpu(hptp[1]) & record_hpte() 1216 unlock_hpte(hptp, be64_to_cpu(hptp[0])); record_hpte() 1384 v = be64_to_cpu(hpte_v); kvm_htab_write() 1385 r = be64_to_cpu(hpte_r); kvm_htab_write() 1392 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) kvm_htab_write() 1418 if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) kvm_htab_write() 1563 if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))) debugfs_htab_read() 1570 v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK; debugfs_htab_read() 1571 hr = be64_to_cpu(hptp[1]); debugfs_htab_read()
|
H A D | book3s_pr_papr.c | 61 if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0) kvmppc_h_pr_enter() 97 pte[0] = be64_to_cpu((__force __be64)pte[0]); kvmppc_h_pr_remove() 98 pte[1] = be64_to_cpu((__force __be64)pte[1]); kvmppc_h_pr_remove() 175 pte[0] = be64_to_cpu((__force __be64)pte[0]); kvmppc_h_pr_bulk_remove() 176 pte[1] = be64_to_cpu((__force __be64)pte[1]); kvmppc_h_pr_bulk_remove() 215 pte[0] = be64_to_cpu((__force __be64)pte[0]); kvmppc_h_pr_protect() 216 pte[1] = be64_to_cpu((__force __be64)pte[1]); kvmppc_h_pr_protect()
|
H A D | book3s_hv_ras.c | 54 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); reload_slb() 55 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); reload_slb()
|
H A D | book3s_64_mmu.c | 278 u64 pte0 = be64_to_cpu(pteg[i]); kvmppc_mmu_book3s_64_xlate() 279 u64 pte1 = be64_to_cpu(pteg[i + 1]); kvmppc_mmu_book3s_64_xlate() 303 v = be64_to_cpu(pteg[i]); kvmppc_mmu_book3s_64_xlate() 304 r = be64_to_cpu(pteg[i+1]); kvmppc_mmu_book3s_64_xlate()
|
H A D | book3s.c | 855 kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf)); kvmppc_h_logical_ci_load()
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | fadump.c | 275 fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len); fadump_reserve_mem() 317 be64_to_cpu(fdm_active->rmr_region.destination_address) + fadump_reserve_mem() 318 be64_to_cpu(fdm_active->rmr_region.source_len); fadump_reserve_mem() 475 while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) { fadump_read_registers() 476 fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id), fadump_read_registers() 477 be64_to_cpu(reg_entry->reg_value)); fadump_read_registers() 606 addr = be64_to_cpu(fdm->cpu_state_data.destination_address); fadump_build_cpu_notes() 610 if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) { fadump_build_cpu_notes() 615 pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number)); fadump_build_cpu_notes() 642 if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) { fadump_build_cpu_notes() 648 cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK; fadump_build_cpu_notes() 831 return be64_to_cpu(fdm.rmr_region.destination_address) + paddr; fadump_relocate() 905 phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address); fadump_create_elfcore_headers() 954 addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len); register_fadump() 1026 be64_to_cpu(fdm_active->cpu_state_data.destination_address)); fadump_cleanup() 1066 destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address); fadump_invalidate_release_mem() 1186 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address), fadump_region_show() 1187 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) + fadump_region_show() 1188 be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1, fadump_region_show() 1189 be64_to_cpu(fdm_ptr->cpu_state_data.source_len), fadump_region_show() 1190 be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped)); fadump_region_show() 1194 be64_to_cpu(fdm_ptr->hpte_region.destination_address), fadump_region_show() 1195 be64_to_cpu(fdm_ptr->hpte_region.destination_address) + fadump_region_show() 1196 be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1, fadump_region_show() 1197 be64_to_cpu(fdm_ptr->hpte_region.source_len), fadump_region_show() 1198 be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped)); fadump_region_show() 1202 be64_to_cpu(fdm_ptr->rmr_region.destination_address), fadump_region_show() 1203 be64_to_cpu(fdm_ptr->rmr_region.destination_address) + fadump_region_show() 1204 be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1, fadump_region_show() 1205 be64_to_cpu(fdm_ptr->rmr_region.source_len), fadump_region_show() 1206 be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped)); fadump_region_show() 1210 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address))) fadump_region_show() 1218 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1, fadump_region_show() 1219 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - fadump_region_show() 1221 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - fadump_region_show()
|
H A D | mce_power.c | 107 unsigned long rb = be64_to_cpu(slb->save_area[i].esid); flush_and_reload_slb() 108 unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); flush_and_reload_slb()
|
H A D | time.c | 215 if (i == be64_to_cpu(vpa->dtl_idx)) scan_dispatch_log() 217 while (i < be64_to_cpu(vpa->dtl_idx)) { scan_dispatch_log() 218 dtb = be64_to_cpu(dtl->timebase); scan_dispatch_log() 222 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { scan_dispatch_log() 224 i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; scan_dispatch_log() 274 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) { calculate_stolen_time()
|
H A D | prom_init.c | 1293 size = be64_to_cpu(val64); prom_instantiate_opal() 1298 align = be64_to_cpu(val64); prom_instantiate_opal() 2280 be64_to_cpu(mem_reserve_map[i].base), flatten_device_tree() 2281 be64_to_cpu(mem_reserve_map[i].size)); flatten_device_tree()
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_dquot.c | 142 ASSERT(be64_to_cpu(d->d_blk_softlimit) <= xfs_qm_adjust_dqtimers() 143 be64_to_cpu(d->d_blk_hardlimit)); xfs_qm_adjust_dqtimers() 145 ASSERT(be64_to_cpu(d->d_ino_softlimit) <= xfs_qm_adjust_dqtimers() 146 be64_to_cpu(d->d_ino_hardlimit)); xfs_qm_adjust_dqtimers() 148 ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= xfs_qm_adjust_dqtimers() 149 be64_to_cpu(d->d_rtb_hardlimit)); xfs_qm_adjust_dqtimers() 154 (be64_to_cpu(d->d_bcount) > xfs_qm_adjust_dqtimers() 155 be64_to_cpu(d->d_blk_softlimit))) || xfs_qm_adjust_dqtimers() 157 (be64_to_cpu(d->d_bcount) > xfs_qm_adjust_dqtimers() 158 be64_to_cpu(d->d_blk_hardlimit)))) { xfs_qm_adjust_dqtimers() 166 (be64_to_cpu(d->d_bcount) <= xfs_qm_adjust_dqtimers() 167 be64_to_cpu(d->d_blk_softlimit))) && xfs_qm_adjust_dqtimers() 169 (be64_to_cpu(d->d_bcount) <= xfs_qm_adjust_dqtimers() 170 be64_to_cpu(d->d_blk_hardlimit)))) { xfs_qm_adjust_dqtimers() 177 (be64_to_cpu(d->d_icount) > xfs_qm_adjust_dqtimers() 178 be64_to_cpu(d->d_ino_softlimit))) || xfs_qm_adjust_dqtimers() 180 (be64_to_cpu(d->d_icount) > xfs_qm_adjust_dqtimers() 181 be64_to_cpu(d->d_ino_hardlimit)))) { xfs_qm_adjust_dqtimers() 189 (be64_to_cpu(d->d_icount) <= xfs_qm_adjust_dqtimers() 190 be64_to_cpu(d->d_ino_softlimit))) && xfs_qm_adjust_dqtimers() 192 (be64_to_cpu(d->d_icount) <= xfs_qm_adjust_dqtimers() 193 be64_to_cpu(d->d_ino_hardlimit)))) { xfs_qm_adjust_dqtimers() 200 (be64_to_cpu(d->d_rtbcount) > xfs_qm_adjust_dqtimers() 201 be64_to_cpu(d->d_rtb_softlimit))) || xfs_qm_adjust_dqtimers() 203 (be64_to_cpu(d->d_rtbcount) > xfs_qm_adjust_dqtimers() 204 be64_to_cpu(d->d_rtb_hardlimit)))) { xfs_qm_adjust_dqtimers() 212 (be64_to_cpu(d->d_rtbcount) <= xfs_qm_adjust_dqtimers() 213 be64_to_cpu(d->d_rtb_softlimit))) && xfs_qm_adjust_dqtimers() 215 (be64_to_cpu(d->d_rtbcount) <= xfs_qm_adjust_dqtimers() 216 be64_to_cpu(d->d_rtb_hardlimit)))) { xfs_qm_adjust_dqtimers() 277 dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit); xfs_dquot_set_prealloc_limits() 278 dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit); xfs_dquot_set_prealloc_limits() 647 dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); xfs_qm_dqread() 648 dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); xfs_qm_dqread() 649 dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount); xfs_qm_dqread()
|
H A D | xfs_qm_bhv.c | 39 be64_to_cpu(dqp->q_core.d_blk_softlimit) : xfs_fill_statvfs_from_dquot() 40 be64_to_cpu(dqp->q_core.d_blk_hardlimit); xfs_fill_statvfs_from_dquot() 49 be64_to_cpu(dqp->q_core.d_ino_softlimit) : xfs_fill_statvfs_from_dquot() 50 be64_to_cpu(dqp->q_core.d_ino_hardlimit); xfs_fill_statvfs_from_dquot()
|
H A D | xfs_trans_dquot.c | 386 ASSERT(be64_to_cpu(d->d_bcount) >= xfs_trans_apply_dquot_deltas() 390 ASSERT(be64_to_cpu(d->d_rtbcount) >= xfs_trans_apply_dquot_deltas() 394 ASSERT(be64_to_cpu(d->d_icount) >= xfs_trans_apply_dquot_deltas() 488 be64_to_cpu(dqp->q_core.d_bcount)); xfs_trans_apply_dquot_deltas() 490 be64_to_cpu(dqp->q_core.d_icount)); xfs_trans_apply_dquot_deltas() 492 be64_to_cpu(dqp->q_core.d_rtbcount)); xfs_trans_apply_dquot_deltas() 607 hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); xfs_trans_dqresv() 610 softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit); xfs_trans_dqresv() 619 hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit); xfs_trans_dqresv() 622 softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit); xfs_trans_dqresv() 659 total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; xfs_trans_dqresv() 663 hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); xfs_trans_dqresv() 666 softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); xfs_trans_dqresv() 713 ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); xfs_trans_dqresv() 714 ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); xfs_trans_dqresv() 715 ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); xfs_trans_dqresv()
|
H A D | xfs_qm_syscalls.c | 453 be64_to_cpu(ddq->d_blk_hardlimit); xfs_qm_scall_setqlim() 456 be64_to_cpu(ddq->d_blk_softlimit); xfs_qm_scall_setqlim() 470 be64_to_cpu(ddq->d_rtb_hardlimit); xfs_qm_scall_setqlim() 473 be64_to_cpu(ddq->d_rtb_softlimit); xfs_qm_scall_setqlim() 487 be64_to_cpu(ddq->d_ino_hardlimit); xfs_qm_scall_setqlim() 490 be64_to_cpu(ddq->d_ino_softlimit); xfs_qm_scall_setqlim() 667 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); xfs_qm_scall_getquota() 669 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); xfs_qm_scall_getquota() 670 dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); xfs_qm_scall_getquota() 671 dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); xfs_qm_scall_getquota() 679 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); xfs_qm_scall_getquota() 681 XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); xfs_qm_scall_getquota()
|
H A D | xfs_dquot.h | 150 freesp = be64_to_cpu(dqp->q_core.d_blk_hardlimit) - dqp->q_res_bcount; xfs_dquot_lowsp()
|
H A D | xfs_qm.c | 642 qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); xfs_qm_init_quotainfo() 643 qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); xfs_qm_init_quotainfo() 644 qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); xfs_qm_init_quotainfo() 645 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); xfs_qm_init_quotainfo() 646 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); xfs_qm_init_quotainfo() 647 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); xfs_qm_init_quotainfo()
|
H A D | xfs_log_recover.c | 967 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); xlog_find_tail() 984 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); xlog_find_tail() 985 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); xlog_find_tail() 1855 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn); xlog_recover_get_buf_lsn() 1863 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); xlog_recover_get_buf_lsn() 1868 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn); xlog_recover_get_buf_lsn() 1872 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn); xlog_recover_get_buf_lsn() 1876 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn); xlog_recover_get_buf_lsn() 1880 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn); xlog_recover_get_buf_lsn() 1886 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn); xlog_recover_get_buf_lsn() 1899 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); xlog_recover_get_buf_lsn() 1917 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn); xlog_recover_get_buf_lsn() 2588 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn); xlog_recover_inode_pass2() 2901 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); xlog_recover_dquot_pass2() 3606 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
|
H A D | xfs_trace.h | 839 __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); 840 __entry->icount = be64_to_cpu(dqp->q_core.d_icount); 842 be64_to_cpu(dqp->q_core.d_blk_hardlimit); 844 be64_to_cpu(dqp->q_core.d_blk_softlimit); 846 be64_to_cpu(dqp->q_core.d_ino_hardlimit); 848 be64_to_cpu(dqp->q_core.d_ino_softlimit);
|
H A D | xfs_dir2_readdir.c | 236 be64_to_cpu(dep->inumber), xfs_dir2_block_getdents() 618 be64_to_cpu(dep->inumber), xfs_dir2_leaf_getdents()
|
H A D | xfs_bmap_util.c | 344 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); xfs_bmap_count_tree() 353 nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib); xfs_bmap_count_tree() 359 bno = be64_to_cpu(*pp); xfs_bmap_count_tree() 371 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); xfs_bmap_count_tree() 424 bno = be64_to_cpu(*pp); xfs_bmap_count_blocks()
|
H A D | xfs_log.c | 1784 XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); xlog_sync() 2358 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); xlog_write() 2571 lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); xlog_get_lowest_lsn() 2675 be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { xlog_state_do_callback() 2702 be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); xlog_state_do_callback() 2705 be64_to_cpu(iclog->ic_header.h_lsn)); xlog_state_do_callback() 3237 lsn = be64_to_cpu(iclog->ic_header.h_lsn); _xfs_log_force() 3247 if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && _xfs_log_force() 3360 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { _xfs_log_force_lsn()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
H A D | ehca_mcast.c | 82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); ehca_attach_mcast() 83 interface_id = be64_to_cpu(my_gid.global.interface_id); ehca_attach_mcast() 119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); ehca_detach_mcast() 120 interface_id = be64_to_cpu(my_gid.global.interface_id); ehca_detach_mcast()
|
H A D | ipz_pt_fn.h | 280 return be64_to_cpu(qpt->qpts[0]); ipz_qpt_get_firstpage()
|
/linux-4.1.27/crypto/ |
H A D | gf128mul.c | 127 u64 a = be64_to_cpu(x->a); gf128mul_x_lle() 128 u64 b = be64_to_cpu(x->b); gf128mul_x_lle() 137 u64 a = be64_to_cpu(x->a); gf128mul_x_bbe() 138 u64 b = be64_to_cpu(x->b); gf128mul_x_bbe() 158 u64 a = be64_to_cpu(x->a); gf128mul_x8_lle() 159 u64 b = be64_to_cpu(x->b); gf128mul_x8_lle() 168 u64 a = be64_to_cpu(x->a); gf128mul_x8_bbe() 169 u64 b = be64_to_cpu(x->b); gf128mul_x8_bbe()
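In gf128mul.c a field element is a be128, i.e. two big-endian 64-bit halves; each "multiply by x" helper loads both halves into host order, shifts across the 64-bit boundary, folds in the reduction constant and stores the halves back. A self-contained sketch of the big-big-endian variant, using a local stand-in type instead of crypto's be128 (0x87 is the usual reduction constant for this representation):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    typedef struct { __be64 a, b; } be128_ex;       /* stand-in for crypto's be128 */

    static void gf128_mul_x_bbe_sketch(be128_ex *r, const be128_ex *x)
    {
            u64 a   = be64_to_cpu(x->a);
            u64 b   = be64_to_cpu(x->b);
            u64 red = (a & (1ULL << 63)) ? 0x87 : 0;  /* reduce if the top bit falls off */

            r->a = cpu_to_be64((a << 1) | (b >> 63));
            r->b = cpu_to_be64((b << 1) ^ red);
    }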
|
H A D | cmac.c | 76 _const[0] = be64_to_cpu(consts[1]); crypto_cmac_digest_setkey() 77 _const[1] = be64_to_cpu(consts[0]); crypto_cmac_digest_setkey() 92 _const[0] = be64_to_cpu(consts[0]); crypto_cmac_digest_setkey()
|
H A D | khazad.c | 810 state = be64_to_cpu(*src) ^ roundKey[0]; khazad_crypt()
|
H A D | wp512.c | 791 block[i] = be64_to_cpu(buffer[i]); wp512_process_buffer()
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
H A D | hvconsole.c | 48 lbuf[0] = be64_to_cpu(retbuf[1]); hvc_get_chars() 49 lbuf[1] = be64_to_cpu(retbuf[2]); hvc_get_chars()
|
H A D | hotplug-memory.c | 112 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); dlpar_clone_drconf_property() 185 base = be64_to_cpu(*(unsigned long *)regs); pseries_remove_mem_node() 645 base = be64_to_cpu(*(unsigned long *)regs); pseries_add_mem_node() 690 be64_to_cpu(old_drmem[i].base_addr), pseries_update_drconf_memory() 697 rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr), pseries_update_drconf_memory()
|
H A D | hvcserver.c | 166 last_p_partition_ID = be64_to_cpu(pi_buff[0]); hvcs_get_partner_info() 167 last_p_unit_address = be64_to_cpu(pi_buff[1]); hvcs_get_partner_info()
|
H A D | iommu.c | 133 return be64_to_cpu(*tcep); tce_get_pseries() 379 dma_offset = next + be64_to_cpu(maprange->dma_base); tce_clearrange_multi_pSeriesLP() 436 dma_offset = next + be64_to_cpu(maprange->dma_base); tce_setrange_multi_pSeriesLP() 783 dma_addr = be64_to_cpu(direct64->dma_base); find_existing_ddw() 1046 dma_addr = be64_to_cpu(ddwprop->dma_base); enable_ddw()
|
H A D | lparcfg.c | 396 cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults); for_each_possible_cpu() 397 cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time); for_each_possible_cpu()
|
H A D | dtl.c | 89 if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) consume_dtle()
|
/linux-4.1.27/net/netfilter/ |
H A D | nft_counter.c | 73 priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); nft_counter_init() 75 priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); nft_counter_init()
|
H A D | nft_limit.c | 66 priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE])); nft_limit_init() 67 priv->unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT])); nft_limit_init()
|
H A D | nfnetlink_acct.c | 112 *quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA])); nfnl_acct_new() 120 be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES]))); nfnl_acct_new() 124 be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]))); nfnl_acct_new()
|
H A D | nft_dynset.c | 142 timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); nft_dynset_init()
|
H A D | nf_tables_api.c | 1139 stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); nft_stats_alloc() 1140 stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); nft_stats_alloc() 1211 handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); nf_tables_newchain() 1699 return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla))); nf_tables_rule_lookup() 1974 handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE])); nf_tables_newrule() 1998 pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); nf_tables_newrule() 2698 timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT])); nf_tables_newset() 3353 timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT])); nft_add_set_elem()
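The netfilter hits in this block all follow one pattern: 64-bit netlink attributes (counters, rates, timeouts, handles) are carried big-endian, fetched with nla_get_be64() and converted once with be64_to_cpu(). A minimal sketch of that step, with the attribute passed in directly so no specific attribute policy is implied:

    #include <linux/types.h>
    #include <net/netlink.h>

    /* illustrative: decode a big-endian 64-bit netlink attribute */
    static u64 nla_be64_to_host(const struct nlattr *attr)
    {
            return be64_to_cpu(nla_get_be64(attr));
    }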
|
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_dquot_buf.c | 113 be64_to_cpu(ddq->d_bcount) > xfs_dqcheck() 114 be64_to_cpu(ddq->d_blk_softlimit)) { xfs_dqcheck() 124 be64_to_cpu(ddq->d_icount) > xfs_dqcheck() 125 be64_to_cpu(ddq->d_ino_softlimit)) { xfs_dqcheck() 135 be64_to_cpu(ddq->d_rtbcount) > xfs_dqcheck() 136 be64_to_cpu(ddq->d_rtb_softlimit)) { xfs_dqcheck()
|
H A D | xfs_inode_buf.c | 220 to->di_size = be64_to_cpu(from->di_size); xfs_dinode_from_disk() 221 to->di_nblocks = be64_to_cpu(from->di_nblocks); xfs_dinode_from_disk() 233 to->di_changecount = be64_to_cpu(from->di_changecount); xfs_dinode_from_disk() 236 to->di_flags2 = be64_to_cpu(from->di_flags2); xfs_dinode_from_disk() 237 to->di_ino = be64_to_cpu(from->di_ino); xfs_dinode_from_disk() 238 to->di_lsn = be64_to_cpu(from->di_lsn); xfs_dinode_from_disk() 311 if (be64_to_cpu(dip->di_ino) != ip->i_ino) xfs_dinode_verify() 426 ip->i_d.di_ino = be64_to_cpu(dip->di_ino); xfs_iread()
|
H A D | xfs_bmap_btree.c | 181 return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21)); xfs_bmbt_disk_get_blockcount() 191 return ((xfs_fileoff_t)be64_to_cpu(r->l0) & xfs_bmbt_disk_get_startoff() 450 args.fsbno = be64_to_cpu(start->l); xfs_bmbt_alloc_block() 609 xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff), xfs_bmbt_init_rec_from_key() 634 return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) - xfs_bmbt_key_diff() 652 if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn) xfs_bmbt_verify() 658 if (be64_to_cpu(block->bb_u.l.bb_owner) == 0) xfs_bmbt_verify() 683 !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))) xfs_bmbt_verify() 687 !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))) xfs_bmbt_verify() 734 return be64_to_cpu(k1->bmbt.br_startoff) < xfs_bmbt_keys_inorder() 735 be64_to_cpu(k2->bmbt.br_startoff); xfs_bmbt_keys_inorder()
|
H A D | xfs_sb.c | 326 to->sb_dblocks = be64_to_cpu(from->sb_dblocks); __xfs_sb_from_disk() 327 to->sb_rblocks = be64_to_cpu(from->sb_rblocks); __xfs_sb_from_disk() 328 to->sb_rextents = be64_to_cpu(from->sb_rextents); __xfs_sb_from_disk() 330 to->sb_logstart = be64_to_cpu(from->sb_logstart); __xfs_sb_from_disk() 331 to->sb_rootino = be64_to_cpu(from->sb_rootino); __xfs_sb_from_disk() 332 to->sb_rbmino = be64_to_cpu(from->sb_rbmino); __xfs_sb_from_disk() 333 to->sb_rsumino = be64_to_cpu(from->sb_rsumino); __xfs_sb_from_disk() 352 to->sb_icount = be64_to_cpu(from->sb_icount); __xfs_sb_from_disk() 353 to->sb_ifree = be64_to_cpu(from->sb_ifree); __xfs_sb_from_disk() 354 to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks); __xfs_sb_from_disk() 355 to->sb_frextents = be64_to_cpu(from->sb_frextents); __xfs_sb_from_disk() 356 to->sb_uquotino = be64_to_cpu(from->sb_uquotino); __xfs_sb_from_disk() 357 to->sb_gquotino = be64_to_cpu(from->sb_gquotino); __xfs_sb_from_disk() 378 to->sb_pquotino = be64_to_cpu(from->sb_pquotino); __xfs_sb_from_disk() 379 to->sb_lsn = be64_to_cpu(from->sb_lsn); __xfs_sb_from_disk()
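XFS keeps all of its metadata big-endian on disk; __xfs_sb_from_disk() above is the canonical unpacking routine, copying each __be64 superblock field into the host-order in-core structure. A cut-down sketch of that conversion style (the two structs are abbreviated stand-ins, not the full xfs_dsb/xfs_sb layouts):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct disk_sb_ex   { __be64 sb_dblocks; __be64 sb_icount; __be64 sb_ifree; };
    struct incore_sb_ex { u64    sb_dblocks; u64    sb_icount; u64    sb_ifree; };

    static void sb_from_disk_sketch(struct incore_sb_ex *to, const struct disk_sb_ex *from)
    {
            to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
            to->sb_icount  = be64_to_cpu(from->sb_icount);
            to->sb_ifree   = be64_to_cpu(from->sb_ifree);
    }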
|
H A D | xfs_symlink_remote.c | 92 if (ino != be64_to_cpu(dsl->sl_owner)) xfs_symlink_hdr_ok() 112 if (bp->b_bn != be64_to_cpu(dsl->sl_blkno)) xfs_symlink_verify()
|
H A D | xfs_dir2_sf.c | 118 i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM; xfs_dir2_block_sfsize() 125 parent = be64_to_cpu(dep->inumber); xfs_dir2_block_sfsize() 218 ASSERT(be64_to_cpu(dep->inumber) == dp->i_ino); xfs_dir2_block_to_sf() 224 ASSERT(be64_to_cpu(dep->inumber) == xfs_dir2_block_to_sf() 236 be64_to_cpu(dep->inumber)); xfs_dir2_block_to_sf()
|
H A D | xfs_attr_remote.c | 77 if (bno != be64_to_cpu(rmt->rm_blkno)) xfs_attr3_rmt_hdr_ok() 83 if (ino != be64_to_cpu(rmt->rm_owner)) xfs_attr3_rmt_hdr_ok() 105 if (be64_to_cpu(rmt->rm_blkno) != bno) xfs_attr3_rmt_verify()
|
H A D | xfs_dir2_block.c | 72 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_block_verify() 639 args->inumber = be64_to_cpu(dep->inumber); xfs_dir2_block_lookup() 866 ASSERT(be64_to_cpu(dep->inumber) != args->inumber); xfs_dir2_block_replace()
|
H A D | xfs_bmap.c | 297 ASSERT(be64_to_cpu(prevp->br_startoff) < xfs_check_block() 298 be64_to_cpu(keyp->br_startoff)); xfs_check_block() 318 (unsigned long long)be64_to_cpu(*thispa)); xfs_check_block() 366 bno = be64_to_cpu(*pp); xfs_bmap_check_leaf_extents() 399 bno = be64_to_cpu(*pp); xfs_bmap_check_leaf_extents() 427 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); xfs_bmap_check_leaf_extents() 686 cbno = be64_to_cpu(*pp); xfs_bmap_btree_to_extents() 1274 bno = be64_to_cpu(*pp); xfs_bmap_read_extents() 1291 bno = be64_to_cpu(*pp); xfs_bmap_read_extents() 1323 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); xfs_bmap_read_extents() 1334 trp->l0 = be64_to_cpu(frp->l0); xfs_bmap_read_extents() 1335 trp->l1 = be64_to_cpu(frp->l1); xfs_bmap_read_extents()
|
H A D | xfs_dir2_leaf.c | 70 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf1_check() 165 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leaf_verify() 1190 args->inumber = be64_to_cpu(dep->inumber); xfs_dir2_leaf_lookup() 1516 ASSERT(args->inumber != be64_to_cpu(dep->inumber)); xfs_dir2_leaf_replace()
|
H A D | xfs_dir2_data.c | 165 !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber))); __xfs_dir3_data_check() 225 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_data_verify()
|
H A D | xfs_inode_fork.c | 91 be64_to_cpu(dip->di_nblocks))) { xfs_iformat_fork() 98 be64_to_cpu(dip->di_nblocks)); xfs_iformat_fork() 155 di_size = be64_to_cpu(dip->di_size); xfs_iformat_fork()
|
H A D | xfs_dir2_node.c | 73 if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn) xfs_dir3_leafn_check() 98 if (be64_to_cpu(hdr3->blkno) != bp->b_bn) xfs_dir3_free_verify() 772 args->inumber = be64_to_cpu(dep->inumber); xfs_dir2_leafn_lookup_for_entry() 2189 ASSERT(inum != be64_to_cpu(dep->inumber)); xfs_dir2_node_replace()
|
H A D | xfs_btree.c | 81 be64_to_cpu(block->bb_u.l.bb_leftsib))) && xfs_btree_check_lblock() 85 be64_to_cpu(block->bb_u.l.bb_rightsib))); xfs_btree_check_lblock() 210 be64_to_cpu((&ptr->l)[index]), level); xfs_btree_check_ptr() 772 xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); xfs_btree_readahead_lblock() 773 xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); xfs_btree_readahead_lblock() 855 return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); xfs_btree_ptr_to_daddr()
|
/linux-4.1.27/drivers/scsi/csiostor/ |
H A D | csio_attr.c | 328 fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) + csio_get_stats() 329 be64_to_cpu(fcoe_port_stats.tx_mcast_frames) + csio_get_stats() 330 be64_to_cpu(fcoe_port_stats.tx_ucast_frames) + csio_get_stats() 331 be64_to_cpu(fcoe_port_stats.tx_offload_frames)); csio_get_stats() 332 fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) + csio_get_stats() 333 be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) + csio_get_stats() 334 be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) + csio_get_stats() 335 be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) / csio_get_stats() 337 fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) + csio_get_stats() 338 be64_to_cpu(fcoe_port_stats.rx_mcast_frames) + csio_get_stats() 339 be64_to_cpu(fcoe_port_stats.rx_ucast_frames)); csio_get_stats() 340 fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) + csio_get_stats() 341 be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) + csio_get_stats() 342 be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) / csio_get_stats() 344 fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames); csio_get_stats()
|
/linux-4.1.27/arch/powerpc/mm/ |
H A D | hash_native_64.c | 209 if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { native_hpte_insert() 212 if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) native_hpte_insert() 259 hpte_v = be64_to_cpu(hptep->v); native_hpte_remove() 264 hpte_v = be64_to_cpu(hptep->v); native_hpte_remove() 297 hpte_v = be64_to_cpu(hptep->v); native_hpte_updatepp() 311 hpte_v = be64_to_cpu(hptep->v); native_hpte_updatepp() 318 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & native_hpte_updatepp() 352 hpte_v = be64_to_cpu(hptep->v); native_hpte_find() 387 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & native_hpte_updateboltedpp() 411 hpte_v = be64_to_cpu(hptep->v); native_hpte_invalidate() 468 hpte_v = be64_to_cpu(hptep->v); native_hugepage_invalidate() 520 unsigned long hpte_v = be64_to_cpu(hpte->v); hpte_decode() 521 unsigned long hpte_r = be64_to_cpu(hpte->r); hpte_decode() 614 hpte_v = be64_to_cpu(hptep->v); native_hpte_clear() 666 hpte_v = be64_to_cpu(hptep->v); pte_iterate_hashed_subpages()
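On server powerpc the hashed page table is kept big-endian in memory even when the kernel runs little-endian, so hash_native_64.c (and the KVM HPT code earlier in this listing) reads hptep->v / hptep->r through be64_to_cpu() and writes them back with cpu_to_be64(). A small sketch of a validity check; the struct and flag value are stand-ins for the real hash_pte/HPTE_V_VALID definitions:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct hash_pte_ex { __be64 v; __be64 r; };     /* HPTE as stored in the HPT */
    #define EX_HPTE_V_VALID 0x1UL                   /* stand-in for HPTE_V_VALID */

    static bool hpte_is_valid(const struct hash_pte_ex *hptep)
    {
            return (be64_to_cpu(hptep->v) & EX_HPTE_V_VALID) != 0;
    }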
|
H A D | slb.c | 115 be64_to_cpu(get_slb_shadow()->save_area[2].vsid); __slb_flush_and_rebolt()
|
H A D | hash_utils_64.c | 431 phys_addr = be64_to_cpu(addr_prop[0]); htab_dt_scan_hugepage_blocks() 432 block_size = be64_to_cpu(addr_prop[1]); htab_dt_scan_hugepage_blocks()
|
/linux-4.1.27/fs/gfs2/ |
H A D | glops.c | 323 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) gfs2_dinode_in() 325 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); gfs2_dinode_in() 339 i_size_write(&ip->i_inode, be64_to_cpu(str->di_size)); gfs2_dinode_in() 340 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); gfs2_dinode_in() 341 atime.tv_sec = be64_to_cpu(str->di_atime); gfs2_dinode_in() 345 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); gfs2_dinode_in() 347 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); gfs2_dinode_in() 350 ip->i_goal = be64_to_cpu(str->di_goal_meta); gfs2_dinode_in() 351 ip->i_generation = be64_to_cpu(str->di_generation); gfs2_dinode_in() 354 ip->i_eattr = be64_to_cpu(str->di_eattr); gfs2_dinode_in()
|
H A D | quota.c | 633 x = be64_to_cpu(qc->qc_change) + change; do_qc() 1019 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= need_sync() 1020 (s64)be64_to_cpu(qd->qd_qb.qb_limit)) need_sync() 1025 value += (s64)be64_to_cpu(qd->qd_qb.qb_value); need_sync() 1026 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) need_sync() 1137 warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); gfs2_quota_check() 1138 limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); gfs2_quota_check() 1139 value = (s64)be64_to_cpu(qd->qd_qb.qb_value); gfs2_quota_check() 1307 s64 qc_change = be64_to_cpu(qc->qc_change); gfs2_quota_init() 1554 fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift; gfs2_get_dqblk() 1555 fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift; gfs2_get_dqblk() 1556 fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift; gfs2_get_dqblk() 1613 ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn))) gfs2_set_dqblk() 1617 ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit))) gfs2_set_dqblk() 1621 ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value))) gfs2_set_dqblk()
|
H A D | dir.c | 747 *leaf_out = be64_to_cpu(*(hash + index)); get_leaf_nr() 794 ln = be64_to_cpu(leaf->lf_next); gfs2_dirent_search() 1266 be64_to_cpu(dent->de_inum.no_addr), do_filldir_main() 1319 lfn = be64_to_cpu(lf->lf_next); gfs2_dir_read_leaf() 1346 lfn = be64_to_cpu(lf->lf_next); gfs2_dir_read_leaf() 1408 blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]); gfs2_dir_readahead() 1462 be64_to_cpu(lp[index])); dir_e_read() 1561 addr = be64_to_cpu(dent->de_inum.no_addr); gfs2_dir_search() 1562 formal_ino = be64_to_cpu(dent->de_inum.no_formal_ino); gfs2_dir_search() 1583 if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr) gfs2_dir_check() 1585 if (be64_to_cpu(dent->de_inum.no_formal_ino) != gfs2_dir_check() 1638 bn = be64_to_cpu(oleaf->lf_next); dir_new_leaf() 1918 nblk = be64_to_cpu(tmp_leaf->lf_next); leaf_dealloc() 1953 nblk = be64_to_cpu(tmp_leaf->lf_next); leaf_dealloc() 2019 leaf_no = be64_to_cpu(lp[index]); gfs2_dir_exhash_dealloc()
|
H A D | lops.c | 548 blkno = be64_to_cpu(*ptr++); buf_lo_scan_elements() 712 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset)); revoke_lo_scan_elements() 785 blkno = be64_to_cpu(*ptr++); databuf_lo_scan_elements() 786 esc = be64_to_cpu(*ptr++); databuf_lo_scan_elements()
|
H A D | bmap.c | 284 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); gfs2_metapath_ra() 324 dblock = be64_to_cpu(*ptr); lookup_metapath() 366 u64 d = be64_to_cpu(*ptr); gfs2_extent_length() 377 } while(be64_to_cpu(*ptr) == d); gfs2_extent_length() 653 map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr)); gfs2_block_map() 760 bn = be64_to_cpu(*p); do_strip() 812 bn = be64_to_cpu(*p); do_strip() 912 bn = be64_to_cpu(*top); recursive_scan()
|
H A D | xattr.c | 144 bn = be64_to_cpu(*eablk); ea_foreach() 253 bn = be64_to_cpu(*dataptrs); ea_dealloc_unstuffed() 280 bn = be64_to_cpu(*dataptrs); ea_dealloc_unstuffed() 480 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, gfs2_iter_unstuffed() 1323 bn = be64_to_cpu(*eablk); ea_dealloc_indirect() 1368 bn = be64_to_cpu(*eablk); ea_dealloc_indirect()
|
H A D | ops_fstype.c | 198 sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr); gfs2_sb_in() 199 sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino); gfs2_sb_in() 200 sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr); gfs2_sb_in() 201 sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino); gfs2_sb_in()
|
H A D | super.c | 455 sc->sc_total = be64_to_cpu(str->sc_total); gfs2_statfs_change_in() 456 sc->sc_free = be64_to_cpu(str->sc_free); gfs2_statfs_change_in() 457 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); gfs2_statfs_change_in()
|
H A D | recovery.c | 128 lh->lh_sequence = be64_to_cpu(str->lh_sequence); gfs2_log_header_in()
|
H A D | rgrp.c | 919 rgd->rd_addr = be64_to_cpu(buf.ri_addr); read_rindex_entry() 921 rgd->rd_data0 = be64_to_cpu(buf.ri_data0); read_rindex_entry() 1065 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); gfs2_rgrp_in() 1232 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration); update_rgrp_lvb()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
H A D | cxio_dbg.c | 114 uint size = (uint)(be64_to_cpu(*data) & 0xff); cxio_dump_wqe() 120 (unsigned long long) be64_to_cpu(*data)); cxio_dump_wqe() 133 (unsigned long long) be64_to_cpu(*data)); cxio_dump_wce()
|
/linux-4.1.27/arch/x86/include/asm/crypto/ |
H A D | glue_helper.h | 86 dst->a = cpu_to_le64(be64_to_cpu(src->a)); be128_to_le128() 87 dst->b = cpu_to_le64(be64_to_cpu(src->b)); be128_to_le128()
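be128_to_le128() above re-encodes a 128-bit block for x86 crypto glue code that wants the little-endian layout: each big-endian half is brought to host order and immediately re-stored little-endian. The same two assignments, shown here with local stand-in types so the sketch is self-contained:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    typedef struct { __be64 a, b; } be128_ex;
    typedef struct { __le64 a, b; } le128_ex;

    static void be128_to_le128_sketch(le128_ex *dst, const be128_ex *src)
    {
            dst->a = cpu_to_le64(be64_to_cpu(src->a));
            dst->b = cpu_to_le64(be64_to_cpu(src->b));
    }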
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
H A D | ev.c | 75 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]), dump_err_cqe() 76 be64_to_cpu(p[3])); dump_err_cqe()
|
H A D | t4.h | 256 #define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts))) 257 #define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts))) 258 #define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
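The CQE_* macros above follow a common driver pattern: convert a big-endian 64-bit descriptor word once, then pick out fields with shift/mask helpers. A stand-alone sketch of that pattern follows; the EX_* names and bit positions are invented for illustration and do not reflect the real Chelsio CQE layout, and be64toh() stands in for be64_to_cpu().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: bit 63 = generation, bit 62 = overflow, bits 61..0 = timestamp */
#define EX_GENBIT_S 63
#define EX_GENBIT_M 0x1ULL
#define EX_OVFBIT_S 62
#define EX_OVFBIT_M 0x1ULL
#define EX_TS_S     0
#define EX_TS_M     0x3fffffffffffffffULL

#define EX_FIELD_G(x, s, m) (((x) >> (s)) & (m))

int main(void)
{
    /* a descriptor word as the hardware would have written it (big-endian) */
    uint64_t wire = htobe64((1ULL << EX_GENBIT_S) | 0x1234ULL);
    uint64_t host = be64toh(wire);    /* the be64_to_cpu() step */

    printf("gen=%u ovf=%u ts=%llu\n",
           (unsigned)EX_FIELD_G(host, EX_GENBIT_S, EX_GENBIT_M),
           (unsigned)EX_FIELD_G(host, EX_OVFBIT_S, EX_OVFBIT_M),
           (unsigned long long)EX_FIELD_G(host, EX_TS_S, EX_TS_M));
    return 0;
}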
|
H A D | device.c | 1160 (unsigned long long)be64_to_cpu(*rsp), c4iw_uld_rx_handler() 1161 (unsigned long long)be64_to_cpu( c4iw_uld_rx_handler()
|
/linux-4.1.27/arch/mips/mti-sead3/ |
H A D | sead3-setup.c | 80 new_value = be64_to_cpu(*prop_value); parse_memsize_param()
|
/linux-4.1.27/fs/logfs/ |
H A D | super.c | 402 super->s_size = be64_to_cpu(ds->ds_filesystem_size); __logfs_read_sb() 403 super->s_root_reserve = be64_to_cpu(ds->ds_root_reserve); __logfs_read_sb() 404 super->s_speed_reserve = be64_to_cpu(ds->ds_speed_reserve); __logfs_read_sb() 415 super->s_feature_incompat = be64_to_cpu(ds->ds_feature_incompat); __logfs_read_sb() 416 super->s_feature_ro_compat = be64_to_cpu(ds->ds_feature_ro_compat); __logfs_read_sb() 417 super->s_feature_compat = be64_to_cpu(ds->ds_feature_compat); __logfs_read_sb() 418 super->s_feature_flags = be64_to_cpu(ds->ds_feature_flags); __logfs_read_sb()
|
H A D | journal.c | 78 super->s_gec = be64_to_cpu(dynsb->ds_gec); read_dynsb() 79 super->s_sweeper = be64_to_cpu(dynsb->ds_sweeper); read_dynsb() 80 super->s_victim_ino = be64_to_cpu(dynsb->ds_victim_ino); read_dynsb() 81 super->s_rename_dir = be64_to_cpu(dynsb->ds_rename_dir); read_dynsb() 82 super->s_rename_pos = be64_to_cpu(dynsb->ds_rename_pos); read_dynsb() 83 super->s_used_bytes = be64_to_cpu(dynsb->ds_used_bytes); read_dynsb() 95 super->s_last_ino = be64_to_cpu(da->da_last_ino); read_anchor() 98 i_size_write(inode, be64_to_cpu(da->da_size)); read_anchor() 99 li->li_used_bytes = be64_to_cpu(da->da_used_bytes); read_anchor() 102 li->li_data[i] = be64_to_cpu(da->da_data[i]); read_anchor() 303 err = read_je(sb, be64_to_cpu(super->s_je_array[i])); logfs_read_segment() 328 return be64_to_cpu(sh.gec); read_gec() 693 fill, ino, bix, level, child_no, be64_to_cpu(val)); write_alias_journal() 796 be64_to_cpu(super->s_je_array[super->s_no_je - 1]));
|
H A D | segment.c | 228 ino = be64_to_cpu(oa[i].ino); logfs_load_object_aliases() 229 bix = be64_to_cpu(oa[i].bix); logfs_load_object_aliases() 234 be64_to_cpu(item->val)); logfs_load_object_aliases() 583 if (be64_to_cpu(oh.ino) != inode->i_ino __logfs_segment_read() 584 || check_pos(sb, be64_to_cpu(oh.bix), bix, level)) { __logfs_segment_read() 588 be64_to_cpu(oh.ino), be64_to_cpu(oh.bix)); __logfs_segment_read() 699 LOGFS_BUG_ON(be64_to_cpu(h.ino) != inode->i_ino, sb); logfs_segment_delete() 700 LOGFS_BUG_ON(check_pos(sb, shadow->bix, be64_to_cpu(h.bix), logfs_segment_delete()
|
H A D | readwrite.c | 106 return ns_to_timespec(be64_to_cpu(betime)); be64_to_timespec() 124 inode->i_size = be64_to_cpu(di->di_size); logfs_disk_to_inode() 125 logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes)); logfs_disk_to_inode() 137 inode->i_rdev = be64_to_cpu(di->di_data[0]); logfs_disk_to_inode() 143 li->li_data[i] = be64_to_cpu(di->di_data[i]); logfs_disk_to_inode() 635 ptr = be64_to_cpu(array[i]); initialize_block_counters() 689 oldptr = be64_to_cpu(array[index]); block_set_pointer() 705 ptr = be64_to_cpu(block[index]); block_get_pointer() 857 if (!data && !(be64_to_cpu(rblock[slot]) & LOGFS_FULLY_POPULATED)) seek_holedata_loop() 868 bofs = be64_to_cpu(rblock[slot]); seek_holedata_loop()
|
H A D | dir.c | 317 be64_to_cpu(dd->ino), dd->type); logfs_readdir() 350 ino = be64_to_cpu(dd->ino); logfs_lookup() 649 dd->name, be64_to_cpu(dd->ino)); logfs_replace_inode()
|
H A D | gc.c | 164 ino = be64_to_cpu(oh.ino); logfs_gc_segment() 165 bix = be64_to_cpu(oh.bix); logfs_gc_segment()
|
/linux-4.1.27/fs/ufs/ |
H A D | swab.h | 31 return be64_to_cpu((__force __be64)n); fs64_to_cpu()
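This one-line wrapper (the befs and qnx6 entries further down are identical) reinterprets a raw 64-bit on-disk field as big-endian and converts it to host order; the (__force __be64) cast only quiets sparse's endianness checking. A userspace sketch of the idea, with be64toh() playing the role of be64_to_cpu() and fs64_to_cpu_example() plus the sample bytes invented:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fs64_to_cpu_example(uint64_t raw)
{
    return be64toh(raw);    /* raw holds the big-endian on-disk bytes */
}

int main(void)
{
    /* eight bytes as they would sit in a big-endian superblock field */
    const unsigned char disk[8] = { 0x00, 0x00, 0x00, 0x01,
                                    0x00, 0x00, 0x2a, 0xff };
    uint64_t raw;

    memcpy(&raw, disk, sizeof(raw));
    printf("field value = %llu\n",
           (unsigned long long)fs64_to_cpu_example(raw));
    return 0;
}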
|
/linux-4.1.27/fs/befs/ |
H A D | endian.h | 20 return be64_to_cpu((__force __be64)n); fs64_to_cpu()
|
/linux-4.1.27/drivers/rtc/ |
H A D | rtc-opal.c | 78 h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_get_rtc_time() 137 rc = be64_to_cpu(msg.params[1]); opal_get_tpo_time() 184 rc = be64_to_cpu(msg.params[1]); opal_set_tpo_time()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/ |
H A D | t4vf_hw.c | 161 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); t4vf_wr_mbox_core() 1400 s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes); t4vf_get_port_stats() 1401 s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames); t4vf_get_port_stats() 1402 s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes); t4vf_get_port_stats() 1403 s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames); t4vf_get_port_stats() 1404 s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes); t4vf_get_port_stats() 1405 s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames); t4vf_get_port_stats() 1406 s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames); t4vf_get_port_stats() 1407 s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes); t4vf_get_port_stats() 1408 s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames); t4vf_get_port_stats() 1410 s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes); t4vf_get_port_stats() 1411 s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames); t4vf_get_port_stats() 1412 s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes); t4vf_get_port_stats() 1413 s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames); t4vf_get_port_stats() 1414 s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes); t4vf_get_port_stats() 1415 s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames); t4vf_get_port_stats() 1417 s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames); t4vf_get_port_stats()
|
H A D | sge.c | 313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), unmap_sgl() 316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), unmap_sgl() 328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), unmap_sgl() 330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]), unmap_sgl() 339 dma_unmap_page(dev, be64_to_cpu(addr[0]), unmap_sgl() 341 dma_unmap_page(dev, be64_to_cpu(addr[1]), unmap_sgl() 347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), unmap_sgl() 349 dma_unmap_page(dev, be64_to_cpu(addr[0]), unmap_sgl() 362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]), unmap_sgl()
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
H A D | alias_GUID.c | 86 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. mlx4_ib_update_cache_on_guid_change() 206 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. mlx4_ib_notify_slaves_on_guid_change() 342 be64_to_cpu(required_val), aliasguid_query_handler() 374 be64_to_cpu(required_val), aliasguid_query_handler() 375 be64_to_cpu(sm_response)); aliasguid_query_handler() 411 be64_to_cpu((__force __be64)rec->guid_indexes), aliasguid_query_handler() 412 be64_to_cpu((__force __be64)applied_guid_indexes), aliasguid_query_handler() 413 be64_to_cpu((__force __be64)declined_guid_indexes)); aliasguid_query_handler() 595 be64_to_cpu(guid), mlx4_ib_guid_port_init()
|
H A D | mcg.c | 663 be64_to_cpu(group->response_sa_mad.mad_hdr.tid), mlx4_ib_mcg_work_handler() 664 be64_to_cpu(group->last_req_tid)); mlx4_ib_mcg_work_handler() 759 be64_to_cpu(group->rec.mgid.global.subnet_prefix), search_relocate_mgid0_group() 760 be64_to_cpu(group->rec.mgid.global.interface_id)); search_relocate_mgid0_group() 841 be64_to_cpu(group->rec.mgid.global.subnet_prefix), acquire_group() 842 be64_to_cpu(group->rec.mgid.global.interface_id)); acquire_group() 1005 be64_to_cpu(group->last_req_tid)); sysfs_show_group() 1011 be64_to_cpu(req->sa_mad.mad_hdr.tid)); sysfs_show_group()
|
H A D | mad.c | 743 be64_to_cpu(in_grh->sgid.global.subnet_prefix), ib_process_mad() 744 be64_to_cpu(in_grh->sgid.global.interface_id)); ib_process_mad() 746 be64_to_cpu(in_grh->dgid.global.subnet_prefix), ib_process_mad() 747 be64_to_cpu(in_grh->dgid.global.interface_id)); ib_process_mad() 818 (be64_to_cpu(cnt->tx_bytes) >> 2)); edit_counter() 820 (be64_to_cpu(cnt->rx_bytes) >> 2)); edit_counter() 822 be64_to_cpu(cnt->tx_frames)); edit_counter() 824 be64_to_cpu(cnt->rx_frames)); edit_counter()
|
H A D | cm.c | 375 be64_to_cpu(gid.global.interface_id)); mlx4_ib_demux_cm_handler()
|
H A D | sysfs.c | 59 return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val)); show_admin_alias_guid()
|
/linux-4.1.27/arch/mips/cavium-octeon/crypto/ |
H A D | octeon-crypto.h | 51 be64_to_cpu(__value); \
|
/linux-4.1.27/fs/qnx6/ |
H A D | qnx6.h | 82 return be64_to_cpu((__force __be64)n); fs64_to_cpu()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
H A D | mr.c | 85 mr->iova = be64_to_cpu(in->seg.start_addr); mlx5_core_create_mkey() 86 mr->size = be64_to_cpu(in->seg.len); mlx5_core_create_mkey()
|
H A D | pagealloc.c | 353 free_4k(dev, be64_to_cpu(in->pas[i])); give_pages() 401 addr = be64_to_cpu(out->pas[i]); reclaim_pages()
|
H A D | qp.c | 127 be64_to_cpu(pf_eqe->rdma.rdma_va); mlx5_eq_pagefault()
|
H A D | main.c | 333 gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); fw2drv_caps()
|
/linux-4.1.27/arch/x86/crypto/ |
H A D | ghash-clmulni-intel_glue.c | 68 a = be64_to_cpu(x->a); ghash_setkey() 69 b = be64_to_cpu(x->b); ghash_setkey()
|
H A D | blowfish_glue.c | 288 u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); __ctr_crypt()
|
H A D | des3_ede_glue.c | 294 u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); __ctr_crypt()
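Both __ctr_crypt() call sites above (blowfish and des3_ede) load the IV as a 64-bit big-endian counter so it can be incremented with ordinary integer arithmetic. A sketch of that counter handling, assuming an 8-byte IV buffer and using be64toh()/htobe64() from <endian.h> in place of be64_to_cpu()/cpu_to_be64(); ctr_next_block() is an invented name.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ctr_next_block(unsigned char iv[8])
{
    uint64_t ctrblk;

    memcpy(&ctrblk, iv, sizeof(ctrblk));
    ctrblk = be64toh(ctrblk) + 1;    /* counter arithmetic in host order */
    ctrblk = htobe64(ctrblk);        /* back to big-endian for the next block */
    memcpy(iv, &ctrblk, sizeof(ctrblk));
}

int main(void)
{
    unsigned char iv[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };

    for (int i = 0; i < 3; i++) {
        ctr_next_block(iv);
        printf("counter: %02x%02x%02x%02x%02x%02x%02x%02x\n",
               iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]);
    }
    return 0;
}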
|
/linux-4.1.27/include/linux/byteorder/ |
H A D | generic.h | 92 #define be64_to_cpu __be64_to_cpu macro 170 *var = cpu_to_be64(be64_to_cpu(*var) + val); be64_add_cpu()
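generic.h is where the be64_to_cpu() macro itself is defined, and be64_add_cpu() shows the canonical read-modify-write pattern for a field kept in big-endian storage order. A userspace equivalent, with be64toh()/htobe64() standing in for the kernel helpers and be64_add_cpu_example() as an invented name:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void be64_add_cpu_example(uint64_t *var, uint64_t val)
{
    /* same shape as be64_add_cpu() in generic.h above */
    *var = htobe64(be64toh(*var) + val);
}

int main(void)
{
    uint64_t counter = htobe64(40);    /* field kept in big-endian storage order */

    be64_add_cpu_example(&counter, 2);
    printf("stored 0x%016llx, host value %llu\n",
           (unsigned long long)counter,
           (unsigned long long)be64toh(counter));
    return 0;
}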
|
/linux-4.1.27/drivers/scsi/ibmvscsi/ |
H A D | ibmvfc.c | 169 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); ibmvfc_trc_start() 202 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id); ibmvfc_trc_end() 958 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) { ibmvfc_get_host_speed() 979 be64_to_cpu(vhost->login_buf->resp.link_speed) / 100); ibmvfc_get_host_speed() 1431 if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]), ibmvfc_send_event() 1432 be64_to_cpu(crq_as_u64[1])))) { ibmvfc_send_event() 1634 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); ibmvfc_queuecommand_lck() 1902 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + ibmvfc_bsg_request() 1980 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); ibmvfc_reset_device() 2184 if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS)) ibmvfc_cancel_all() 2310 tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp)); ibmvfc_abort_task_set() 2635 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event)); ibmvfc_handle_async() 2642 switch (be64_to_cpu(crq->event)) { ibmvfc_handle_async() 2691 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO) ibmvfc_handle_async() 2693 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) { ibmvfc_handle_async() 2724 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba); ibmvfc_handle_crq() 3639 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + ibmvfc_init_passthru() 3644 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + ibmvfc_init_passthru() 3648 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) + ibmvfc_init_passthru() 3765 mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff); ibmvfc_tgt_adisc() 3804 tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id); ibmvfc_tgt_query_target_done() 3805 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id) ibmvfc_tgt_query_target_done() 4061 npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS); ibmvfc_npiv_login_done() 4066 fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name); ibmvfc_npiv_login_done() 4067 fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name); ibmvfc_npiv_login_done() 4068 fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name); ibmvfc_npiv_login_done() 4069 fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id); ibmvfc_npiv_login_done()
|
H A D | ibmvscsi.c | 937 rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), ibmvscsi_send_srp_event() 938 be64_to_cpu(crq_as_u64[1])); ibmvscsi_send_srp_event() 1082 cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + ibmvscsi_queuecommand_lck()
|
/linux-4.1.27/drivers/cpuidle/ |
H A D | cpuidle-pseries.c | 46 wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles); idle_loop_epilog()
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | unaligned-sh4a.h | 90 return be64_to_cpu(sh4a_get_unaligned_cpu64(p)); get_unaligned_be64()
|
/linux-4.1.27/net/rds/ |
H A D | recv.c | 171 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence), rds_recv_incoming() 198 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq && rds_recv_incoming() 203 conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1; rds_recv_incoming() 533 minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence); rds_inc_info_copy()
|
H A D | ib_cm.c | 194 rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq), rds_ib_cm_connect_complete() 475 (unsigned long long)be64_to_cpu(lguid), rds_ib_cm_handle_connect() 476 (unsigned long long)be64_to_cpu(fguid)); rds_ib_cm_handle_connect() 515 rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); rds_ib_cm_handle_connect()
|
H A D | loop.c | 84 rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence), rds_loop_xmit()
|
H A D | iw_cm.c | 96 rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); rds_iw_cm_connect_complete() 436 rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL); rds_iw_cm_handle_connect()
|
H A D | ib_recv.c | 839 state->ack_recv = be64_to_cpu(ihdr->h_ack); rds_ib_process_recv() 912 state->ack_next = be64_to_cpu(hdr->h_sequence); rds_ib_process_recv()
|
H A D | iw_recv.c | 670 state->ack_recv = be64_to_cpu(ihdr->h_ack); rds_iw_process_recv() 743 state->ack_next = be64_to_cpu(hdr->h_sequence); rds_iw_process_recv()
|
H A D | send.c | 442 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; rds_send_is_acked() 837 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence)); rds_send_queue_rm()
|
/linux-4.1.27/drivers/crypto/ccp/ |
H A D | ccp-crypto-aes-cmac.c | 282 k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); ccp_aes_cmac_setkey() 283 k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1)); ccp_aes_cmac_setkey()
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
H A D | ib_srp.c | 783 (unsigned long long) be64_to_cpu(target->ioc_guid)); srp_send_req() 1840 "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); srp_process_aer_req() 2286 be64_to_cpu(ch->path.dgid.global.subnet_prefix), srp_cm_rej_handler() 2287 be64_to_cpu(ch->path.dgid.global.interface_id)); srp_cm_rej_handler() 2562 (unsigned long long) be64_to_cpu(target->id_ext)); show_id_ext() 2571 (unsigned long long) be64_to_cpu(target->ioc_guid)); show_ioc_guid() 2580 (unsigned long long) be64_to_cpu(target->service_id)); show_service_id() 2778 (unsigned long long) be64_to_cpu(target->id_ext)); srp_add_target() 3189 be64_to_cpu(target->id_ext), srp_create_target() 3190 be64_to_cpu(target->ioc_guid), srp_create_target() 3191 be64_to_cpu(target->initiator_ext)); srp_create_target() 3296 be64_to_cpu(target->id_ext), 3297 be64_to_cpu(target->ioc_guid), 3299 be64_to_cpu(target->service_id),
|
/linux-4.1.27/drivers/mtd/ubi/ |
H A D | fastmap.c | 511 unsigned long long ec = be64_to_cpu(ech->ec); scan_pool() 532 new_aeb->ec = be64_to_cpu(ech->ec); scan_pool() 535 new_aeb->sqnum = be64_to_cpu(vh->sqnum); scan_pool() 991 if (sqnum < be64_to_cpu(vh->sqnum)) ubi_scan_fastmap() 992 sqnum = be64_to_cpu(vh->sqnum); ubi_scan_fastmap() 1299 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); 1361 ec = be64_to_cpu(ec_hdr->ec); erase_block()
|
H A D | debug.c | 68 pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec)); ubi_dump_ec_hdr() 96 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); ubi_dump_vid_hdr()
|
H A D | attach.c | 330 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); ubi_compare_lebs() 460 sqnum = be64_to_cpu(vid_hdr->sqnum); ubi_add_to_av() 880 ec = be64_to_cpu(ech->ec); scan_peb() 997 *sqnum = be64_to_cpu(vidh->sqnum); scan_peb() 1658 if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { self_check_ai()
|
/linux-4.1.27/drivers/char/tpm/ |
H A D | tpm_ibmvtpm.c | 151 rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), tpm_ibmvtpm_send() 152 be64_to_cpu(word[1])); tpm_ibmvtpm_send()
|
/linux-4.1.27/drivers/i2c/busses/ |
H A D | i2c-opal.c | 74 rc = be64_to_cpu(msg.params[1]); i2c_opal_send_request()
|
/linux-4.1.27/drivers/misc/cxl/ |
H A D | fault.c | 51 if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) find_free_sste()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
H A D | mem.c | 190 i >> shift, be64_to_cpu(pas[i >> shift])); __mlx5_ib_populate_pas()
|
H A D | cq.c | 319 addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr); mlx5_get_atomic_laddr() 340 *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr)); handle_atomic() 398 item->sig_err_offset = be64_to_cpu(cqe->err_offset); get_sig_err_item()
|
H A D | odp.c | 341 io_virt = be64_to_cpu(dseg->addr); pagefault_data_segments()
|
/linux-4.1.27/drivers/char/ipmi/ |
H A D | ipmi_powernv.c | 141 size = be64_to_cpu(size); ipmi_powernv_recv()
|
/linux-4.1.27/include/scsi/ |
H A D | fc_frame.h | 35 #define ntohll(x) be64_to_cpu(x)
|
H A D | sas.h | 34 #define SAS_ADDR(_sa) ((unsigned long long) be64_to_cpu(*(__be64 *)(_sa)))
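SAS_ADDR() above turns eight wire-order bytes into a host-order u64 by casting the buffer to __be64 *. The sketch below does the same with a memcpy(), which is the alignment-safe way to express it in portable C (in-tree, get_unaligned_be64(), which also appears in this listing, covers that case); sas_addr_example() and the sample address are invented, and be64toh() stands in for be64_to_cpu().

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t sas_addr_example(const unsigned char sa[8])
{
    uint64_t be;

    memcpy(&be, sa, sizeof(be));    /* copy instead of casting: no alignment trap */
    return be64toh(be);
}

int main(void)
{
    const unsigned char sas_addr[8] = { 0x50, 0x00, 0xc5, 0x00,
                                        0x12, 0x34, 0x56, 0x78 };

    printf("sas addr: %016llx\n",
           (unsigned long long)sas_addr_example(sas_addr));
    return 0;
}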
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | fadump.h | 75 while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \
|
/linux-4.1.27/arch/powerpc/perf/ |
H A D | hv-24x7.c | 681 catalog_version_num = be64_to_cpu(page_0->version); create_events_from_catalog() 913 catalog_version_num = be64_to_cpu(page_0->version); catalog_read() 977 (unsigned long long)be64_to_cpu(page_0->version)); 1127 *count = be64_to_cpu(resb->elements[0].element_data[0]); single_24x7_request()
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | kvm_emulate.h | 246 return be64_to_cpu(data); vcpu_data_guest_to_host()
|
/linux-4.1.27/net/openvswitch/ |
H A D | vport-vxlan.c | 254 md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8); vxlan_tnl_send()
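As read from this call site, the 64-bit tunnel ID is converted to host order, its low 24 bits are shifted into the upper three bytes of a 32-bit word, and htonl() puts that word back into network order for the VXLAN header. A stand-alone sketch under that reading, with tun_id_to_vxlan_vni() as an invented name and be64toh() standing in for be64_to_cpu():

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t tun_id_to_vxlan_vni(uint64_t tun_id_be)
{
    /* low 24 bits of the tunnel ID -> upper three bytes of the header word */
    return htonl((uint32_t)(be64toh(tun_id_be) << 8));
}

int main(void)
{
    uint64_t tun_id = htobe64(42);    /* tunnel ID as carried in the tunnel key */
    uint32_t vni_word = tun_id_to_vxlan_vni(tun_id);
    const unsigned char *b = (const unsigned char *)&vni_word;

    printf("vni field bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
    return 0;
}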
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
H A D | sge.c | 328 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), unmap_sgl() 331 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), unmap_sgl() 342 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), unmap_sgl() 344 dma_unmap_page(dev, be64_to_cpu(p->addr[1]), unmap_sgl() 353 dma_unmap_page(dev, be64_to_cpu(addr[0]), unmap_sgl() 355 dma_unmap_page(dev, be64_to_cpu(addr[1]), unmap_sgl() 361 dma_unmap_page(dev, be64_to_cpu(p->addr[0]), unmap_sgl() 363 dma_unmap_page(dev, be64_to_cpu(addr[0]), unmap_sgl() 375 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), unmap_sgl()
|
/linux-4.1.27/drivers/block/drbd/ |
H A D | drbd_receiver.c | 1852 sector = be64_to_cpu(p->sector); receive_DataReply() 1886 sector = be64_to_cpu(p->sector); receive_RSDataReply() 2306 sector = be64_to_cpu(p->sector); receive_Data() 2530 sector = be64_to_cpu(p->sector); receive_DataRequest() 3740 p_size = be64_to_cpu(p->d_size); receive_sizes() 3741 p_usize = be64_to_cpu(p->u_size); receive_sizes() 3742 p_csize = be64_to_cpu(p->c_size); receive_sizes() 3844 if (be64_to_cpu(p->c_size) != receive_sizes() 3886 p_uuid[i] = be64_to_cpu(p->uuid[i]); receive_uuids() 4230 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid)); receive_sync_uuid() 4558 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); receive_out_of_sync() 5165 sector_t sector = be64_to_cpu(p->sector); got_IsInSync() 5217 sector_t sector = be64_to_cpu(p->sector); got_BlockAck() 5263 sector_t sector = be64_to_cpu(p->sector); got_NegAck() 5299 sector_t sector = be64_to_cpu(p->sector); got_NegDReply() 5329 sector = be64_to_cpu(p->sector); got_NegRSDReply() 5390 sector = be64_to_cpu(p->sector); got_OVResult() 5395 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) got_OVResult()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
H A D | ipath_rc.c | 1182 val = be64_to_cpu(((__be64 *) data)[0]); ipath_rc_rcv_resp() 1443 u64 vaddr = be64_to_cpu(reth->vaddr); OP() 1769 u64 vaddr = be64_to_cpu(reth->vaddr); 1824 u64 vaddr = be64_to_cpu(reth->vaddr); OP() 1907 sdata = be64_to_cpu(ateth->swap_data); OP() 1912 be64_to_cpu(ateth->compare_data), OP()
|
H A D | ipath_uc.c | 441 u64 vaddr = be64_to_cpu(reth->vaddr);
|
H A D | ipath_eeprom.c | 743 (unsigned long long) be64_to_cpu(dd->ipath_guid)); ipath_get_eeprom_info() 835 (unsigned long long) be64_to_cpu(dd->ipath_guid)); ipath_get_eeprom_info()
|
/linux-4.1.27/drivers/misc/genwqe/ |
H A D | card_debugfs.c | 297 be64_to_cpu(pddcb->priv_64), pddcb->cmd); genwqe_ddcb_info_show()
|
H A D | card_ddcb.c | 195 be64_to_cpu(pddcb->priv_64), print_ddcb_info() 346 req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); copy_ddcb_results() 347 req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); copy_ddcb_results()
|
H A D | card_dev.c | 886 u_addr = be64_to_cpu(*((__be64 *)&cmd-> ddcb_cmd_fixups() 918 u_addr = be64_to_cpu(*((__be64 *) ddcb_cmd_fixups()
|
H A D | card_utils.c | 94 return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs)); __genwqe_readq()
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | pci-calgary_64.c | 785 val64 = be64_to_cpu(readq(target)); calgary_setup_tar() 812 val64 = be64_to_cpu(readq(target)); calgary_free_bus() 945 val64 = be64_to_cpu(readq(target)); calgary_set_split_completion_timeout() 1347 tce_space = be64_to_cpu(readq(target)); get_tce_space_from_tar()
|
/linux-4.1.27/drivers/infiniband/ulp/srpt/ |
H A D | ib_srpt.c | 1142 raddr = be64_to_cpu(db->va); srpt_map_sg_to_ib_sge() 2381 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), srpt_cm_req_recv() 2382 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), srpt_cm_req_recv() 2383 be64_to_cpu(*(__be64 *)&req->target_port_id[0]), srpt_cm_req_recv() 2384 be64_to_cpu(*(__be64 *)&req->target_port_id[8]), srpt_cm_req_recv() 2387 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), srpt_cm_req_recv() 2388 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); srpt_cm_req_recv() 2522 be64_to_cpu(*(__be64 *)ch->i_port_id), srpt_cm_req_recv() 2523 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); srpt_cm_req_recv() 3185 srpt_service_guid = be64_to_cpu(device->node_guid); srpt_add_one() 3240 be64_to_cpu(sport->gid.global.subnet_prefix), srpt_add_one() 3241 be64_to_cpu(sport->gid.global.interface_id)); srpt_add_one()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
H A D | qib_rc.c | 1738 u64 vaddr = be64_to_cpu(reth->vaddr); OP() 2066 u64 vaddr = be64_to_cpu(reth->vaddr); qib_rc_rcv() 2122 u64 vaddr = be64_to_cpu(reth->vaddr); OP() 2203 sdata = be64_to_cpu(ateth->swap_data); OP() 2207 be64_to_cpu(ateth->compare_data), OP()
|
H A D | qib_uc.c | 437 u64 vaddr = be64_to_cpu(reth->vaddr);
|
/linux-4.1.27/drivers/infiniband/core/ |
H A D | cma.c | 828 return htons((u16) (be64_to_cpu(sib->sib_sid) & cma_port() 829 be64_to_cpu(sib->sib_sid_mask))); cma_port() 2310 sid = be64_to_cpu(sib->sib_sid); cma_bind_port() 2311 mask = be64_to_cpu(sib->sib_sid_mask); cma_bind_port() 2465 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; cma_select_ib_ps() 2466 sid = be64_to_cpu(sib->sib_sid) & mask; cma_select_ib_ps() 2484 be64_to_cpu(sib->sib_sid_mask)); cma_select_ib_ps()
|
H A D | cm.c | 1234 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || cm_is_active_peer() 3882 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); ib_cm_init()
|
/linux-4.1.27/drivers/net/ethernet/brocade/bna/ |
H A D | bna_enet.c | 76 stats_dst[i] = be64_to_cpu(stats_src[i]); \ 215 stats_dst[k] = be64_to_cpu(*stats_src); bna_bfi_stats_get_rsp() 230 stats_dst[k] = be64_to_cpu(*stats_src); bna_bfi_stats_get_rsp()
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | htc_drv_beacon.c | 273 tsf = be64_to_cpu(swba->tsf); ath9k_htc_choose_bslot()
|
H A D | htc_drv_txrx.c | 1007 rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp); ath9k_rx_prepare()
|
/linux-4.1.27/fs/ocfs2/ |
H A D | dlmglue.c | 135 (unsigned long long)be64_to_cpu(lvb->lvb_isize), ocfs2_dump_meta_lvb_info() 140 (long long)be64_to_cpu(lvb->lvb_iatime_packed), ocfs2_dump_meta_lvb_info() 141 (long long)be64_to_cpu(lvb->lvb_ictime_packed), ocfs2_dump_meta_lvb_info() 142 (long long)be64_to_cpu(lvb->lvb_imtime_packed), ocfs2_dump_meta_lvb_info() 592 return be64_to_cpu(inode_blkno_be); ocfs2_get_dentry_lock_ino() 2122 i_size_write(inode, be64_to_cpu(lvb->lvb_isize)); ocfs2_refresh_inode_from_lvb() 2139 be64_to_cpu(lvb->lvb_iatime_packed)); ocfs2_refresh_inode_from_lvb() 2141 be64_to_cpu(lvb->lvb_imtime_packed)); ocfs2_refresh_inode_from_lvb() 2143 be64_to_cpu(lvb->lvb_ictime_packed)); ocfs2_refresh_inode_from_lvb()
|
/linux-4.1.27/fs/hfsplus/ |
H A D | inode.c | 456 hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size); hfsplus_inode_read_fork()
|
/linux-4.1.27/drivers/power/ |
H A D | olpc_battery.c | 451 sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf)); olpc_bat_get_property()
|
/linux-4.1.27/include/linux/mlx5/ |
H A D | device.h | 89 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lustre_fid.h | 759 dst->lsr_start = be64_to_cpu(src->lsr_start); range_be_to_cpu() 760 dst->lsr_end = be64_to_cpu(src->lsr_end); range_be_to_cpu()
|
/linux-4.1.27/net/netfilter/ipset/ |
H A D | ip_set_core.c | 401 ext->bytes = be64_to_cpu(nla_get_be64( ip_set_get_extensions() 404 ext->packets = be64_to_cpu(nla_get_be64( ip_set_get_extensions() 415 fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK])); ip_set_get_extensions()
|
/linux-4.1.27/drivers/scsi/pm8001/ |
H A D | pm8001_ctl.c | 237 be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr)); pm8001_ctl_host_sas_address_show()
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
H A D | aic7xxx_osm.h | 124 #define ahc_be64toh(x) be64_to_cpu(x)
|
H A D | aic79xx_osm.h | 112 #define ahd_be64toh(x) be64_to_cpu(x)
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
H A D | mthca_eq.c | 330 be64_to_cpu(eqe->event.cmd.out_param)); mthca_eq_int()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | io.h | 469 be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
|
/linux-4.1.27/fs/jbd2/ |
H A D | recovery.c | 872 blocknr = be64_to_cpu(* ((__be64 *) (bh->b_data+offset))); scan_revoke_records()
|
/linux-4.1.27/kernel/trace/ |
H A D | blktrace.c | 1131 return be64_to_cpu(*val); get_pdu_int() 1142 r->sector_from = be64_to_cpu(sector_from); get_pdu_remap()
|
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/ |
H A D | vxge-config.c | 890 ((u64 *)serial_number)[0] = be64_to_cpu(data0); __vxge_hw_vpath_card_info_get() 891 ((u64 *)serial_number)[1] = be64_to_cpu(data1); __vxge_hw_vpath_card_info_get() 903 ((u64 *)part_number)[0] = be64_to_cpu(data0); __vxge_hw_vpath_card_info_get() 904 ((u64 *)part_number)[1] = be64_to_cpu(data1); __vxge_hw_vpath_card_info_get() 918 ((u64 *)product_desc)[j++] = be64_to_cpu(data0); __vxge_hw_vpath_card_info_get() 919 ((u64 *)product_desc)[j++] = be64_to_cpu(data1); __vxge_hw_vpath_card_info_get()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/ |
H A D | sge.c | 247 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), unmap_skb() 256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), unmap_skb() 1559 *p++ = be64_to_cpu(sgl->addr[0]); setup_deferred_unmapping() 1560 *p++ = be64_to_cpu(sgl->addr[1]); setup_deferred_unmapping() 1563 *p = be64_to_cpu(sgl->addr[0]); setup_deferred_unmapping()
|
/linux-4.1.27/drivers/scsi/osd/ |
H A D | osd_initiator.c | 1822 osi->obj.partition = be64_to_cpu(osidd->partition_id); osd_req_decode_sense_full() 1823 osi->obj.id = be64_to_cpu(osidd->object_id); osd_req_decode_sense_full()
|
/linux-4.1.27/drivers/scsi/cxgbi/cxgb4i/ |
H A D | cxgb4i.c | 1819 pgl->va, be64_to_cpu(*rsp), t4_uld_rx_handler() 1820 be64_to_cpu(*(u64 *)pgl->va), t4_uld_rx_handler()
|
/linux-4.1.27/fs/ocfs2/cluster/ |
H A D | tcp.c | 1280 (unsigned long long)be64_to_cpu(hand->protocol_version), o2net_check_handshake() 1515 (unsigned long long)be64_to_cpu(o2net_hand->connector_id)); o2net_sc_connect_completed()
|
/linux-4.1.27/drivers/scsi/aic94xx/ |
H A D | aic94xx_scb.c | 160 u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr); asd_get_attached_sas_addr()
|
/linux-4.1.27/drivers/nfc/ |
H A D | port100.c | 948 mask = be64_to_cpu(*(__be64 *)resp->data); port100_get_command_type_mask()
|
/linux-4.1.27/include/net/ |
H A D | ipv6.h | 671 return i * 64 + 63 - __fls(be64_to_cpu(xb)); __ipv6_addr_diff64()
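__ipv6_addr_diff64() compares two addresses 64 bits at a time and returns the position of the first differing bit, i.e. the common prefix length. A simplified stand-alone version of that logic (not the kernel helper itself), using be64toh() for be64_to_cpu() and GCC/Clang's __builtin_clzll() in place of 63 - __fls(); the function name and sample addresses are invented.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int ipv6_addr_diff64_example(const unsigned char a1[16],
                                    const unsigned char a2[16])
{
    for (int i = 0; i < 2; i++) {
        uint64_t w1, w2, xb;

        memcpy(&w1, a1 + 8 * i, 8);
        memcpy(&w2, a2 + 8 * i, 8);
        xb = be64toh(w1 ^ w2);
        if (xb)
            /* same as the kernel's i * 64 + 63 - __fls(xb) */
            return i * 64 + __builtin_clzll(xb);
    }
    return 128;    /* identical addresses */
}

int main(void)
{
    unsigned char a[16] = { 0x20, 0x01, 0x0d, 0xb8 };    /* 2001:db8:: */
    unsigned char b[16] = { 0x20, 0x01, 0x0d, 0xb8 };

    b[4] = 0x80;    /* first bit after the /32 differs */
    printf("common prefix length: %d\n", ipv6_addr_diff64_example(a, b));
    return 0;
}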
|