/linux-4.1.27/drivers/sh/clk/
Makefile
    3: obj-$(CONFIG_SH_CLK_CPG) += cpg.o

cpg.c
    75: pr_err("cpg: failed to enable %p[%d]\n",    (in sh_clk_mstp_enable())

/linux-4.1.27/drivers/clk/shmobile/
clk-rz.c
     32: rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name)
     49: if (!cpg->reg)
     57: val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3;
     59: val = clk_readl(cpg->reg + CPG_FRQCR2) & 3;
     69: struct rz_cpg *cpg;
     78: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
     80: BUG_ON(!cpg || !clks);
     82: cpg->data.clks = clks;
     83: cpg->data.clk_num = num_clks;
     85: cpg->reg = of_iomap(np, 0);
     93: clk = rz_cpg_register_clock(np, cpg, name);
     98: cpg->data.clks[i] = clk;
    101: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    103: CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init);

clk-r8a7778.c
     52: r8a7778_cpg_register_clock(struct device_node *np, struct r8a7778_cpg *cpg,
     82: struct r8a7778_cpg *cpg;
     93: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
     95: if (cpg == NULL || clks == NULL) {
    102: spin_lock_init(&cpg->lock);
    104: cpg->data.clks = clks;
    105: cpg->data.clk_num = num_clks;
    107: cpg->reg = of_iomap(np, 0);
    108: if (WARN_ON(cpg->reg == NULL))
    118: clk = r8a7778_cpg_register_clock(np, cpg, name);
    123: cpg->data.clks[i] = clk;
    126: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    129: CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",

clk-sh73a0.c
     77: sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
     88: u32 parent_idx = (clk_readl(cpg->reg + CPG_CKSCR) >> 28) & 3;
     93: void __iomem *enable_reg = cpg->reg;
    113: if (clk_readl(cpg->reg + CPG_PLLECR) & BIT(enable_bit)) {
    122: void __iomem *dsi_reg = cpg->reg +
    159: cpg->reg + reg, shift, width, 0,
    160: table, &cpg->lock);
    166: struct sh73a0_cpg *cpg;
    177: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    179: if (cpg == NULL || clks == NULL) {
    186: spin_lock_init(&cpg->lock);
    188: cpg->data.clks = clks;
    189: cpg->data.clk_num = num_clks;
    191: cpg->reg = of_iomap(np, 0);
    192: if (WARN_ON(cpg->reg == NULL))
    196: clk_writel(0x108, cpg->reg + CPG_SD0CKCR);
    197: clk_writel(0x108, cpg->reg + CPG_SD1CKCR);
    198: clk_writel(0x108, cpg->reg + CPG_SD2CKCR);
    207: clk = sh73a0_cpg_register_clock(np, cpg, name);
    212: cpg->data.clks[i] = clk;
    215: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    217: CLK_OF_DECLARE(sh73a0_cpg_clks, "renesas,sh73a0-cpg-clocks",

clk-r8a73a4.c
     63: r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
     74: u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
     98: u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
    105: u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
    128: value = clk_readl(cpg->reg + cr);
    164: mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
    187: cpg->reg + reg, shift, 4, 0,
    188: table, &cpg->lock);
    194: struct r8a73a4_cpg *cpg;
    205: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    207: if (cpg == NULL || clks == NULL) {
    214: spin_lock_init(&cpg->lock);
    216: cpg->data.clks = clks;
    217: cpg->data.clk_num = num_clks;
    219: cpg->reg = of_iomap(np, 0);
    220: if (WARN_ON(cpg->reg == NULL))
    230: clk = r8a73a4_cpg_register_clock(np, cpg, name);
    235: cpg->data.clks[i] = clk;
    238: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    240: CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",

clk-r8a7740.c
     65: r8a7740_cpg_register_clock(struct device_node *np, struct r8a7740_cpg *cpg,
    101: u32 value = clk_readl(cpg->reg + CPG_FRQCRC);
    105: u32 value = clk_readl(cpg->reg + CPG_FRQCRA);
    110: u32 value = clk_readl(cpg->reg + CPG_PLLC2CR);
    114: u32 value = clk_readl(cpg->reg + CPG_USBCKCR);
    142: cpg->reg + reg, shift, 4, 0,
    143: table, &cpg->lock);
    149: struct r8a7740_cpg *cpg;
    163: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    165: if (cpg == NULL || clks == NULL) {
    172: spin_lock_init(&cpg->lock);
    174: cpg->data.clks = clks;
    175: cpg->data.clk_num = num_clks;
    177: cpg->reg = of_iomap(np, 0);
    178: if (WARN_ON(cpg->reg == NULL))
    188: clk = r8a7740_cpg_register_clock(np, cpg, name);
    193: cpg->data.clks[i] = clk;
    196: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    198: CLK_OF_DECLARE(r8a7740_cpg_clks, "renesas,r8a7740-cpg-clocks",

clk-r8a7779.c
     94: r8a7779_cpg_register_clock(struct device_node *np, struct r8a7779_cpg *cpg,
    126: struct r8a7779_cpg *cpg;
    137: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    139: if (cpg == NULL || clks == NULL) {
    146: spin_lock_init(&cpg->lock);
    148: cpg->data.clks = clks;
    149: cpg->data.clk_num = num_clks;
    161: clk = r8a7779_cpg_register_clock(np, cpg, config,
    167: cpg->data.clks[i] = clk;
    170: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    172: CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks",

clk-rcar-gen2.c
    138: static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
    155: zclk->reg = cpg->reg + CPG_FRQCRC;
    156: zclk->kick_reg = cpg->reg + CPG_FRQCRB;
    166: static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
    187: gate->reg = cpg->reg + CPG_RCANCKCR;
    190: gate->lock = &cpg->lock;
    210: static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
    221: div->reg = cpg->reg + CPG_ADSPCKCR;
    224: div->lock = &cpg->lock;
    232: gate->reg = cpg->reg + CPG_ADSPCKCR;
    235: gate->lock = &cpg->lock;
    301: rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
    320: u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
    349: return cpg_z_clk_register(cpg);
    351: return cpg_rcan_clk_register(cpg, np);
    353: return cpg_adsp_clk_register(cpg);
    363: cpg->reg + CPG_SDCKCR, shift,
    364: 4, 0, table, &cpg->lock);
    370: struct rcar_gen2_cpg *cpg;
    381: cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    383: if (cpg == NULL || clks == NULL) {
    387: pr_err("%s: failed to allocate cpg\n", __func__);
    391: spin_lock_init(&cpg->lock);
    393: cpg->data.clks = clks;
    394: cpg->data.clk_num = num_clks;
    396: cpg->reg = of_iomap(np, 0);
    397: if (WARN_ON(cpg->reg == NULL))
    409: clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
    414: cpg->data.clks[i] = clk;
    417: of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
    419: CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",

clk-mstp.c
    238: CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);

clk-div6.c
    284: CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
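
All of the drivers/clk/shmobile entries above share one boot-time pattern: allocate a per-SoC cpg structure, ioremap the CPG register block, register one struct clk per "clock-output-names" entry, and publish the result with of_clk_add_provider(); CLK_OF_DECLARE() then ties the init function to a DT compatible string. The sketch below is a hedged consolidation of that pattern, not code from the tree: struct my_cpg, my_cpg_register_clock() and the "vendor,my-cpg-clocks" compatible are hypothetical names, and the per-SoC register decoding (FRQCR/CKSCR/PLL dividers) is reduced to a 1:1 fixed-factor clock purely so the example is self-contained.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-SoC state, mirroring struct rz_cpg, struct r8a7778_cpg,
 * etc. in the matches above. */
struct my_cpg {
    struct clk_onecell_data data;
    void __iomem *reg;      /* CPG register block mapped with of_iomap() */
    spinlock_t lock;        /* protects shared divider/gate registers */
};

/* Placeholder for the per-SoC decode logic (FRQCR/CKSCR parsing, PLL
 * multipliers, SDCKCR dividers, ...); here each output is simply wired
 * 1:1 to the first DT parent so the sketch is complete. */
static struct clk * __init my_cpg_register_clock(struct device_node *np,
                                                 struct my_cpg *cpg,
                                                 const char *name)
{
    return clk_register_fixed_factor(NULL, name,
                                     of_clk_get_parent_name(np, 0),
                                     0, 1, 1);
}

static void __init my_cpg_clocks_init(struct device_node *np)
{
    struct my_cpg *cpg;
    struct clk **clks;
    int i, num_clks;

    num_clks = of_property_count_strings(np, "clock-output-names");
    if (num_clks < 1)
        return;

    cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
    clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
    if (cpg == NULL || clks == NULL)
        return;         /* error handling elided, as in the real drivers */

    spin_lock_init(&cpg->lock);
    cpg->data.clks = clks;
    cpg->data.clk_num = num_clks;

    cpg->reg = of_iomap(np, 0);
    if (WARN_ON(cpg->reg == NULL))
        return;

    for (i = 0; i < num_clks; ++i) {
        const char *name;
        struct clk *clk;

        of_property_read_string_index(np, "clock-output-names", i, &name);

        clk = my_cpg_register_clock(np, cpg, name);
        if (IS_ERR(clk))
            pr_err("%s: failed to register %s %s clock (%ld)\n",
                   __func__, np->name, name, PTR_ERR(clk));
        else
            cpg->data.clks[i] = clk;
    }

    /* Expose every registered clock to DT consumers through one cell. */
    of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
}
CLK_OF_DECLARE(my_cpg_clks, "vendor,my-cpg-clocks", my_cpg_clocks_init);

The real drivers differ almost only inside the register_clock helper, which is why the init functions in clk-rz.c, clk-r8a7778.c, clk-sh73a0.c and the others look nearly identical in the matches above.
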
/linux-4.1.27/drivers/crypto/
mv_cesa.c
    103: static struct crypto_priv *cpg;
    148: int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
    154: del_timer(&cpg->completion_timer);
    155: writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
    156: while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
    158: cpg->eng_st = ENGINE_W_DEQUEUE;
    159: wake_up_process(cpg->queue_th);
    164: setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
    165: mod_timer(&cpg->completion_timer,
    245: struct req_progress *p = &cpg->p;
    247: min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
    248: copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
    255: struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
    270: memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
    275: memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
    279: memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
    299: op.enc_len = cpg->p.crypt_len;
    300: memcpy(cpg->sram + SRAM_CONFIG, &op,
    305: writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
    310: struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
    313: sg_miter_stop(&cpg->p.src_sg_it);
    314: sg_miter_stop(&cpg->p.dst_sg_it);
    319: memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
    324: struct ahash_request *req = ahash_request_cast(cpg->cur_req);
    327: struct req_progress *p = &cpg->p;
    338: memcpy(cpg->sram + SRAM_HMAC_IV_IN,
    373: writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
    374: writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
    375: writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
    376: writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
    377: writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
    381: memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
    385: writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
    428: ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
    429: ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
    430: ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
    431: ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
    432: ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
    437: struct ahash_request *req = ahash_request_cast(cpg->cur_req);
    441: copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
    442: sg_miter_stop(&cpg->p.src_sg_it);
    446: memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
    460: struct crypto_async_request *req = cpg->cur_req;
    463: cpg->p.hw_processed_bytes += cpg->p.crypt_len;
    464: if (cpg->p.copy_back) {
    465: int need_copy_len = cpg->p.crypt_len;
    470: if (!cpg->p.sg_dst_left) {
    471: ret = sg_miter_next(&cpg->p.dst_sg_it);
    473: cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
    474: cpg->p.dst_start = 0;
    477: buf = cpg->p.dst_sg_it.addr;
    478: buf += cpg->p.dst_start;
    480: dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
    483: cpg->sram + SRAM_DATA_OUT_START + sram_offset,
    486: cpg->p.sg_dst_left -= dst_copy;
    488: cpg->p.dst_start += dst_copy;
    492: cpg->p.crypt_len = 0;
    494: BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
    495: if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
    497: cpg->eng_st = ENGINE_BUSY;
    498: cpg->p.process(0);
    500: cpg->p.complete();
    501: cpg->eng_st = ENGINE_IDLE;
    527: struct req_progress *p = &cpg->p;
    530: cpg->cur_req = &req->base;
    548: struct req_progress *p = &cpg->p;
    551: cpg->cur_req = &req->base;
    572: memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
    586: cpg->eng_st = ENGINE_IDLE;
    595: cpg->eng_st = ENGINE_IDLE;
    602: if (cpg->eng_st == ENGINE_W_DEQUEUE)
    605: spin_lock_irq(&cpg->lock);
    606: if (cpg->eng_st == ENGINE_IDLE) {
    607: backlog = crypto_get_backlog(&cpg->queue);
    608: async_req = crypto_dequeue_request(&cpg->queue);
    610: BUG_ON(cpg->eng_st != ENGINE_IDLE);
    611: cpg->eng_st = ENGINE_BUSY;
    614: spin_unlock_irq(&cpg->lock);
    646: spin_lock_irqsave(&cpg->lock, flags);
    647: ret = crypto_enqueue_request(&cpg->queue, req);
    648: spin_unlock_irqrestore(&cpg->lock, flags);
    649: wake_up_process(cpg->queue_th);
    909: val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
    913: if (!del_timer(&cpg->completion_timer)) {
    918: writel(val, cpg->reg + FPGA_INT_STATUS);
    919: writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
    920: BUG_ON(cpg->eng_st != ENGINE_BUSY);
    921: cpg->eng_st = ENGINE_W_DEQUEUE;
    922: wake_up_process(cpg->queue_th);
   1029: if (cpg) {
   1074: cpg = cp;
   1093: writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
   1094: writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
   1095: writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
   1096: writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
   1114: cpg->has_sha1 = 1;
   1120: cpg->has_hmac_sha1 = 1;
   1143: cpg = NULL;
   1169: cpg = NULL;
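
For orientation, every mv_cesa.c match above goes through a single file-scope static struct crypto_priv *cpg and a three-state engine field, cpg->eng_st. The snippet below is a reading aid reconstructed from those matches, not verbatim driver code; only the three state names and the transitions are taken from the lines quoted above, while the enum tag and comment wording are assumptions.

/* Engine states as used through cpg->eng_st in the matches above. */
enum engine_status {
    ENGINE_IDLE,        /* queue thread may dequeue the next request */
    ENGINE_BUSY,        /* a request has been handed to the accelerator */
    ENGINE_W_DEQUEUE,   /* completion work pending for the queue thread */
};

/*
 * Transitions, as far as the matches show them:
 *
 *   IDLE -> BUSY        queue_manag(): crypto_dequeue_request(&cpg->queue)
 *                       succeeded under cpg->lock (lines 606-611).
 *   BUSY -> W_DEQUEUE   crypto_int() on SEC_ACCEL_INT_STATUS, or
 *                       mv_completion_timer_callback() on timeout, followed by
 *                       wake_up_process(cpg->queue_th) (lines 158-159, 920-922).
 *   W_DEQUEUE -> BUSY   dequeue_complete_req(): more data remains
 *                       (hw_processed_bytes < hw_nbytes), next chunk started
 *                       via cpg->p.process() (lines 495-498).
 *   W_DEQUEUE -> IDLE   dequeue_complete_req(): request finished,
 *                       cpg->p.complete() called (lines 500-501).
 */
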
/linux-4.1.27/arch/sh/include/asm/
clock.h
    10: /* arch/sh/kernel/cpu/clock-cpg.c */

/linux-4.1.27/drivers/staging/lustre/lustre/llite/
vvp_page.c
    141: struct ccc_page *cpg = cl2ccc_page(slice);
    148: if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
    529: struct ccc_page *cpg = cl_object_page_slice(obj, page);
    533: cpg->cpg_page = vmpage;
    536: INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
    540: cl_page_slice_add(page, &cpg->cpg_cl, obj,
    546: cl_page_slice_add(page, &cpg->cpg_cl, obj,

vvp_dev.c
    404: struct ccc_page *cpg;
    408: cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
    409: vmpage = cpg->cpg_page;
    412: cpg, page,
    414: cpg->cpg_write_queued ? "wq" : "- ",
    415: cpg->cpg_defer_uptodate ? "du" : "- ",

rw26.c
    169: struct vvp_page *cpg;
    175: cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
    181: vvp_write_pending(obj, cpg);

/linux-4.1.27/drivers/char/agp/
efficeon-agp.c
    11: * NOTE-cpg-040217:

/linux-4.1.27/fs/ocfs2/
journal.h
    569-570: ocfs2_calc_group_alloc_credits(struct super_block *sb, unsigned int cpg)

suballoc.c
    565: * the cpg. So bail out.    (in ocfs2_block_group_grow_discontig())
   1499: /* Tail groups in cluster bitmaps which aren't cpg    (in ocfs2_cluster_group_search())

/linux-4.1.27/fs/ufs/
super.c
    218: pr_debug(" cpg: %u\n", fs32_to_cpu(sb, usb1->fs_cpg));