era 2645 arch/x86/events/intel/core.c struct er_account *era;
era 2658 arch/x86/events/intel/core.c era = &cpuc->shared_regs->regs[idx];
era 2663 arch/x86/events/intel/core.c raw_spin_lock_irqsave(&era->lock, flags);
era 2665 arch/x86/events/intel/core.c if (!atomic_read(&era->ref) || era->config == reg->config) {
era 2691 arch/x86/events/intel/core.c era->config = reg->config;
era 2692 arch/x86/events/intel/core.c era->reg = reg->reg;
era 2695 arch/x86/events/intel/core.c atomic_inc(&era->ref);
era 2705 arch/x86/events/intel/core.c raw_spin_unlock_irqrestore(&era->lock, flags);
era 2709 arch/x86/events/intel/core.c raw_spin_unlock_irqrestore(&era->lock, flags);
era 2718 arch/x86/events/intel/core.c struct er_account *era;
era 2731 arch/x86/events/intel/core.c era = &cpuc->shared_regs->regs[reg->idx];
era 2734 arch/x86/events/intel/core.c atomic_dec(&era->ref);
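The arch/x86 hits above are the Intel PMU's shared extra-register bookkeeping: take the er_account lock, claim the register if it is unreferenced or already programmed with a compatible config, bump the refcount, and drop the refcount again on the put path. A minimal user-space sketch of that idiom, using C11 atomics and a pthread mutex in place of the kernel's raw spinlock (era_try_get/era_put and the simplified struct are illustrative, not the kernel's names):

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

/* Simplified stand-in for struct er_account (see the core.c hits above). */
struct er_account {
	pthread_mutex_t lock;	/* the kernel uses a raw spinlock here */
	atomic_int ref;		/* events currently sharing this register */
	unsigned long config;	/* value the register is programmed with */
};

/* Claim the shared register when it is free or already carries a
 * compatible config, mirroring the
 * "!atomic_read(&era->ref) || era->config == reg->config" test above. */
static bool era_try_get(struct er_account *era, unsigned long config)
{
	bool ok = false;

	pthread_mutex_lock(&era->lock);
	if (!atomic_load(&era->ref) || era->config == config) {
		era->config = config;
		atomic_fetch_add(&era->ref, 1);
		ok = true;
	}
	pthread_mutex_unlock(&era->lock);
	return ok;
}

/* Put path: just drop one reference (the atomic_dec at core.c line 2734). */
static void era_put(struct er_account *era)
{
	atomic_fetch_sub(&era->ref, 1);
}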
era 141 drivers/crypto/caam/caamalg.c ctrlpriv->era);
era 160 drivers/crypto/caam/caamalg.c ctrlpriv->era);
era 243 drivers/crypto/caam/caamalg.c false, ctrlpriv->era);
era 265 drivers/crypto/caam/caamalg.c nonce, ctx1_iv_off, false, ctrlpriv->era);
era 289 drivers/crypto/caam/caamalg.c ctx1_iv_off, false, ctrlpriv->era);
era 584 drivers/crypto/caam/caamalg.c if (ctrlpriv->era >= 6) {
era 1250 drivers/crypto/caam/caamalg.c if (ctrlpriv->era < 3)
era 3421 drivers/crypto/caam/caamalg.c if (priv->era >= 6 && uses_dkp)
era 3544 drivers/crypto/caam/caamalg.c if (priv->era < 10) {
era 57 drivers/crypto/caam/caamalg_desc.c unsigned int icvsize, int era)
era 66 drivers/crypto/caam/caamalg_desc.c if (era < 6) {
era 136 drivers/crypto/caam/caamalg_desc.c unsigned int icvsize, int era)
era 145 drivers/crypto/caam/caamalg_desc.c if (era < 6) {
era 213 drivers/crypto/caam/caamalg_desc.c const bool is_rfc3686, u32 *nonce, int era)
era 233 drivers/crypto/caam/caamalg_desc.c if (era < 6) {
era 292 drivers/crypto/caam/caamalg_desc.c int era)
era 295 drivers/crypto/caam/caamalg_desc.c init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
era 321 drivers/crypto/caam/caamalg_desc.c if (is_qi || era < 3) {
era 386 drivers/crypto/caam/caamalg_desc.c const u32 ctx1_iv_off, const bool is_qi, int era)
era 389 drivers/crypto/caam/caamalg_desc.c init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
era 416 drivers/crypto/caam/caamalg_desc.c if (is_qi || era < 3) {
era 503 drivers/crypto/caam/caamalg_desc.c const bool is_qi, int era)
era 509 drivers/crypto/caam/caamalg_desc.c init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
era 559 drivers/crypto/caam/caamalg_desc.c if (is_qi || era < 3) {
era 52 drivers/crypto/caam/caamalg_desc.h unsigned int icvsize, int era);
era 55 drivers/crypto/caam/caamalg_desc.h unsigned int icvsize, int era);
era 61 drivers/crypto/caam/caamalg_desc.h const bool is_qi, int era);
era 67 drivers/crypto/caam/caamalg_desc.h const u32 ctx1_iv_off, const bool is_qi, int era);
era 73 drivers/crypto/caam/caamalg_desc.h const bool is_qi, int era);
era 138 drivers/crypto/caam/caamalg_qi.c ctx1_iv_off, true, ctrlpriv->era);
era 154 drivers/crypto/caam/caamalg_qi.c ctrlpriv->era);
era 171 drivers/crypto/caam/caamalg_qi.c ctx1_iv_off, true, ctrlpriv->era);
era 209 drivers/crypto/caam/caamalg_qi.c if (ctrlpriv->era >= 6) {
era 2434 drivers/crypto/caam/caamalg_qi.c if (priv->era >= 6 && uses_dkp)
era 2562 drivers/crypto/caam/caamalg_qi.c if (priv->era < 10) {
era 235 drivers/crypto/caam/caamalg_qi2.c priv->sec_attr.era);
era 239 drivers/crypto/caam/caamalg_qi2.c ctx1_iv_off, true, priv->sec_attr.era);
era 261 drivers/crypto/caam/caamalg_qi2.c priv->sec_attr.era);
era 3099 drivers/crypto/caam/caamalg_qi2.c ctx->ctx_len, true, priv->sec_attr.era);
era 3111 drivers/crypto/caam/caamalg_qi2.c ctx->ctx_len, false, priv->sec_attr.era);
era 3123 drivers/crypto/caam/caamalg_qi2.c ctx->ctx_len, true, priv->sec_attr.era);
era 3135 drivers/crypto/caam/caamalg_qi2.c ctx->ctx_len, false, priv->sec_attr.era);
era 235 drivers/crypto/caam/caamhash.c ctx->ctx_len, true, ctrlpriv->era);
era 246 drivers/crypto/caam/caamhash.c ctx->ctx_len, false, ctrlpriv->era);
era 256 drivers/crypto/caam/caamhash.c ctx->ctx_len, true, ctrlpriv->era);
era 267 drivers/crypto/caam/caamhash.c ctx->ctx_len, false, ctrlpriv->era);
era 470 drivers/crypto/caam/caamhash.c if (ctrlpriv->era >= 6) {
era 1850 drivers/crypto/caam/caamhash.c if (priv->era >= 6) {
era 1993 drivers/crypto/caam/caamhash.c if (priv->era < 10) {
era 29 drivers/crypto/caam/caamhash_desc.c int digestsize, int ctx_len, bool import_ctx, int era)
era 43 drivers/crypto/caam/caamhash_desc.c if (era < 6)
era 25 drivers/crypto/caam/caamhash_desc.h int digestsize, int ctx_len, bool import_ctx, int era);
era 1095 drivers/crypto/caam/caampkc.c if (priv->era < 10)
era 322 drivers/crypto/caam/caamrng.c if (priv->era < 10)
era 145 drivers/crypto/caam/ctrl.c if (ctrlpriv->era < 10)
era 399 drivers/crypto/caam/ctrl.c u8 era;
era 416 drivers/crypto/caam/ctrl.c u8 maj_rev, era;
era 421 drivers/crypto/caam/ctrl.c era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
era 422 drivers/crypto/caam/ctrl.c if (era) /* This is '0' prior to CAAM ERA-6 */
era 423 drivers/crypto/caam/ctrl.c return era;
era 431 drivers/crypto/caam/ctrl.c return id[i].era;
era 726 drivers/crypto/caam/ctrl.c ctrlpriv->era = caam_get_era(ctrl);
era 784 drivers/crypto/caam/ctrl.c if (ctrlpriv->era < 10)
era 863 drivers/crypto/caam/ctrl.c ctrlpriv->era);
era 304 drivers/crypto/caam/dpseci.c attr->era = rsp_params->era;
era 230 drivers/crypto/caam/dpseci.h u8 era;
era 106 drivers/crypto/caam/dpseci_cmd.h u8 era;
era 90 drivers/crypto/caam/intern.h int era; /* CAAM Era (internal HW revision) */
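Every drivers/crypto/caam hit keys off the same integer: the CAAM "era", the internal hardware revision recorded in intern.h above. The descriptor builders take it as an int parameter and branch on thresholds (era < 3, era < 6, era < 10) to pick per-revision code paths, while ctrl.c discovers it from the CCBVID register and falls back to an ID table on parts older than era 6 (where the field reads as zero). A condensed sketch of that discovery logic, modelled on the caam_get_era() hits above; the mask/shift values and the table layout are placeholders, the real definitions live in the CAAM register headers:

#include <stdint.h>

/* Placeholder register layout for this sketch only. */
#define CCBVID_ERA_SHIFT 24
#define CCBVID_ERA_MASK  (0xffu << CCBVID_ERA_SHIFT)

struct caam_era_id {
	uint16_t ip_id;
	uint8_t maj_rev;
	uint8_t era;
};

/* Era discovery in the style of caam_get_era(): CCBVID reports the era
 * directly from era 6 onwards ('0' on older parts); otherwise fall back
 * to a lookup keyed on IP ID and major revision. */
static int caam_era_from_ids(uint32_t ccbvid, uint16_t ip_id, uint8_t maj_rev,
			     const struct caam_era_id *id, unsigned int n)
{
	uint8_t era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	unsigned int i;

	if (era)	/* '0' prior to CAAM ERA-6 */
		return era;

	for (i = 0; i < n; i++)
		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
			return id[i].era;

	return -1;	/* unknown hardware revision */
}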
era 655 drivers/md/dm-era-target.c uint32_t era;
era 671 drivers/md/dm-era-target.c uint64_t key = d->era;
era 732 drivers/md/dm-era-target.c d->era = key;
era 1107 drivers/md/dm-era-target.c uint32_t era;
era 1131 drivers/md/dm-era-target.c s->era = md->current_era;
era 1177 drivers/md/dm-era-target.c static bool block_size_is_power_of_two(struct era *era)
era 1179 drivers/md/dm-era-target.c return era->sectors_per_block_shift >= 0;
era 1182 drivers/md/dm-era-target.c static dm_block_t get_block(struct era *era, struct bio *bio)
era 1186 drivers/md/dm-era-target.c if (!block_size_is_power_of_two(era))
era 1187 drivers/md/dm-era-target.c (void) sector_div(block_nr, era->sectors_per_block);
era 1189 drivers/md/dm-era-target.c block_nr >>= era->sectors_per_block_shift;
era 1194 drivers/md/dm-era-target.c static void remap_to_origin(struct era *era, struct bio *bio)
era 1196 drivers/md/dm-era-target.c bio_set_dev(bio, era->origin_dev->bdev);
era 1202 drivers/md/dm-era-target.c static void wake_worker(struct era *era)
era 1204 drivers/md/dm-era-target.c if (!atomic_read(&era->suspended))
era 1205 drivers/md/dm-era-target.c queue_work(era->wq, &era->worker);
era 1208 drivers/md/dm-era-target.c static void process_old_eras(struct era *era)
era 1212 drivers/md/dm-era-target.c if (!era->digest.step)
era 1215 drivers/md/dm-era-target.c r = era->digest.step(era->md, &era->digest);
era 1218 drivers/md/dm-era-target.c era->digest.step = NULL;
era 1220 drivers/md/dm-era-target.c } else if (era->digest.step)
era 1221 drivers/md/dm-era-target.c wake_worker(era);
era 1224 drivers/md/dm-era-target.c static void process_deferred_bios(struct era *era)
era 1235 drivers/md/dm-era-target.c spin_lock(&era->deferred_lock);
era 1236 drivers/md/dm-era-target.c bio_list_merge(&deferred_bios, &era->deferred_bios);
era 1237 drivers/md/dm-era-target.c bio_list_init(&era->deferred_bios);
era 1238 drivers/md/dm-era-target.c spin_unlock(&era->deferred_lock);
era 1241 drivers/md/dm-era-target.c r = writeset_test_and_set(&era->md->bitset_info,
era 1242 drivers/md/dm-era-target.c era->md->current_writeset,
era 1243 drivers/md/dm-era-target.c get_block(era, bio));
era 1258 drivers/md/dm-era-target.c r = metadata_commit(era->md);
era 1271 drivers/md/dm-era-target.c static void process_rpc_calls(struct era *era)
era 1279 drivers/md/dm-era-target.c spin_lock(&era->rpc_lock);
era 1280 drivers/md/dm-era-target.c list_splice_init(&era->rpc_calls, &calls);
era 1281 drivers/md/dm-era-target.c spin_unlock(&era->rpc_lock);
era 1284 drivers/md/dm-era-target.c rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
era 1289 drivers/md/dm-era-target.c r = metadata_commit(era->md);
era 1299 drivers/md/dm-era-target.c static void kick_off_digest(struct era *era)
era 1301 drivers/md/dm-era-target.c if (era->md->archived_writesets) {
era 1302 drivers/md/dm-era-target.c era->md->archived_writesets = false;
era 1303 drivers/md/dm-era-target.c metadata_digest_start(era->md, &era->digest);
era 1309 drivers/md/dm-era-target.c struct era *era = container_of(ws, struct era, worker);
era 1311 drivers/md/dm-era-target.c kick_off_digest(era);
era 1312 drivers/md/dm-era-target.c process_old_eras(era);
era 1313 drivers/md/dm-era-target.c process_deferred_bios(era);
era 1314 drivers/md/dm-era-target.c process_rpc_calls(era);
era 1317 drivers/md/dm-era-target.c static void defer_bio(struct era *era, struct bio *bio)
era 1319 drivers/md/dm-era-target.c spin_lock(&era->deferred_lock);
era 1320 drivers/md/dm-era-target.c bio_list_add(&era->deferred_bios, bio);
era 1321 drivers/md/dm-era-target.c spin_unlock(&era->deferred_lock);
era 1323 drivers/md/dm-era-target.c wake_worker(era);
era 1329 drivers/md/dm-era-target.c static int perform_rpc(struct era *era, struct rpc *rpc)
era 1334 drivers/md/dm-era-target.c spin_lock(&era->rpc_lock);
era 1335 drivers/md/dm-era-target.c list_add(&rpc->list, &era->rpc_calls);
era 1336 drivers/md/dm-era-target.c spin_unlock(&era->rpc_lock);
era 1338 drivers/md/dm-era-target.c wake_worker(era);
era 1344 drivers/md/dm-era-target.c static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
era 1350 drivers/md/dm-era-target.c return perform_rpc(era, &rpc);
era 1353 drivers/md/dm-era-target.c static int in_worker1(struct era *era,
era 1361 drivers/md/dm-era-target.c return perform_rpc(era, &rpc);
era 1364 drivers/md/dm-era-target.c static void start_worker(struct era *era)
era 1366 drivers/md/dm-era-target.c atomic_set(&era->suspended, 0);
era 1369 drivers/md/dm-era-target.c static void stop_worker(struct era *era)
era 1371 drivers/md/dm-era-target.c atomic_set(&era->suspended, 1);
era 1372 drivers/md/dm-era-target.c flush_workqueue(era->wq);
era 1386 drivers/md/dm-era-target.c struct era *era = container_of(cb, struct era, callbacks);
era 1387 drivers/md/dm-era-target.c return dev_is_congested(era->origin_dev, bdi_bits);
era 1390 drivers/md/dm-era-target.c static void era_destroy(struct era *era)
era 1392 drivers/md/dm-era-target.c if (era->md)
era 1393 drivers/md/dm-era-target.c metadata_close(era->md);
era 1395 drivers/md/dm-era-target.c if (era->wq)
era 1396 drivers/md/dm-era-target.c destroy_workqueue(era->wq);
era 1398 drivers/md/dm-era-target.c if (era->origin_dev)
era 1399 drivers/md/dm-era-target.c dm_put_device(era->ti, era->origin_dev);
era 1401 drivers/md/dm-era-target.c if (era->metadata_dev)
era 1402 drivers/md/dm-era-target.c dm_put_device(era->ti, era->metadata_dev);
era 1404 drivers/md/dm-era-target.c kfree(era);
era 1407 drivers/md/dm-era-target.c static dm_block_t calc_nr_blocks(struct era *era)
era 1409 drivers/md/dm-era-target.c return dm_sector_div_up(era->ti->len, era->sectors_per_block);
era 1427 drivers/md/dm-era-target.c struct era *era;
era 1435 drivers/md/dm-era-target.c era = kzalloc(sizeof(*era), GFP_KERNEL);
era 1436 drivers/md/dm-era-target.c if (!era) {
era 1441 drivers/md/dm-era-target.c era->ti = ti;
era 1443 drivers/md/dm-era-target.c r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
era 1446 drivers/md/dm-era-target.c era_destroy(era);
era 1450 drivers/md/dm-era-target.c r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
era 1453 drivers/md/dm-era-target.c era_destroy(era);
era 1457 drivers/md/dm-era-target.c r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
era 1460 drivers/md/dm-era-target.c era_destroy(era);
era 1464 drivers/md/dm-era-target.c r = dm_set_target_max_io_len(ti, era->sectors_per_block);
era 1467 drivers/md/dm-era-target.c era_destroy(era);
era 1471 drivers/md/dm-era-target.c if (!valid_block_size(era->sectors_per_block)) {
era 1473 drivers/md/dm-era-target.c era_destroy(era);
era 1476 drivers/md/dm-era-target.c if (era->sectors_per_block & (era->sectors_per_block - 1))
era 1477 drivers/md/dm-era-target.c era->sectors_per_block_shift = -1;
era 1479 drivers/md/dm-era-target.c era->sectors_per_block_shift = __ffs(era->sectors_per_block);
era 1481 drivers/md/dm-era-target.c md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
era 1484 drivers/md/dm-era-target.c era_destroy(era);
era 1487 drivers/md/dm-era-target.c era->md = md;
era 1489 drivers/md/dm-era-target.c era->nr_blocks = calc_nr_blocks(era);
era 1491 drivers/md/dm-era-target.c r = metadata_resize(era->md, &era->nr_blocks);
era 1494 drivers/md/dm-era-target.c era_destroy(era);
era 1498 drivers/md/dm-era-target.c era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
era 1499 drivers/md/dm-era-target.c if (!era->wq) {
era 1501 drivers/md/dm-era-target.c era_destroy(era);
era 1504 drivers/md/dm-era-target.c INIT_WORK(&era->worker, do_work);
era 1506 drivers/md/dm-era-target.c spin_lock_init(&era->deferred_lock);
era 1507 drivers/md/dm-era-target.c bio_list_init(&era->deferred_bios);
era 1509 drivers/md/dm-era-target.c spin_lock_init(&era->rpc_lock);
era 1510 drivers/md/dm-era-target.c INIT_LIST_HEAD(&era->rpc_calls);
era 1512 drivers/md/dm-era-target.c ti->private = era;
era 1517 drivers/md/dm-era-target.c era->callbacks.congested_fn = era_is_congested;
era 1518 drivers/md/dm-era-target.c dm_table_add_target_callbacks(ti->table, &era->callbacks);
era 1530 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1531 drivers/md/dm-era-target.c dm_block_t block = get_block(era, bio);
era 1538 drivers/md/dm-era-target.c remap_to_origin(era, bio);
era 1545 drivers/md/dm-era-target.c !metadata_current_marked(era->md, block)) {
era 1546 drivers/md/dm-era-target.c defer_bio(era, bio);
era 1556 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1558 drivers/md/dm-era-target.c r = in_worker0(era, metadata_era_archive);
era 1564 drivers/md/dm-era-target.c stop_worker(era);
era 1570 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1571 drivers/md/dm-era-target.c dm_block_t new_size = calc_nr_blocks(era);
era 1573 drivers/md/dm-era-target.c if (era->nr_blocks != new_size) {
era 1574 drivers/md/dm-era-target.c r = in_worker1(era, metadata_resize, &new_size);
era 1578 drivers/md/dm-era-target.c era->nr_blocks = new_size;
era 1581 drivers/md/dm-era-target.c start_worker(era);
era 1583 drivers/md/dm-era-target.c r = in_worker0(era, metadata_new_era);
era 1602 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1609 drivers/md/dm-era-target.c r = in_worker1(era, metadata_get_stats, &stats);
era 1617 drivers/md/dm-era-target.c (unsigned) stats.era);
era 1626 drivers/md/dm-era-target.c format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
era 1628 drivers/md/dm-era-target.c format_dev_t(buf, era->origin_dev->bdev->bd_dev);
era 1629 drivers/md/dm-era-target.c DMEMIT("%s %u", buf, era->sectors_per_block);
era 1642 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1650 drivers/md/dm-era-target.c return in_worker0(era, metadata_checkpoint);
era 1653 drivers/md/dm-era-target.c return in_worker0(era, metadata_take_snap);
era 1656 drivers/md/dm-era-target.c return in_worker0(era, metadata_drop_snap);
era 1670 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1671 drivers/md/dm-era-target.c return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
era 1676 drivers/md/dm-era-target.c struct era *era = ti->private;
era 1683 drivers/md/dm-era-target.c if (io_opt_sectors < era->sectors_per_block ||
era 1684 drivers/md/dm-era-target.c do_div(io_opt_sectors, era->sectors_per_block)) {
era 1686 drivers/md/dm-era-target.c blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
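The drivers/md hits all belong to the dm-era target: a worker drains deferred bios and RPC calls, and the map path converts a sector to a block either by division or, when the block size is a power of two, by a shift precomputed at ctr time (sectors_per_block_shift is set to -1 otherwise, per the hits at lines 1476-1479 above). A small self-contained sketch of that block-mapping idiom; struct era_geometry and the function names are illustrative, and spb is assumed already validated non-zero as valid_block_size() does in the ctr:

#include <stdint.h>

struct era_geometry {
	uint32_t sectors_per_block;
	int sectors_per_block_shift;	/* -1 when not a power of two */
};

/* Mirrors the ctr logic at dm-era-target.c lines 1476-1479: precompute
 * a shift for power-of-two block sizes, mark the slow path otherwise.
 * __builtin_ctz() plays the role of the kernel's __ffs() here. */
static void era_geometry_init(struct era_geometry *g, uint32_t spb)
{
	g->sectors_per_block = spb;
	if (spb & (spb - 1))
		g->sectors_per_block_shift = -1;
	else
		g->sectors_per_block_shift = __builtin_ctz(spb);
}

/* get_block(): divide on the slow path, shift on the fast path. */
static uint64_t era_get_block(const struct era_geometry *g, uint64_t sector)
{
	if (g->sectors_per_block_shift < 0)
		return sector / g->sectors_per_block;
	return sector >> g->sectors_per_block_shift;
}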