mg 422 arch/mips/include/asm/octeon/cvmx-pci-defs.h uint32_t mg:8;
mg 428 arch/mips/include/asm/octeon/cvmx-pci-defs.h uint32_t mg:8;
mg 87 arch/mips/include/asm/txx9/tx3927.h endian_def_b4(ml, mg, ip, il);
mg 153 drivers/gpio/gpio-msic.c struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
mg 154 drivers/gpio/gpio-msic.c u32 gpio = data->irq - mg->irq_base;
mg 156 drivers/gpio/gpio-msic.c if (gpio >= mg->chip.ngpio)
mg 160 drivers/gpio/gpio-msic.c mg->trig_change_mask |= (1 << gpio);
mg 161 drivers/gpio/gpio-msic.c mg->trig_type = type;
mg 168 drivers/gpio/gpio-msic.c struct msic_gpio *mg = gpiochip_get_data(chip);
mg 169 drivers/gpio/gpio-msic.c return mg->irq_base + offset;
mg 174 drivers/gpio/gpio-msic.c struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
mg 175 drivers/gpio/gpio-msic.c mutex_lock(&mg->buslock);
mg 180 drivers/gpio/gpio-msic.c struct msic_gpio *mg = irq_data_get_irq_chip_data(data);
mg 188 drivers/gpio/gpio-msic.c if (mg->trig_change_mask) {
mg 189 drivers/gpio/gpio-msic.c offset = __ffs(mg->trig_change_mask);
mg 195 drivers/gpio/gpio-msic.c if (mg->trig_type & IRQ_TYPE_EDGE_RISING)
mg 197 drivers/gpio/gpio-msic.c if (mg->trig_type & IRQ_TYPE_EDGE_FALLING)
mg 201 drivers/gpio/gpio-msic.c mg->trig_change_mask = 0;
mg 204 drivers/gpio/gpio-msic.c mutex_unlock(&mg->buslock);
mg 224 drivers/gpio/gpio-msic.c struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
mg 226 drivers/gpio/gpio-msic.c struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
mg 232 drivers/gpio/gpio-msic.c for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
mg 237 drivers/gpio/gpio-msic.c generic_handle_irq(mg->irq_base + i * BITS_PER_BYTE + bitnr);
mg 246 drivers/gpio/gpio-msic.c struct msic_gpio *mg;
mg 261 drivers/gpio/gpio-msic.c mg = kzalloc(sizeof(*mg), GFP_KERNEL);
mg 262 drivers/gpio/gpio-msic.c if (!mg)
mg 265 drivers/gpio/gpio-msic.c dev_set_drvdata(dev, mg);
mg 267 drivers/gpio/gpio-msic.c mg->pdev = pdev;
mg 268 drivers/gpio/gpio-msic.c mg->irq = irq;
mg 269 drivers/gpio/gpio-msic.c mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET;
mg 270 drivers/gpio/gpio-msic.c mg->chip.label = "msic_gpio";
mg 271 drivers/gpio/gpio-msic.c mg->chip.direction_input = msic_gpio_direction_input;
mg 272 drivers/gpio/gpio-msic.c mg->chip.direction_output = msic_gpio_direction_output;
mg 273 drivers/gpio/gpio-msic.c mg->chip.get = msic_gpio_get;
mg 274 drivers/gpio/gpio-msic.c mg->chip.set = msic_gpio_set;
mg 275 drivers/gpio/gpio-msic.c mg->chip.to_irq = msic_gpio_to_irq;
mg 276 drivers/gpio/gpio-msic.c mg->chip.base = pdata->gpio_base;
mg 277 drivers/gpio/gpio-msic.c mg->chip.ngpio = MSIC_NUM_GPIO;
mg 278 drivers/gpio/gpio-msic.c mg->chip.can_sleep = true;
mg 279 drivers/gpio/gpio-msic.c mg->chip.parent = dev;
mg 281 drivers/gpio/gpio-msic.c mutex_init(&mg->buslock);
mg 283 drivers/gpio/gpio-msic.c retval = gpiochip_add_data(&mg->chip, mg);
mg 289 drivers/gpio/gpio-msic.c for (i = 0; i < mg->chip.ngpio; i++) {
mg 290 drivers/gpio/gpio-msic.c irq_set_chip_data(i + mg->irq_base, mg);
mg 291 drivers/gpio/gpio-msic.c irq_set_chip_and_handler(i + mg->irq_base,
mg 295 drivers/gpio/gpio-msic.c irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg);
mg 299 drivers/gpio/gpio-msic.c kfree(mg);
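All of the gpio-msic.c hits above use mg for the driver's private state (struct msic_gpio). A minimal sketch of the central pattern follows: the state is registered via gpiochip_add_data() and recovered in each callback with gpiochip_get_data(). The struct and helper names here are hypothetical stand-ins, not the verbatim driver.

#include <linux/gpio/driver.h>

struct msic_gpio_sketch {			/* hypothetical stand-in for struct msic_gpio */
	struct gpio_chip chip;
	unsigned int irq_base;
};

static int sketch_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	/* recover the pointer that was handed to gpiochip_add_data() below */
	struct msic_gpio_sketch *mg = gpiochip_get_data(chip);

	return mg->irq_base + offset;
}

static int sketch_register(struct msic_gpio_sketch *mg)
{
	mg->chip.to_irq = sketch_to_irq;
	/* mg doubles as the chip's private data, as at gpio-msic.c line 283 */
	return gpiochip_add_data(&mg->chip, mg);
}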
mg 555 drivers/md/dm-cache-target.c struct dm_cache_migration *mg;
mg 557 drivers/md/dm-cache-target.c mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
mg 559 drivers/md/dm-cache-target.c memset(mg, 0, sizeof(*mg));
mg 561 drivers/md/dm-cache-target.c mg->cache = cache;
mg 564 drivers/md/dm-cache-target.c return mg;
mg 567 drivers/md/dm-cache-target.c static void free_migration(struct dm_cache_migration *mg)
mg 569 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 574 drivers/md/dm-cache-target.c mempool_free(mg, &cache->migration_pool);
mg 1163 drivers/md/dm-cache-target.c static void quiesce(struct dm_cache_migration *mg,
mg 1166 drivers/md/dm-cache-target.c init_continuation(&mg->k, continuation);
mg 1167 drivers/md/dm-cache-target.c dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
mg 1178 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
mg 1181 drivers/md/dm-cache-target.c mg->k.input = BLK_STS_IOERR;
mg 1183 drivers/md/dm-cache-target.c queue_continuation(mg->cache->wq, &mg->k);
mg 1186 drivers/md/dm-cache-target.c static void copy(struct dm_cache_migration *mg, bool promote)
mg 1189 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1192 drivers/md/dm-cache-target.c o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
mg 1196 drivers/md/dm-cache-target.c c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
mg 1200 drivers/md/dm-cache-target.c dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
mg 1202 drivers/md/dm-cache-target.c dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
mg 1216 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = bio->bi_private;
mg 1217 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1223 drivers/md/dm-cache-target.c mg->k.input = bio->bi_status;
mg 1225 drivers/md/dm-cache-target.c queue_continuation(cache->wq, &mg->k);
mg 1228 drivers/md/dm-cache-target.c static void overwrite(struct dm_cache_migration *mg,
mg 1231 drivers/md/dm-cache-target.c struct bio *bio = mg->overwrite_bio;
mg 1234 drivers/md/dm-cache-target.c dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
mg 1240 drivers/md/dm-cache-target.c if (mg->op->op == POLICY_PROMOTE)
mg 1241 drivers/md/dm-cache-target.c remap_to_cache(mg->cache, bio, mg->op->cblock);
mg 1243 drivers/md/dm-cache-target.c remap_to_origin(mg->cache, bio);
mg 1245 drivers/md/dm-cache-target.c init_continuation(&mg->k, continuation);
mg 1246 drivers/md/dm-cache-target.c accounted_request(mg->cache, bio);
mg 1260 drivers/md/dm-cache-target.c static void mg_complete(struct dm_cache_migration *mg, bool success)
mg 1263 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1264 drivers/md/dm-cache-target.c struct policy_work *op = mg->op;
mg 1275 drivers/md/dm-cache-target.c if (mg->overwrite_bio) {
mg 1278 drivers/md/dm-cache-target.c else if (mg->k.input)
mg 1279 drivers/md/dm-cache-target.c mg->overwrite_bio->bi_status = mg->k.input;
mg 1281 drivers/md/dm-cache-target.c mg->overwrite_bio->bi_status = BLK_STS_IOERR;
mg 1282 drivers/md/dm-cache-target.c bio_endio(mg->overwrite_bio);
mg 1309 drivers/md/dm-cache-target.c if (mg->cell) {
mg 1310 drivers/md/dm-cache-target.c if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
mg 1311 drivers/md/dm-cache-target.c free_prison_cell(cache, mg->cell);
mg 1314 drivers/md/dm-cache-target.c free_migration(mg);
mg 1323 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1324 drivers/md/dm-cache-target.c mg_complete(mg, mg->k.input == 0);
mg 1330 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1331 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1332 drivers/md/dm-cache-target.c struct policy_work *op = mg->op;
mg 1342 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1345 drivers/md/dm-cache-target.c mg_complete(mg, true);
mg 1355 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1378 drivers/md/dm-cache-target.c init_continuation(&mg->k, mg_success);
mg 1379 drivers/md/dm-cache-target.c continue_after_commit(&cache->committer, &mg->k);
mg 1384 drivers/md/dm-cache-target.c mg_complete(mg, true);
mg 1391 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1396 drivers/md/dm-cache-target.c if (mg->k.input)
mg 1397 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1405 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1410 drivers/md/dm-cache-target.c if (mg->k.input)
mg 1411 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1417 drivers/md/dm-cache-target.c r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
mg 1420 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1423 drivers/md/dm-cache-target.c quiesce(mg, mg_update_metadata);
mg 1432 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1433 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1434 drivers/md/dm-cache-target.c struct policy_work *op = mg->op;
mg 1443 drivers/md/dm-cache-target.c init_continuation(&mg->k, mg_upgrade_lock);
mg 1444 drivers/md/dm-cache-target.c copy(mg, is_policy_promote);
mg 1449 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1451 drivers/md/dm-cache-target.c if (mg->overwrite_bio) {
mg 1457 drivers/md/dm-cache-target.c if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
mg 1461 drivers/md/dm-cache-target.c bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
mg 1463 drivers/md/dm-cache-target.c mg->overwrite_bio = NULL;
mg 1464 drivers/md/dm-cache-target.c inc_io_migrations(mg->cache);
mg 1476 drivers/md/dm-cache-target.c overwrite(mg, mg_update_metadata_after_copy);
mg 1482 drivers/md/dm-cache-target.c static int mg_lock_writes(struct dm_cache_migration *mg)
mg 1486 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1496 drivers/md/dm-cache-target.c build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
mg 1498 drivers/md/dm-cache-target.c mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
mg 1499 drivers/md/dm-cache-target.c prealloc, &mg->cell);
mg 1502 drivers/md/dm-cache-target.c mg_complete(mg, false);
mg 1506 drivers/md/dm-cache-target.c if (mg->cell != prealloc)
mg 1510 drivers/md/dm-cache-target.c mg_copy(&mg->k.ws);
mg 1512 drivers/md/dm-cache-target.c quiesce(mg, mg_copy);
mg 1519 drivers/md/dm-cache-target.c struct dm_cache_migration *mg;
mg 1526 drivers/md/dm-cache-target.c mg = alloc_migration(cache);
mg 1528 drivers/md/dm-cache-target.c mg->op = op;
mg 1529 drivers/md/dm-cache-target.c mg->overwrite_bio = bio;
mg 1534 drivers/md/dm-cache-target.c return mg_lock_writes(mg);
mg 1541 drivers/md/dm-cache-target.c static void invalidate_complete(struct dm_cache_migration *mg, bool success)
mg 1544 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1547 drivers/md/dm-cache-target.c if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
mg 1548 drivers/md/dm-cache-target.c free_prison_cell(cache, mg->cell);
mg 1550 drivers/md/dm-cache-target.c if (!success && mg->overwrite_bio)
mg 1551 drivers/md/dm-cache-target.c bio_io_error(mg->overwrite_bio);
mg 1553 drivers/md/dm-cache-target.c free_migration(mg);
mg 1561 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1562 drivers/md/dm-cache-target.c invalidate_complete(mg, !mg->k.input);
mg 1591 drivers/md/dm-cache-target.c struct dm_cache_migration *mg = ws_to_mg(ws);
mg 1592 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1594 drivers/md/dm-cache-target.c r = invalidate_cblock(cache, mg->invalidate_cblock);
mg 1596 drivers/md/dm-cache-target.c invalidate_complete(mg, false);
mg 1600 drivers/md/dm-cache-target.c init_continuation(&mg->k, invalidate_completed);
mg 1601 drivers/md/dm-cache-target.c continue_after_commit(&cache->committer, &mg->k);
mg 1602 drivers/md/dm-cache-target.c remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
mg 1603 drivers/md/dm-cache-target.c mg->overwrite_bio = NULL;
mg 1607 drivers/md/dm-cache-target.c static int invalidate_lock(struct dm_cache_migration *mg)
mg 1611 drivers/md/dm-cache-target.c struct cache *cache = mg->cache;
mg 1616 drivers/md/dm-cache-target.c build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
mg 1618 drivers/md/dm-cache-target.c READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
mg 1621 drivers/md/dm-cache-target.c invalidate_complete(mg, false);
mg 1625 drivers/md/dm-cache-target.c if (mg->cell != prealloc)
mg 1629 drivers/md/dm-cache-target.c quiesce(mg, invalidate_remove);
mg 1636 drivers/md/dm-cache-target.c init_continuation(&mg->k, invalidate_remove);
mg 1637 drivers/md/dm-cache-target.c queue_work(cache->wq, &mg->k.ws);
mg 1646 drivers/md/dm-cache-target.c struct dm_cache_migration *mg;
mg 1651 drivers/md/dm-cache-target.c mg = alloc_migration(cache);
mg 1653 drivers/md/dm-cache-target.c mg->overwrite_bio = bio;
mg 1654 drivers/md/dm-cache-target.c mg->invalidate_cblock = cblock;
mg 1655 drivers/md/dm-cache-target.c mg->invalidate_oblock = oblock;
mg 1657 drivers/md/dm-cache-target.c return invalidate_lock(mg);
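The dm-cache-target.c hits above trace one object lifecycle: a dm_cache_migration is drawn from a mempool, zeroed, drives a promote/demote or invalidate, and is returned by free_migration(). A condensed sketch of that alloc/free pair, assuming heavily simplified struct fields (the real structures carry far more state):

#include <linux/mempool.h>
#include <linux/string.h>

struct cache_sketch {				/* stands in for struct cache */
	mempool_t migration_pool;
};

struct migration_sketch {			/* stands in for struct dm_cache_migration */
	struct cache_sketch *cache;
};

static struct migration_sketch *alloc_migration_sketch(struct cache_sketch *cache)
{
	struct migration_sketch *mg;

	/* GFP_NOIO: this sits on the I/O path, so the allocation must not recurse into it */
	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
	memset(mg, 0, sizeof(*mg));
	mg->cache = cache;
	return mg;
}

static void free_migration_sketch(struct migration_sketch *mg)
{
	mempool_free(mg, &mg->cache->migration_pool);
}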
mg 179 drivers/media/usb/usbvision/usbvision.h #define YUV_TO_RGB_BY_THE_BOOK(my, mu, mv, mr, mg, mb) { \
mg 189 drivers/media/usb/usbvision/usbvision.h mg = LIMIT_RGB(mm_g); \
mg 421 net/mac80211/rc80211_minstrel_ht.c struct minstrel_mcs_group_data *mg;
mg 430 net/mac80211/rc80211_minstrel_ht.c mg = &mi->groups[index / MCS_GROUP_RATES];
mg 431 net/mac80211/rc80211_minstrel_ht.c mrs = &mg->rates[index % MCS_GROUP_RATES];
mg 445 net/mac80211/rc80211_minstrel_ht.c max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
mg 446 net/mac80211/rc80211_minstrel_ht.c max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
mg 459 net/mac80211/rc80211_minstrel_ht.c mg->max_group_prob_rate = index;
mg 464 net/mac80211/rc80211_minstrel_ht.c mg->max_group_prob_rate = index;
mg 509 net/mac80211/rc80211_minstrel_ht.c struct minstrel_mcs_group_data *mg;
mg 516 net/mac80211/rc80211_minstrel_ht.c mg = &mi->groups[group];
mg 520 net/mac80211/rc80211_minstrel_ht.c tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
mg 525 net/mac80211/rc80211_minstrel_ht.c mi->max_prob_rate = mg->max_group_prob_rate;
mg 673 net/mac80211/rc80211_minstrel_ht.c struct minstrel_mcs_group_data *mg;
mg 722 net/mac80211/rc80211_minstrel_ht.c mg = &mi->groups[group];
mg 738 net/mac80211/rc80211_minstrel_ht.c mrs = &mg->rates[i];
mg 763 net/mac80211/rc80211_minstrel_ht.c memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
mg 764 net/mac80211/rc80211_minstrel_ht.c sizeof(mg->max_group_tp_rate));
mg 816 net/mac80211/rc80211_minstrel_ht.c struct minstrel_mcs_group_data *mg;
mg 821 net/mac80211/rc80211_minstrel_ht.c mg = &mi->groups[mi->sample_group];
mg 826 net/mac80211/rc80211_minstrel_ht.c if (++mg->index >= MCS_GROUP_RATES) {
mg 827 net/mac80211/rc80211_minstrel_ht.c mg->index = 0;
mg 828 net/mac80211/rc80211_minstrel_ht.c if (++mg->column >= ARRAY_SIZE(sample_table))
mg 829 net/mac80211/rc80211_minstrel_ht.c mg->column = 0;
mg 1193 net/mac80211/rc80211_minstrel_ht.c struct minstrel_mcs_group_data *mg;
mg 1212 net/mac80211/rc80211_minstrel_ht.c mg = &mi->groups[sample_group];
mg 1213 net/mac80211/rc80211_minstrel_ht.c sample_idx = sample_table[mg->column][mg->index];
mg 1219 net/mac80211/rc80211_minstrel_ht.c mrs = &mg->rates[sample_idx];
mg 1257 net/mac80211/rc80211_minstrel_ht.c (minstrel_get_duration(mg->max_group_tp_rate[0]) < sample_dur ||
mg 34 net/mac80211/rc80211_minstrel_ht_debugfs.c const struct mcs_group *mg;
mg 43 net/mac80211/rc80211_minstrel_ht_debugfs.c mg = &minstrel_mcs_groups[i];
mg 44 net/mac80211/rc80211_minstrel_ht_debugfs.c gflags = mg->flags;
mg 65 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, "%d ", mg->streams);
mg 69 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, "%d ", mg->streams);
mg 83 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, " MCS%-2u", (mg->streams - 1) * 8 + j);
mg 85 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, " MCS%-1u/%1u", j, mg->streams);
mg 95 net/mac80211/rc80211_minstrel_ht_debugfs.c duration = mg->duration[j];
mg 96 net/mac80211/rc80211_minstrel_ht_debugfs.c duration <<= mg->shift;
mg 181 net/mac80211/rc80211_minstrel_ht_debugfs.c const struct mcs_group *mg;
mg 190 net/mac80211/rc80211_minstrel_ht_debugfs.c mg = &minstrel_mcs_groups[i];
mg 191 net/mac80211/rc80211_minstrel_ht_debugfs.c gflags = mg->flags;
mg 212 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, "%d,", mg->streams);
mg 216 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, "%d,", mg->streams);
mg 230 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j);
mg 232 net/mac80211/rc80211_minstrel_ht_debugfs.c p += sprintf(p, ",MCS%-1u/%1u,", j, mg->streams);
mg 240 net/mac80211/rc80211_minstrel_ht_debugfs.c duration = mg->duration[j];
mg 241 net/mac80211/rc80211_minstrel_ht_debugfs.c duration <<= mg->shift;
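The rc80211_minstrel_ht.c hits above repeatedly convert a flat rate index into a (group, rate-within-group) pair by dividing and taking the remainder by MCS_GROUP_RATES. A small worked sketch; the constant's value here is an assumption for illustration (the real definition lives in the minstrel_ht headers):

#include <stdio.h>

#define MCS_GROUP_RATES 10	/* assumed value, for illustration only */

int main(void)
{
	int index = 23;
	int group = index / MCS_GROUP_RATES;	/* which mi->groups[] entry, as at line 430 */
	int idx   = index % MCS_GROUP_RATES;	/* which mg->rates[] entry, as at line 431 */

	printf("index %d -> group %d, rate %d\n", index, group, idx);
	return 0;
}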
mg 29 tools/perf/arch/arm/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp);
mg 29 tools/perf/arch/arm64/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp);
mg 30 tools/perf/arch/powerpc/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp);
mg 30 tools/perf/arch/x86/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp);
mg 747 tools/perf/builtin-report.c static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
mg 749 tools/perf/builtin-report.c return maps__fprintf_task(&mg->maps, indent, fp);
mg 762 tools/perf/builtin-report.c map_groups__fprintf_task(thread->mg, comm_indent, fp);
mg 278 tools/perf/tests/code-reading.c ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
mg 16 tools/perf/tests/map_groups.c static int check_maps(struct map_def *merged, unsigned int size, struct map_groups *mg)
mg 21 tools/perf/tests/map_groups.c map = map_groups__first(mg);
mg 39 tools/perf/tests/map_groups.c struct map_groups mg;
mg 68 tools/perf/tests/map_groups.c map_groups__init(&mg, NULL);
mg 78 tools/perf/tests/map_groups.c map_groups__insert(&mg, map);
mg 103 tools/perf/tests/map_groups.c ret = map_groups__merge_in(&mg, map_kcore1);
mg 106 tools/perf/tests/map_groups.c ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
mg 109 tools/perf/tests/map_groups.c ret = map_groups__merge_in(&mg, map_kcore2);
mg 112 tools/perf/tests/map_groups.c ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
mg 115 tools/perf/tests/map_groups.c ret = map_groups__merge_in(&mg, map_kcore3);
mg 118 tools/perf/tests/map_groups.c ret = check_maps(merged3, ARRAY_SIZE(merged3), &mg);
mg 15 tools/perf/tests/thread-mg-share.c struct map_groups *mg;
mg 45 tools/perf/tests/thread-mg-share.c mg = leader->mg;
mg 46 tools/perf/tests/thread-mg-share.c TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 4);
mg 49 tools/perf/tests/thread-mg-share.c TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
mg 50 tools/perf/tests/thread-mg-share.c TEST_ASSERT_VAL("map groups don't match", mg == t2->mg);
mg 51 tools/perf/tests/thread-mg-share.c TEST_ASSERT_VAL("map groups don't match", mg == t3->mg);
mg 73 tools/perf/tests/thread-mg-share.c other_mg = other->mg;
mg 76 tools/perf/tests/thread-mg-share.c TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
mg 80 tools/perf/tests/thread-mg-share.c TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 3);
mg 83 tools/perf/tests/thread-mg-share.c TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 2);
mg 86 tools/perf/tests/thread-mg-share.c TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 1);
mg 861 tools/perf/ui/stdio/hist.c map_groups__fprintf(h->thread->mg, fp);
mg 460 tools/perf/util/event.c struct map_groups *mg = thread->mg;
mg 461 tools/perf/util/event.c struct machine *machine = mg->machine;
mg 477 tools/perf/util/event.c mg = &machine->kmaps;
mg 483 tools/perf/util/event.c mg = &machine->kmaps;
mg 503 tools/perf/util/event.c al->map = map_groups__find(mg, al->addr);
mg 526 tools/perf/util/event.c struct machine *machine = thread->mg->machine;
mg 410 tools/perf/util/machine.c if (!leader->mg)
mg 411 tools/perf/util/machine.c leader->mg = map_groups__new(machine);
mg 413 tools/perf/util/machine.c if (!leader->mg)
mg 416 tools/perf/util/machine.c if (th->mg == leader->mg)
mg 419 tools/perf/util/machine.c if (th->mg) {
mg 425 tools/perf/util/machine.c if (!map_groups__empty(th->mg))
mg 428 tools/perf/util/machine.c map_groups__put(th->mg);
mg 431 tools/perf/util/machine.c th->mg = map_groups__get(leader->mg);
mg 1264 tools/perf/util/machine.c static int map_groups__set_module_path(struct map_groups *mg, const char *path,
mg 1268 tools/perf/util/machine.c struct map *map = map_groups__find_by_name(mg, m->name);
mg 1292 tools/perf/util/machine.c static int map_groups__set_modules_path_dir(struct map_groups *mg,
mg 1325 tools/perf/util/machine.c ret = map_groups__set_modules_path_dir(mg, path,
mg 1337 tools/perf/util/machine.c ret = map_groups__set_module_path(mg, path, &m);
mg 582 tools/perf/util/map.c void map_groups__init(struct map_groups *mg, struct machine *machine)
mg 584 tools/perf/util/map.c maps__init(&mg->maps);
mg 585 tools/perf/util/map.c mg->machine = machine;
mg 586 tools/perf/util/map.c refcount_set(&mg->refcnt, 1);
mg 589 tools/perf/util/map.c void map_groups__insert(struct map_groups *mg, struct map *map)
mg 591 tools/perf/util/map.c maps__insert(&mg->maps, map);
mg 592 tools/perf/util/map.c map->groups = mg;
mg 631 tools/perf/util/map.c void map_groups__exit(struct map_groups *mg)
mg 633 tools/perf/util/map.c maps__exit(&mg->maps);
mg 636 tools/perf/util/map.c bool map_groups__empty(struct map_groups *mg)
mg 638 tools/perf/util/map.c return !maps__first(&mg->maps);
mg 643 tools/perf/util/map.c struct map_groups *mg = zalloc(sizeof(*mg));
mg 645 tools/perf/util/map.c if (mg != NULL)
mg 646 tools/perf/util/map.c map_groups__init(mg, machine);
mg 648 tools/perf/util/map.c return mg;
mg 651 tools/perf/util/map.c void map_groups__delete(struct map_groups *mg)
mg 653 tools/perf/util/map.c map_groups__exit(mg);
mg 654 tools/perf/util/map.c unwind__finish_access(mg);
mg 655 tools/perf/util/map.c free(mg);
mg 658 tools/perf/util/map.c void map_groups__put(struct map_groups *mg)
mg 660 tools/perf/util/map.c if (mg && refcount_dec_and_test(&mg->refcnt))
mg 661 tools/perf/util/map.c map_groups__delete(mg);
mg 664 tools/perf/util/map.c struct symbol *map_groups__find_symbol(struct map_groups *mg,
mg 667 tools/perf/util/map.c struct map *map = map_groups__find(mg, addr);
mg 716 tools/perf/util/map.c struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
mg 720 tools/perf/util/map.c return maps__find_symbol_by_name(&mg->maps, name, mapp);
mg 761 tools/perf/util/map.c size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
mg 763 tools/perf/util/map.c return maps__fprintf(&mg->maps, fp);
mg 766 tools/perf/util/map.c static void __map_groups__insert(struct map_groups *mg, struct map *map)
mg 768 tools/perf/util/map.c __maps__insert(&mg->maps, map);
mg 769 tools/perf/util/map.c __maps__insert_name(&mg->maps, map);
mg 770 tools/perf/util/map.c map->groups = mg;
mg 874 tools/perf/util/map.c int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
mg 877 tools/perf/util/map.c return maps__fixup_overlappings(&mg->maps, map, fp);
mg 885 tools/perf/util/map.c struct map_groups *mg = thread->mg;
mg 897 tools/perf/util/map.c err = unwind__prepare_access(mg, new, NULL);
mg 901 tools/perf/util/map.c map_groups__insert(mg, new);
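The map.c hits above define the map_groups lifecycle: allocated and initialized with a refcount of 1, shared via get, and torn down only on the final put. A condensed reconstruction of the new/put pair from the lines listed (error handling and the other fields trimmed; zalloc() is perf's zeroing allocator):

#include <linux/refcount.h>	/* perf builds against the tools/include mirror of refcount_t */

struct machine;

struct map_groups {
	refcount_t refcnt;
	/* maps, machine, unwind state elided */
};

/* helpers shown in the map.c hits above */
void map_groups__init(struct map_groups *mg, struct machine *machine);
void map_groups__delete(struct map_groups *mg);

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = zalloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);	/* does refcount_set(&mg->refcnt, 1) */
	return mg;
}

void map_groups__put(struct map_groups *mg)
{
	/* NULL-safe: only the final put runs map_groups__delete() */
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}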
mg 49 tools/perf/util/map_groups.h void map_groups__delete(struct map_groups *mg);
mg 50 tools/perf/util/map_groups.h bool map_groups__empty(struct map_groups *mg);
mg 52 tools/perf/util/map_groups.h static inline struct map_groups *map_groups__get(struct map_groups *mg)
mg 54 tools/perf/util/map_groups.h if (mg)
mg 55 tools/perf/util/map_groups.h refcount_inc(&mg->refcnt);
mg 56 tools/perf/util/map_groups.h return mg;
mg 59 tools/perf/util/map_groups.h void map_groups__put(struct map_groups *mg);
mg 60 tools/perf/util/map_groups.h void map_groups__init(struct map_groups *mg, struct machine *machine);
mg 61 tools/perf/util/map_groups.h void map_groups__exit(struct map_groups *mg);
mg 63 tools/perf/util/map_groups.h size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
mg 65 tools/perf/util/map_groups.h void map_groups__insert(struct map_groups *mg, struct map *map);
mg 67 tools/perf/util/map_groups.h static inline void map_groups__remove(struct map_groups *mg, struct map *map)
mg 69 tools/perf/util/map_groups.h maps__remove(&mg->maps, map);
mg 72 tools/perf/util/map_groups.h static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
mg 74 tools/perf/util/map_groups.h return maps__find(&mg->maps, addr);
mg 77 tools/perf/util/map_groups.h struct map *map_groups__first(struct map_groups *mg);
mg 84 tools/perf/util/map_groups.h struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
mg 85 tools/perf/util/map_groups.h struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
mg 91 tools/perf/util/map_groups.h int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
mg 93 tools/perf/util/map_groups.h struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
mg 338 tools/perf/util/metricgroup.c char *mg = strdup(g);
mg 340 tools/perf/util/metricgroup.c if (!mg)
mg 342 tools/perf/util/metricgroup.c omg = mg;
mg 343 tools/perf/util/metricgroup.c while ((g = strsep(&mg, ";")) != NULL) {
mg 242 tools/perf/util/symbol.c void map_groups__fixup_end(struct map_groups *mg)
mg 244 tools/perf/util/symbol.c struct maps *maps = &mg->maps;
mg 1056 tools/perf/util/symbol.c struct map *map_groups__first(struct map_groups *mg)
mg 1058 tools/perf/util/symbol.c return maps__first(&mg->maps);
mg 1787 tools/perf/util/symbol.c struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
mg 1789 tools/perf/util/symbol.c struct maps *maps = &mg->maps;
mg 199 tools/perf/util/symbol.h void map_groups__fixup_end(struct map_groups *mg);
mg 137 tools/perf/util/thread-stack.c if (thread->mg && thread->mg->machine) {
mg 138 tools/perf/util/thread-stack.c struct machine *machine = thread->mg->machine;
mg 27 tools/perf/util/thread.c thread->mg = map_groups__new(machine);
mg 31 tools/perf/util/thread.c thread->mg = map_groups__get(leader->mg);
mg 36 tools/perf/util/thread.c return thread->mg ? 0 : -1;
mg 89 tools/perf/util/thread.c if (thread->mg) {
mg 90 tools/perf/util/thread.c map_groups__put(thread->mg);
mg 91 tools/perf/util/thread.c thread->mg = NULL;
mg 254 tools/perf/util/thread.c unwind__flush_access(thread->mg);
mg 327 tools/perf/util/thread.c map_groups__fprintf(thread->mg, fp);
mg 334 tools/perf/util/thread.c ret = unwind__prepare_access(thread->mg, map, NULL);
mg 338 tools/perf/util/thread.c map_groups__fixup_overlappings(thread->mg, map, stderr);
mg 339 tools/perf/util/thread.c map_groups__insert(thread->mg, map);
mg 348 tools/perf/util/thread.c struct maps *maps = &thread->mg->maps;
mg 354 tools/perf/util/thread.c err = unwind__prepare_access(thread->mg, map, &initialized);
mg 382 tools/perf/util/thread.c if (thread->mg == parent->mg) {
mg 388 tools/perf/util/thread.c return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
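The thread.c hits above (lines 27, 31, 36) show how map groups are shared across a thread group: the leader allocates a fresh set, and every other thread just takes a reference to the leader's. A condensed sketch of that branch; the function name and signature are simplified stand-ins, since the real helper looks up the leader itself:

struct thread_sketch {				/* stands in for perf's struct thread */
	struct map_groups *mg;			/* see tools/perf/util/thread.h line 28 */
};

/* from tools/perf/util/map_groups.h, shown in the hits above */
struct map_groups *map_groups__new(struct machine *machine);
struct map_groups *map_groups__get(struct map_groups *mg);

static int share_map_groups(struct thread_sketch *thread,
			    struct thread_sketch *leader,
			    struct machine *machine)
{
	if (thread == leader)
		thread->mg = map_groups__new(machine);		/* leader owns a fresh set */
	else
		thread->mg = map_groups__get(leader->mg);	/* siblings share, bumping refcnt */

	return thread->mg ? 0 : -1;	/* as at thread.c line 36 */
}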
mg 28 tools/perf/util/thread.h struct map_groups *mg;
mg 202 tools/perf/util/unwind-libdw.c .machine = thread->mg->machine,
mg 618 tools/perf/util/unwind-libunwind-local.c static int _unwind__prepare_access(struct map_groups *mg)
mg 620 tools/perf/util/unwind-libunwind-local.c mg->addr_space = unw_create_addr_space(&accessors, 0);
mg 621 tools/perf/util/unwind-libunwind-local.c if (!mg->addr_space) {
mg 626 tools/perf/util/unwind-libunwind-local.c unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
mg 630 tools/perf/util/unwind-libunwind-local.c static void _unwind__flush_access(struct map_groups *mg)
mg 632 tools/perf/util/unwind-libunwind-local.c unw_flush_cache(mg->addr_space, 0, 0);
mg 635 tools/perf/util/unwind-libunwind-local.c static void _unwind__finish_access(struct map_groups *mg)
mg 637 tools/perf/util/unwind-libunwind-local.c unw_destroy_addr_space(mg->addr_space);
mg 662 tools/perf/util/unwind-libunwind-local.c addr_space = ui->thread->mg->addr_space;
mg 711 tools/perf/util/unwind-libunwind-local.c .machine = thread->mg->machine,
mg 15 tools/perf/util/unwind-libunwind.c static void unwind__register_ops(struct map_groups *mg,
mg 18 tools/perf/util/unwind-libunwind.c mg->unwind_libunwind_ops = ops;
mg 21 tools/perf/util/unwind-libunwind.c int unwind__prepare_access(struct map_groups *mg, struct map *map,
mg 32 tools/perf/util/unwind-libunwind.c if (mg->addr_space) {
mg 41 tools/perf/util/unwind-libunwind.c if (!mg->machine->env || !mg->machine->env->arch)
mg 44 tools/perf/util/unwind-libunwind.c dso_type = dso__type(map->dso, mg->machine);
mg 48 tools/perf/util/unwind-libunwind.c arch = perf_env__arch(mg->machine->env);
mg 63 tools/perf/util/unwind-libunwind.c unwind__register_ops(mg, ops);
mg 65 tools/perf/util/unwind-libunwind.c err = mg->unwind_libunwind_ops->prepare_access(mg);
mg 71 tools/perf/util/unwind-libunwind.c void unwind__flush_access(struct map_groups *mg)
mg 73 tools/perf/util/unwind-libunwind.c if (mg->unwind_libunwind_ops)
mg 74 tools/perf/util/unwind-libunwind.c mg->unwind_libunwind_ops->flush_access(mg);
mg 77 tools/perf/util/unwind-libunwind.c void unwind__finish_access(struct map_groups *mg)
mg 79 tools/perf/util/unwind-libunwind.c if (mg->unwind_libunwind_ops)
mg 80 tools/perf/util/unwind-libunwind.c mg->unwind_libunwind_ops->finish_access(mg);
mg 87 tools/perf/util/unwind-libunwind.c if (thread->mg->unwind_libunwind_ops)
mg 88 tools/perf/util/unwind-libunwind.c return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
mg 23 tools/perf/util/unwind.h int (*prepare_access)(struct map_groups *mg);
mg 24 tools/perf/util/unwind.h void (*flush_access)(struct map_groups *mg);
mg 25 tools/perf/util/unwind.h void (*finish_access)(struct map_groups *mg);
mg 50 tools/perf/util/unwind.h int unwind__prepare_access(struct map_groups *mg, struct map *map,
mg 52 tools/perf/util/unwind.h void unwind__flush_access(struct map_groups *mg);
mg 53 tools/perf/util/unwind.h void unwind__finish_access(struct map_groups *mg);
mg 55 tools/perf/util/unwind.h static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
mg 62 tools/perf/util/unwind.h static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
mg 63 tools/perf/util/unwind.h static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
mg 76 tools/perf/util/unwind.h static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
mg 83 tools/perf/util/unwind.h static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
mg 84 tools/perf/util/unwind.h static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
mg 145 tools/perf/util/vdso.c struct map *map = map_groups__first(thread->mg);
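The unwind hits above follow an ops-table dispatch: each map_groups carries an unwind_libunwind_ops pointer that unwind__prepare_access() registers once (based on the detected DSO type), and every later entry point calls through it, tolerating an unregistered table. A condensed reconstruction from the lines listed; the map_groups fields beyond the ops pointer are elided:

struct map_groups;

struct unwind_libunwind_ops {		/* as at tools/perf/util/unwind.h lines 23-25 */
	int  (*prepare_access)(struct map_groups *mg);
	void (*flush_access)(struct map_groups *mg);
	void (*finish_access)(struct map_groups *mg);
};

struct map_groups {
	struct unwind_libunwind_ops *unwind_libunwind_ops;
	/* maps, machine, refcnt, addr_space elided */
};

static void unwind__register_ops(struct map_groups *mg,
				 struct unwind_libunwind_ops *ops)
{
	mg->unwind_libunwind_ops = ops;
}

void unwind__flush_access(struct map_groups *mg)
{
	/* a safe no-op until unwind__prepare_access() has registered an ops table */
	if (mg->unwind_libunwind_ops)
		mg->unwind_libunwind_ops->flush_access(mg);
}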