root/arch/powerpc/perf/hv-gpci.c


DEFINITIONS

This source file includes the following definitions:
  1. kernel_version_show
  2. single_gpci_request
  3. h_gpci_get_value
  4. h_gpci_event_update
  5. h_gpci_event_start
  6. h_gpci_event_stop
  7. h_gpci_event_add
  8. h_gpci_event_init
  9. hv_gpci_init

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 */

#define pr_fmt(fmt) "hv-gpci: " fmt

#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-gpci.h"
#include "hv-common.h"

/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *                secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */
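
/*
 * The events generated from hv-gpci-requests.h are also exported by name
 * under /sys/bus/event_source/devices/hv_gpci/events/, so the raw
 * request/offset/length encoding above is only needed for counters that do
 * not have a predefined event.
 */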

/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);

/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);

static struct attribute *format_attrs[] = {
        &format_attr_request.attr,
        &format_attr_starting_index.attr,
        &format_attr_phys_processor_idx.attr,
        &format_attr_sibling_part_id.attr,
        &format_attr_hw_chip_id.attr,
        &format_attr_partition_id.attr,
        &format_attr_secondary_index.attr,
        &format_attr_counter_info_version.attr,

        &format_attr_offset.attr,
        &format_attr_length.attr,
        NULL,
};

static struct attribute_group format_group = {
        .name = "format",
        .attrs = format_attrs,
};

static struct attribute_group event_group = {
        .name  = "events",
        .attrs = hv_gpci_event_attrs,
};

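/*
 * HV_CAPS_ATTR(_name, _format) expands to a read-only sysfs attribute whose
 * show routine fetches the hypervisor performance capabilities with
 * hv_perf_caps_get() and prints the named hv_perf_caps field using _format.
 */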
#define HV_CAPS_ATTR(_name, _format)                            \
static ssize_t _name##_show(struct device *dev,                 \
                            struct device_attribute *attr,      \
                            char *page)                         \
{                                                               \
        struct hv_perf_caps caps;                               \
        unsigned long hret = hv_perf_caps_get(&caps);           \
        if (hret)                                               \
                return -EIO;                                    \
                                                                \
        return sprintf(page, _format, caps._name);              \
}                                                               \
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)

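/* Report the counter_info_version this kernel's request definitions target. */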
static ssize_t kernel_version_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *page)
{
        return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}

static DEVICE_ATTR_RO(kernel_version);
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
        &dev_attr_kernel_version.attr,
        &hv_caps_attr_version.attr,
        &hv_caps_attr_ga.attr,
        &hv_caps_attr_expanded.attr,
        &hv_caps_attr_lab.attr,
        &hv_caps_attr_collect_privileged.attr,
        NULL,
};

static struct attribute_group interface_group = {
        .name = "interface",
        .attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &format_group,
        &event_group,
        &interface_group,
        NULL,
};

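/*
 * H_GET_PERF_COUNTER_INFO is passed one contiguous buffer: a fixed
 * hv_get_perf_counter_info_params header followed by room for the returned
 * counter data. A buffer is kept per CPU and only used under
 * get_cpu_var()/put_cpu_var(), so CPUs never share one.
 */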
#define HGPCI_REQ_BUFFER_SIZE   4096
#define HGPCI_MAX_DATA_BYTES \
        (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))

static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));

struct hv_gpci_request_buffer {
        struct hv_get_perf_counter_info_params params;
        uint8_t bytes[HGPCI_MAX_DATA_BYTES];
} __packed;

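/*
 * Issue a single H_GET_PERF_COUNTER_INFO request using this CPU's buffer and
 * assemble @length bytes of the returned data, starting at byte @offset, into
 * *@value. Returns the hcall status, 0 on success.
 */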
static unsigned long single_gpci_request(u32 req, u32 starting_index,
                u16 secondary_index, u8 version_in, u32 offset, u8 length,
                u64 *value)
{
        unsigned long ret;
        size_t i;
        u64 count;
        struct hv_gpci_request_buffer *arg;

        arg = (void *)get_cpu_var(hv_gpci_reqb);
        memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

        arg->params.counter_request = cpu_to_be32(req);
        arg->params.starting_index = cpu_to_be32(starting_index);
        arg->params.secondary_index = cpu_to_be16(secondary_index);
        arg->params.counter_info_version_in = version_in;

        ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
                        virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
        if (ret) {
                pr_devel("hcall failed: 0x%lx\n", ret);
                goto out;
        }

        /*
         * offset and length are validated against the zeroed buffer at event
         * init, so this loop cannot run past the end of ->bytes[].
         *
         * The hypervisor returns the counter data in big endian order, so
         * assemble the bytes most significant first, shifting by whole bytes
         * rather than single bits.
         */
        count = 0;
        for (i = offset; i < offset + length; i++)
                count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);

        *value = count;
out:
        put_cpu_var(hv_gpci_reqb);
        return ret;
}

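/* Read the counter described by @event, or return 0 if the hcall fails. */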
static u64 h_gpci_get_value(struct perf_event *event)
{
        u64 count;
        unsigned long ret = single_gpci_request(event_get_request(event),
                                        event_get_starting_index(event),
                                        event_get_secondary_index(event),
                                        event_get_counter_info_version(event),
                                        event_get_offset(event),
                                        event_get_length(event),
                                        &count);
        if (ret)
                return 0;
        return count;
}

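/* Accumulate the change since the last read into the perf event count. */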
static void h_gpci_event_update(struct perf_event *event)
{
        s64 prev;
        u64 now = h_gpci_get_value(event);
        prev = local64_xchg(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);
}

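/*
 * The gpci counters are maintained by the hypervisor and cannot be enabled
 * or disabled from here, so start/stop only snapshot and accumulate values.
 */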
static void h_gpci_event_start(struct perf_event *event, int flags)
{
        local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}

static void h_gpci_event_stop(struct perf_event *event, int flags)
{
        h_gpci_event_update(event);
}

static int h_gpci_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                h_gpci_event_start(event, flags);

        return 0;
}

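/*
 * Validate the user-supplied encoding (config2 unused, no branch sampling,
 * sane length and offset) and probe the request once, so bad requests fail
 * at event creation time rather than at read time.
 */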
static int h_gpci_event_init(struct perf_event *event)
{
        u64 count;
        u8 length;

        /* Not our event */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* config2 is unused */
        if (event->attr.config2) {
                pr_devel("config2 set when reserved\n");
                return -EINVAL;
        }

        /* no branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        length = event_get_length(event);
        if (length < 1 || length > 8) {
                pr_devel("length invalid\n");
                return -EINVAL;
        }

        /* last byte within the buffer? */
        if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
                pr_devel("request outside of buffer: %zu > %zu\n",
                                (size_t)event_get_offset(event) + length,
                                HGPCI_MAX_DATA_BYTES);
                return -EINVAL;
        }

        /* check if the request works... */
        if (single_gpci_request(event_get_request(event),
                                event_get_starting_index(event),
                                event_get_secondary_index(event),
                                event_get_counter_info_version(event),
                                event_get_offset(event),
                                length,
                                &count)) {
                pr_devel("gpci hcall failed\n");
                return -EINVAL;
        }

        return 0;
}

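/*
 * Counting-only PMU: per-task contexts are not supported, and ->del just
 * stops the event since there is no per-event hardware state to release.
 */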
static struct pmu h_gpci_pmu = {
        .task_ctx_nr = perf_invalid_context,

        .name = "hv_gpci",
        .attr_groups = attr_groups,
        .event_init  = h_gpci_event_init,
        .add         = h_gpci_event_add,
        .del         = h_gpci_event_stop,
        .start       = h_gpci_event_start,
        .stop        = h_gpci_event_stop,
        .read        = h_gpci_event_update,
        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};

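/*
 * Register the PMU only when running under a hypervisor (LPAR) and the
 * capabilities hcall succeeds; otherwise the interface is left unregistered.
 */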
static int hv_gpci_init(void)
{
        int r;
        unsigned long hret;
        struct hv_perf_caps caps;

        hv_gpci_assert_offsets_correct();

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }

        hret = hv_perf_caps_get(&caps);
        if (hret) {
                pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }

        /* sampling not supported */
        h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

        r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
        if (r)
                return r;

        return 0;
}

device_initcall(hv_gpci_init);
