root/drivers/firmware/qcom_scm.c


DEFINITIONS

This source file includes the following definitions.
  1. qcom_scm_clk_enable
  2. qcom_scm_clk_disable
  3. qcom_scm_set_cold_boot_addr
  4. qcom_scm_set_warm_boot_addr
  5. qcom_scm_cpu_power_down
  6. qcom_scm_hdcp_available
  7. qcom_scm_hdcp_req
  8. qcom_scm_pas_supported
  9. qcom_scm_pas_init_image
  10. qcom_scm_pas_mem_setup
  11. qcom_scm_pas_auth_and_reset
  12. qcom_scm_pas_shutdown
  13. qcom_scm_pas_reset_assert
  14. qcom_scm_pas_reset_deassert
  15. qcom_scm_restore_sec_cfg
  16. qcom_scm_iommu_secure_ptbl_size
  17. qcom_scm_iommu_secure_ptbl_init
  18. qcom_scm_io_readl
  19. qcom_scm_io_writel
  20. qcom_scm_set_download_mode
  21. qcom_scm_find_dload_address
  22. qcom_scm_is_available
  23. qcom_scm_set_remote_state
  24. qcom_scm_assign_mem
  25. qcom_scm_probe
  26. qcom_scm_shutdown
  27. qcom_scm_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Qualcomm SCM driver
   4  *
   5  * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
   6  * Copyright (C) 2015 Linaro Ltd.
   7  */
   8 #include <linux/platform_device.h>
   9 #include <linux/init.h>
  10 #include <linux/cpumask.h>
  11 #include <linux/export.h>
  12 #include <linux/dma-direct.h>
  13 #include <linux/dma-mapping.h>
  14 #include <linux/module.h>
  15 #include <linux/types.h>
  16 #include <linux/qcom_scm.h>
  17 #include <linux/of.h>
  18 #include <linux/of_address.h>
  19 #include <linux/of_platform.h>
  20 #include <linux/clk.h>
  21 #include <linux/reset-controller.h>
  22 
  23 #include "qcom_scm.h"
  24 
  25 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
  26 module_param(download_mode, bool, 0);
  27 
  28 #define SCM_HAS_CORE_CLK        BIT(0)
  29 #define SCM_HAS_IFACE_CLK       BIT(1)
  30 #define SCM_HAS_BUS_CLK         BIT(2)
  31 
  32 struct qcom_scm {
  33         struct device *dev;
  34         struct clk *core_clk;
  35         struct clk *iface_clk;
  36         struct clk *bus_clk;
  37         struct reset_controller_dev reset;
  38 
  39         u64 dload_mode_addr;
  40 };
  41 
  42 struct qcom_scm_current_perm_info {
  43         __le32 vmid;
  44         __le32 perm;
  45         __le64 ctx;
  46         __le32 ctx_size;
  47         __le32 unused;
  48 };
  49 
  50 struct qcom_scm_mem_map_info {
  51         __le64 mem_addr;
  52         __le64 mem_size;
  53 };
  54 
  55 static struct qcom_scm *__scm;
  56 
  57 static int qcom_scm_clk_enable(void)
  58 {
  59         int ret;
  60 
  61         ret = clk_prepare_enable(__scm->core_clk);
  62         if (ret)
  63                 goto bail;
  64 
  65         ret = clk_prepare_enable(__scm->iface_clk);
  66         if (ret)
  67                 goto disable_core;
  68 
  69         ret = clk_prepare_enable(__scm->bus_clk);
  70         if (ret)
  71                 goto disable_iface;
  72 
  73         return 0;
  74 
  75 disable_iface:
  76         clk_disable_unprepare(__scm->iface_clk);
  77 disable_core:
  78         clk_disable_unprepare(__scm->core_clk);
  79 bail:
  80         return ret;
  81 }
  82 
  83 static void qcom_scm_clk_disable(void)
  84 {
  85         clk_disable_unprepare(__scm->core_clk);
  86         clk_disable_unprepare(__scm->iface_clk);
  87         clk_disable_unprepare(__scm->bus_clk);
  88 }
  89 
  90 /**
  91  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  92  * @entry: Entry point function for the cpus
  93  * @cpus: The cpumask of cpus that will use the entry point
  94  *
  95  * Set the cold boot address of the cpus. Any cpu outside the supported
  96  * range would be removed from the cpu present mask.
  97  */
  98 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
  99 {
 100         return __qcom_scm_set_cold_boot_addr(entry, cpus);
 101 }
 102 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
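/*
 * Example (hypothetical): a 32-bit platform SMP setup path handing its
 * secondary entry point to the secure monitor before bringing CPUs online.
 * qcom_smp_prepare() is illustrative and not part of this file;
 * secondary_startup_arm is the generic ARM secondary entry symbol.
 */
static void __init qcom_smp_prepare(unsigned int max_cpus)
{
	if (qcom_scm_set_cold_boot_addr(secondary_startup_arm,
					cpu_present_mask))
		pr_warn("Failed to set CPU cold boot address\n");
}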
 103 
 104 /**
 105  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 106  * @entry: Entry point function for the cpus
 107  * @cpus: The cpumask of cpus that will use the entry point
 108  *
 109  * Set the Linux entry point for the SCM to transfer control to when coming
 110  * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 111  */
 112 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 113 {
 114         return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
 115 }
 116 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 117 
 118 /**
 119  * qcom_scm_cpu_power_down() - Power down the cpu
  120  * @flags: Flags indicating how caches are handled on power down (e.g. QCOM_SCM_CPU_PWR_DOWN_L2_OFF)
 121  *
  122  * This is an end point to power down the cpu. If there was a pending interrupt,
  123  * control returns from this function; otherwise, the cpu jumps to the
 124  * warm boot entry point set for this cpu upon reset.
 125  */
 126 void qcom_scm_cpu_power_down(u32 flags)
 127 {
 128         __qcom_scm_cpu_power_down(flags);
 129 }
 130 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
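/*
 * Example (hypothetical): how a hotplug "die" path might combine the warm
 * boot hook with a power down request. qcom_cpu_die() is illustrative;
 * the flag value comes from <linux/qcom_scm.h>.
 */
static void qcom_cpu_die(unsigned int cpu)
{
	/* Resume in the kernel at the generic secondary entry after reset. */
	qcom_scm_set_warm_boot_addr(secondary_startup_arm, cpumask_of(cpu));

	/* Flush caches and power down; returns only if an interrupt is pending. */
	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_OFF);
}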
 131 
 132 /**
 133  * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 134  *
 135  * Return true if HDCP is supported, false if not.
 136  */
 137 bool qcom_scm_hdcp_available(void)
 138 {
 139         int ret = qcom_scm_clk_enable();
 140 
 141         if (ret)
  142                 return false;
 143 
 144         ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
 145                                                 QCOM_SCM_CMD_HDCP);
 146 
 147         qcom_scm_clk_disable();
 148 
  149         return ret > 0;
 150 }
 151 EXPORT_SYMBOL(qcom_scm_hdcp_available);
 152 
 153 /**
 154  * qcom_scm_hdcp_req() - Send HDCP request.
 155  * @req: HDCP request array
 156  * @req_cnt: HDCP request array count
 157  * @resp: response buffer passed to SCM
 158  *
 159  * Write HDCP register(s) through SCM.
 160  */
 161 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 162 {
 163         int ret = qcom_scm_clk_enable();
 164 
 165         if (ret)
 166                 return ret;
 167 
 168         ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
 169         qcom_scm_clk_disable();
 170         return ret;
 171 }
 172 EXPORT_SYMBOL(qcom_scm_hdcp_req);
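/*
 * Example (hypothetical): an HDCP client writing a single register through
 * the secure monitor. example_hdcp_write() and the register address are
 * placeholders; struct qcom_scm_hdcp_req comes from <linux/qcom_scm.h>.
 */
static int example_hdcp_write(u32 reg_addr, u32 value)
{
	struct qcom_scm_hdcp_req req = { .addr = reg_addr, .val = value };
	u32 resp;

	if (!qcom_scm_hdcp_available())
		return -ENODEV;

	return qcom_scm_hdcp_req(&req, 1, &resp);
}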
 173 
 174 /**
 175  * qcom_scm_pas_supported() - Check if the peripheral authentication service is
  176  *                            available for the given peripheral
 177  * @peripheral: peripheral id
 178  *
 179  * Returns true if PAS is supported for this peripheral, otherwise false.
 180  */
 181 bool qcom_scm_pas_supported(u32 peripheral)
 182 {
 183         int ret;
 184 
 185         ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
 186                                            QCOM_SCM_PAS_IS_SUPPORTED_CMD);
 187         if (ret <= 0)
 188                 return false;
 189 
 190         return __qcom_scm_pas_supported(__scm->dev, peripheral);
 191 }
 192 EXPORT_SYMBOL(qcom_scm_pas_supported);
 193 
 194 /**
 195  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 196  *                             state machine for a given peripheral, using the
 197  *                             metadata
 198  * @peripheral: peripheral id
 199  * @metadata:   pointer to memory containing ELF header, program header table
 200  *              and optional blob of data used for authenticating the metadata
 201  *              and the rest of the firmware
 202  * @size:       size of the metadata
 203  *
 204  * Returns 0 on success.
 205  */
 206 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
 207 {
 208         dma_addr_t mdata_phys;
 209         void *mdata_buf;
 210         int ret;
 211 
 212         /*
  213          * During the SCM call, memory protection will be enabled for the
  214          * metadata blob, so make sure it is physically contiguous, 4K aligned
  215          * and non-cacheable to avoid XPU violations.
 216          */
 217         mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
 218                                        GFP_KERNEL);
 219         if (!mdata_buf) {
 220                 dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
 221                 return -ENOMEM;
 222         }
 223         memcpy(mdata_buf, metadata, size);
 224 
 225         ret = qcom_scm_clk_enable();
 226         if (ret)
 227                 goto free_metadata;
 228 
 229         ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
 230 
 231         qcom_scm_clk_disable();
 232 
 233 free_metadata:
 234         dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 235 
 236         return ret;
 237 }
 238 EXPORT_SYMBOL(qcom_scm_pas_init_image);
 239 
 240 /**
 241  * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 242  *                            for firmware loading
 243  * @peripheral: peripheral id
 244  * @addr:       start address of memory area to prepare
 245  * @size:       size of the memory area to prepare
 246  *
 247  * Returns 0 on success.
 248  */
 249 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 250 {
 251         int ret;
 252 
 253         ret = qcom_scm_clk_enable();
 254         if (ret)
 255                 return ret;
 256 
 257         ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
 258         qcom_scm_clk_disable();
 259 
 260         return ret;
 261 }
 262 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 263 
 264 /**
 265  * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 266  *                                 and reset the remote processor
 267  * @peripheral: peripheral id
 268  *
 269  * Return 0 on success.
 270  */
 271 int qcom_scm_pas_auth_and_reset(u32 peripheral)
 272 {
 273         int ret;
 274 
 275         ret = qcom_scm_clk_enable();
 276         if (ret)
 277                 return ret;
 278 
 279         ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
 280         qcom_scm_clk_disable();
 281 
 282         return ret;
 283 }
 284 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 285 
 286 /**
 287  * qcom_scm_pas_shutdown() - Shut down the remote processor
 288  * @peripheral: peripheral id
 289  *
 290  * Returns 0 on success.
 291  */
 292 int qcom_scm_pas_shutdown(u32 peripheral)
 293 {
 294         int ret;
 295 
 296         ret = qcom_scm_clk_enable();
 297         if (ret)
 298                 return ret;
 299 
 300         ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
 301         qcom_scm_clk_disable();
 302 
 303         return ret;
 304 }
 305 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
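/*
 * Example (hypothetical): the boot sequence a remoteproc driver might drive
 * through the PAS calls above. Copying the firmware segments into the
 * carveout between mem_setup and auth_and_reset is elided; the function and
 * its parameters are illustrative, struct firmware is from <linux/firmware.h>.
 */
static int example_pas_boot(u32 pas_id, const struct firmware *metadata,
			    phys_addr_t mem_phys, size_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(pas_id))
		return -EOPNOTSUPP;

	ret = qcom_scm_pas_init_image(pas_id, metadata->data, metadata->size);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (ret)
		return ret;

	/* ... load firmware segments into the prepared region here ... */

	ret = qcom_scm_pas_auth_and_reset(pas_id);
	if (ret)
		qcom_scm_pas_shutdown(pas_id);

	return ret;
}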
 306 
 307 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 308                                      unsigned long idx)
 309 {
 310         if (idx != 0)
 311                 return -EINVAL;
 312 
 313         return __qcom_scm_pas_mss_reset(__scm->dev, 1);
 314 }
 315 
 316 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
 317                                        unsigned long idx)
 318 {
 319         if (idx != 0)
 320                 return -EINVAL;
 321 
 322         return __qcom_scm_pas_mss_reset(__scm->dev, 0);
 323 }
 324 
 325 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 326         .assert = qcom_scm_pas_reset_assert,
 327         .deassert = qcom_scm_pas_reset_deassert,
 328 };
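/*
 * Example (hypothetical) device tree consumer of the single MSS reset exposed
 * by the reset controller registered in qcom_scm_probe(): the modem node
 * references it as reset index 0. Node name and property values are
 * placeholders.
 *
 *	remoteproc-modem {
 *		...
 *		resets = <&scm 0>;
 *		reset-names = "mss_restart";
 *	};
 */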
 329 
 330 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 331 {
 332         return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
 333 }
 334 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
 335 
 336 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 337 {
 338         return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
 339 }
 340 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
 341 
 342 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 343 {
 344         return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
 345 }
 346 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
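/*
 * Example (hypothetical): how an IOMMU driver might size, allocate and hand
 * over the secure pagetable buffer. example_init_secure_ptbl() and its
 * allocation strategy are illustrative only.
 */
static int example_init_secure_ptbl(void)
{
	unsigned long buf;
	size_t psize;
	int ret;

	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
	if (ret)
		return ret;

	buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(psize));
	if (!buf)
		return -ENOMEM;

	ret = qcom_scm_iommu_secure_ptbl_init(__pa(buf), psize, 0);
	if (ret)
		free_pages(buf, get_order(psize));

	return ret;
}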
 347 
 348 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 349 {
 350         return __qcom_scm_io_readl(__scm->dev, addr, val);
 351 }
 352 EXPORT_SYMBOL(qcom_scm_io_readl);
 353 
 354 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 355 {
 356         return __qcom_scm_io_writel(__scm->dev, addr, val);
 357 }
 358 EXPORT_SYMBOL(qcom_scm_io_writel);
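/*
 * Example (hypothetical): read-modify-write of a register that is only
 * accessible through the secure monitor. The register address and bit
 * position are placeholders.
 */
static int example_set_secure_bit(phys_addr_t reg)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);
	if (ret)
		return ret;

	return qcom_scm_io_writel(reg, val | BIT(0));
}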
 359 
 360 static void qcom_scm_set_download_mode(bool enable)
 361 {
 362         bool avail;
 363         int ret = 0;
 364 
 365         avail = __qcom_scm_is_call_available(__scm->dev,
 366                                              QCOM_SCM_SVC_BOOT,
 367                                              QCOM_SCM_SET_DLOAD_MODE);
 368         if (avail) {
 369                 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
 370         } else if (__scm->dload_mode_addr) {
 371                 ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
 372                                            enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
 373         } else {
 374                 dev_err(__scm->dev,
 375                         "No available mechanism for setting download mode\n");
 376         }
 377 
 378         if (ret)
 379                 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
 380 }
 381 
 382 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
 383 {
 384         struct device_node *tcsr;
 385         struct device_node *np = dev->of_node;
 386         struct resource res;
 387         u32 offset;
 388         int ret;
 389 
 390         tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
 391         if (!tcsr)
 392                 return 0;
 393 
 394         ret = of_address_to_resource(tcsr, 0, &res);
 395         of_node_put(tcsr);
 396         if (ret)
 397                 return ret;
 398 
 399         ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
 400         if (ret < 0)
 401                 return ret;
 402 
 403         *addr = res.start + offset;
 404 
 405         return 0;
 406 }
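/*
 * Example (hypothetical) device tree snippet for the "qcom,dload-mode"
 * property parsed above: a phandle to the TCSR node followed by the register
 * offset. The offset shown is only a placeholder.
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 *
 * The resulting dload_mode_addr is the TCSR MMIO base taken from the
 * phandle's "reg" plus the offset in the second cell.
 */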
 407 
 408 /**
 409  * qcom_scm_is_available() - Checks if SCM is available
 410  */
 411 bool qcom_scm_is_available(void)
 412 {
 413         return !!__scm;
 414 }
 415 EXPORT_SYMBOL(qcom_scm_is_available);
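/*
 * Example (hypothetical): consumers typically gate their own probe on the
 * SCM interface being ready, deferring until the qcom_scm platform device
 * has probed.
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */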
 416 
 417 int qcom_scm_set_remote_state(u32 state, u32 id)
 418 {
 419         return __qcom_scm_set_remote_state(__scm->dev, state, id);
 420 }
 421 EXPORT_SYMBOL(qcom_scm_set_remote_state);
 422 
 423 /**
 424  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
  425  * @mem_addr: memory region whose ownership needs to be reassigned
  426  * @mem_sz:   size of the region
  427  * @srcvm:    vmid bitmap of the current set of owners; each set bit in the
  428  *            flag indicates a unique owner
  429  * @newvm:    array of new owners and their corresponding permission
  430  *            flags
  431  * @dest_cnt: number of owners in the next set
 432  *
 433  * Return negative errno on failure or 0 on success with @srcvm updated.
 434  */
 435 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 436                         unsigned int *srcvm,
 437                         const struct qcom_scm_vmperm *newvm,
 438                         unsigned int dest_cnt)
 439 {
 440         struct qcom_scm_current_perm_info *destvm;
 441         struct qcom_scm_mem_map_info *mem_to_map;
 442         phys_addr_t mem_to_map_phys;
 443         phys_addr_t dest_phys;
 444         phys_addr_t ptr_phys;
 445         dma_addr_t ptr_dma;
 446         size_t mem_to_map_sz;
 447         size_t dest_sz;
 448         size_t src_sz;
 449         size_t ptr_sz;
 450         int next_vm;
 451         __le32 *src;
 452         void *ptr;
 453         int ret, i, b;
 454         unsigned long srcvm_bits = *srcvm;
 455 
 456         src_sz = hweight_long(srcvm_bits) * sizeof(*src);
 457         mem_to_map_sz = sizeof(*mem_to_map);
 458         dest_sz = dest_cnt * sizeof(*destvm);
 459         ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
 460                         ALIGN(dest_sz, SZ_64);
 461 
 462         ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
 463         if (!ptr)
 464                 return -ENOMEM;
 465         ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
 466 
 467         /* Fill source vmid detail */
 468         src = ptr;
 469         i = 0;
 470         for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
 471                 src[i++] = cpu_to_le32(b);
 472 
 473         /* Fill details of mem buff to map */
 474         mem_to_map = ptr + ALIGN(src_sz, SZ_64);
 475         mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
 476         mem_to_map->mem_addr = cpu_to_le64(mem_addr);
 477         mem_to_map->mem_size = cpu_to_le64(mem_sz);
 478 
 479         next_vm = 0;
  480         /* Fill details of the destination vmids */
 481         destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 482         dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 483         for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
 484                 destvm->vmid = cpu_to_le32(newvm->vmid);
 485                 destvm->perm = cpu_to_le32(newvm->perm);
 486                 destvm->ctx = 0;
 487                 destvm->ctx_size = 0;
 488                 next_vm |= BIT(newvm->vmid);
 489         }
 490 
 491         ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 492                                     ptr_phys, src_sz, dest_phys, dest_sz);
 493         dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
 494         if (ret) {
 495                 dev_err(__scm->dev,
 496                         "Assign memory protection call failed %d\n", ret);
 497                 return -EINVAL;
 498         }
 499 
 500         *srcvm = next_vm;
 501         return 0;
 502 }
 503 EXPORT_SYMBOL(qcom_scm_assign_mem);
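/*
 * Example (hypothetical): handing a physically contiguous carveout from the
 * HLOS VM to the modem subsystem VM before booting it. The VMID and
 * permission constants come from <linux/qcom_scm.h>; the wrapper itself is
 * illustrative.
 */
static int example_assign_to_mss(phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm newvm = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};
	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);

	return qcom_scm_assign_mem(addr, size, &srcvm, &newvm, 1);
}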
 504 
 505 static int qcom_scm_probe(struct platform_device *pdev)
 506 {
 507         struct qcom_scm *scm;
 508         unsigned long clks;
 509         int ret;
 510 
 511         scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
 512         if (!scm)
 513                 return -ENOMEM;
 514 
 515         ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
 516         if (ret < 0)
 517                 return ret;
 518 
 519         clks = (unsigned long)of_device_get_match_data(&pdev->dev);
 520 
 521         scm->core_clk = devm_clk_get(&pdev->dev, "core");
 522         if (IS_ERR(scm->core_clk)) {
 523                 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
 524                         return PTR_ERR(scm->core_clk);
 525 
 526                 if (clks & SCM_HAS_CORE_CLK) {
 527                         dev_err(&pdev->dev, "failed to acquire core clk\n");
 528                         return PTR_ERR(scm->core_clk);
 529                 }
 530 
 531                 scm->core_clk = NULL;
 532         }
 533 
 534         scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
 535         if (IS_ERR(scm->iface_clk)) {
 536                 if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
 537                         return PTR_ERR(scm->iface_clk);
 538 
 539                 if (clks & SCM_HAS_IFACE_CLK) {
 540                         dev_err(&pdev->dev, "failed to acquire iface clk\n");
 541                         return PTR_ERR(scm->iface_clk);
 542                 }
 543 
 544                 scm->iface_clk = NULL;
 545         }
 546 
 547         scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
 548         if (IS_ERR(scm->bus_clk)) {
 549                 if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
 550                         return PTR_ERR(scm->bus_clk);
 551 
 552                 if (clks & SCM_HAS_BUS_CLK) {
 553                         dev_err(&pdev->dev, "failed to acquire bus clk\n");
 554                         return PTR_ERR(scm->bus_clk);
 555                 }
 556 
 557                 scm->bus_clk = NULL;
 558         }
 559 
 560         scm->reset.ops = &qcom_scm_pas_reset_ops;
 561         scm->reset.nr_resets = 1;
 562         scm->reset.of_node = pdev->dev.of_node;
 563         ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
 564         if (ret)
 565                 return ret;
 566 
 567         /* vote for max clk rate for highest performance */
 568         ret = clk_set_rate(scm->core_clk, INT_MAX);
 569         if (ret)
 570                 return ret;
 571 
 572         __scm = scm;
 573         __scm->dev = &pdev->dev;
 574 
 575         __qcom_scm_init();
 576 
 577         /*
  578          * If requested, enable "download mode". From this point on, a warm
  579          * boot will cause the boot stages to enter download mode, unless
  580          * it is disabled again by a clean shutdown/reboot.
 581          */
 582         if (download_mode)
 583                 qcom_scm_set_download_mode(true);
 584 
 585         return 0;
 586 }
 587 
 588 static void qcom_scm_shutdown(struct platform_device *pdev)
 589 {
 590         /* Clean shutdown, disable download mode to allow normal restart */
 591         if (download_mode)
 592                 qcom_scm_set_download_mode(false);
 593 }
 594 
 595 static const struct of_device_id qcom_scm_dt_match[] = {
 596         { .compatible = "qcom,scm-apq8064",
 597           /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
 598         },
 599         { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
 600                                                              SCM_HAS_IFACE_CLK |
 601                                                              SCM_HAS_BUS_CLK)
 602         },
 603         { .compatible = "qcom,scm-ipq4019" },
 604         { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
 605         { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
 606         { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
 607                                                              SCM_HAS_IFACE_CLK |
 608                                                              SCM_HAS_BUS_CLK)
 609         },
 610         { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
 611                                                              SCM_HAS_IFACE_CLK |
 612                                                              SCM_HAS_BUS_CLK)
 613         },
 614         { .compatible = "qcom,scm-msm8996" },
 615         { .compatible = "qcom,scm" },
 616         {}
 617 };
 618 
 619 static struct platform_driver qcom_scm_driver = {
 620         .driver = {
 621                 .name   = "qcom_scm",
 622                 .of_match_table = qcom_scm_dt_match,
 623         },
 624         .probe = qcom_scm_probe,
 625         .shutdown = qcom_scm_shutdown,
 626 };
 627 
 628 static int __init qcom_scm_init(void)
 629 {
 630         return platform_driver_register(&qcom_scm_driver);
 631 }
 632 subsys_initcall(qcom_scm_init);
