/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc.
 */

#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>

#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
#endif

#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"

#define SMI_CMD		0x0
#define SMI_WR_DAT	0x8
#define SMI_RD_DAT	0x10
#define SMI_CLK		0x18
#define SMI_EN		0x20

#ifdef __BIG_ENDIAN_BITFIELD
#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
	field;					\
	more

#else
#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
	more					\
	field;

#endif

union cvmx_smix_clk {
	u64 u64;
	struct cvmx_smix_clk_s {
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
	  OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
	  OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
	  OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
	  OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
	  ;))))))))))
	} s;
};

union cvmx_smix_cmd {
	u64 u64;
	struct cvmx_smix_cmd_s {
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
	  OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
	  OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
	  OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
	  ;))))))
	} s;
};

union cvmx_smix_en {
	u64 u64;
	struct cvmx_smix_en_s {
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
	  OCT_MDIO_BITFIELD_FIELD(u64 en:1,
	  ;))
	} s;
};

union cvmx_smix_rd_dat {
	u64 u64;
	struct cvmx_smix_rd_dat_s {
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
	  ;))))
	} s;
};

union cvmx_smix_wr_dat {
	u64 u64;
	struct cvmx_smix_wr_dat_s {
	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
	  ;))))
	} s;
};

enum octeon_mdiobus_mode {
	UNINIT = 0,
	C22,
	C45
};

struct octeon_mdiobus {
	struct mii_bus *mii_bus;
	u64 register_base;
	resource_size_t mdio_phys;
	resource_size_t regsize;
	enum octeon_mdiobus_mode mode;
	int phy_irq[PHY_MAX_ADDR];
};

#ifdef CONFIG_CAVIUM_OCTEON_SOC
static void oct_mdio_writeq(u64 val, u64 addr)
{
	cvmx_write_csr(addr, val);
}

static u64 oct_mdio_readq(u64 addr)
{
	return cvmx_read_csr(addr);
}
#else
#define oct_mdio_writeq(val, addr)	writeq_relaxed(val, (void *)addr)
#define oct_mdio_readq(addr)		readq_relaxed((void *)addr)
#endif

static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
				    enum octeon_mdiobus_mode m)
{
	union cvmx_smix_clk smi_clk;

	if (m == p->mode)
		return;

	smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
	smi_clk.s.mode = (m == C45) ? 1 : 0;
	smi_clk.s.preamble = 1;
	oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
	p->mode = m;
}

static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
				   int phy_id, int regnum)
{
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_wr_dat smi_wr;
	int timeout = 1000;

	octeon_mdiobus_set_mode(p, C45);

	smi_wr.u64 = 0;
	smi_wr.s.dat = regnum & 0xffff;
	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);

	regnum = (regnum >> 16) & 0x1f;

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = regnum;
	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);

	do {
		/* Wait 1000 clocks so we don't saturate the RSL bus
		 * doing reads.
		 */
		__delay(1000);
		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
	} while (smi_wr.s.pending && --timeout);

	if (timeout <= 0)
		return -EIO;
	return 0;
}

static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
{
	struct octeon_mdiobus *p = bus->priv;
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_rd_dat smi_rd;
	unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
	int timeout = 1000;

	if (regnum & MII_ADDR_C45) {
		int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);

		if (r < 0)
			return r;

		regnum = (regnum >> 16) & 0x1f;
		op = 3; /* MDIO_CLAUSE_45_READ */
	} else {
		octeon_mdiobus_set_mode(p, C22);
	}

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = op;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = regnum;
	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);

	do {
		/* Wait 1000 clocks so we don't saturate the RSL bus
		 * doing reads.
		 */
		__delay(1000);
		smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
	} while (smi_rd.s.pending && --timeout);

	if (smi_rd.s.val)
		return smi_rd.s.dat;
	else
		return -EIO;
}

static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
				int regnum, u16 val)
{
	struct octeon_mdiobus *p = bus->priv;
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_wr_dat smi_wr;
	unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
	int timeout = 1000;

	if (regnum & MII_ADDR_C45) {
		int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);

		if (r < 0)
			return r;

		regnum = (regnum >> 16) & 0x1f;
		op = 1; /* MDIO_CLAUSE_45_WRITE */
	} else {
		octeon_mdiobus_set_mode(p, C22);
	}

	smi_wr.u64 = 0;
	smi_wr.s.dat = val;
	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);

	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = op;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = regnum;
	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);

	do {
		/* Wait 1000 clocks so we don't saturate the RSL bus
		 * doing reads.
		 */
		__delay(1000);
		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
	} while (smi_wr.s.pending && --timeout);

	if (timeout <= 0)
		return -EIO;

	return 0;
}

static int octeon_mdiobus_probe(struct platform_device *pdev)
{
	struct octeon_mdiobus *bus;
	struct resource *res_mem;
	union cvmx_smix_en smi_en;
	int err = -ENOENT;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem == NULL) {
		dev_err(&pdev->dev, "found no memory resource\n");
		return -ENXIO;
	}

	bus->mdio_phys = res_mem->start;
	bus->regsize = resource_size(res_mem);

	if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
				     res_mem->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		return -ENXIO;
	}

	bus->register_base =
		(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
	if (!bus->register_base) {
		dev_err(&pdev->dev, "dev_ioremap failed\n");
		return -ENOMEM;
	}

	bus->mii_bus = mdiobus_alloc();
	if (!bus->mii_bus)
		goto fail;

	smi_en.u64 = 0;
	smi_en.s.en = 1;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);

	bus->mii_bus->priv = bus;
	bus->mii_bus->irq = bus->phy_irq;
	bus->mii_bus->name = "mdio-octeon";
	snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base);
	bus->mii_bus->parent = &pdev->dev;

	bus->mii_bus->read = octeon_mdiobus_read;
	bus->mii_bus->write = octeon_mdiobus_write;

	platform_set_drvdata(pdev, bus);

	err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
	if (err)
		goto fail_register;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");

	return 0;
fail_register:
	mdiobus_free(bus->mii_bus);
fail:
	smi_en.u64 = 0;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
	return err;
}

static int octeon_mdiobus_remove(struct platform_device *pdev)
{
	struct octeon_mdiobus *bus;
	union cvmx_smix_en smi_en;

	bus = platform_get_drvdata(pdev);

	mdiobus_unregister(bus->mii_bus);
	mdiobus_free(bus->mii_bus);
	smi_en.u64 = 0;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
	return 0;
}

static const struct of_device_id octeon_mdiobus_match[] = {
	{
		.compatible = "cavium,octeon-3860-mdio",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mdiobus_match);

static struct platform_driver octeon_mdiobus_driver = {
	.driver = {
		.name		= "mdio-octeon",
		.of_match_table = octeon_mdiobus_match,
	},
	.probe		= octeon_mdiobus_probe,
	.remove		= octeon_mdiobus_remove,
};

void octeon_mdiobus_force_mod_depencency(void)
{
	/* Let ethernet drivers force us to be loaded. */
}
EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);

module_platform_driver(octeon_mdiobus_driver);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");