/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"

#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX

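/* Toggle the endian-swap setting in the BAR1 index register used for the
 * temporary mapping.  The 64-bit fast paths below rely on this swap on
 * big-endian hosts; on little-endian builds this is a no-op.
 */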
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
			    u32 idx __attribute__((unused)))
{
#ifdef __BIG_ENDIAN_BITFIELD
	u32 mask;

	mask = oct->fn_list.bar1_idx_read(oct, idx);
	mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
	oct->fn_list.bar1_idx_write(oct, idx, mask);
#endif
}

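/* Copy a host buffer into a BAR1-mapped device window: byte writes until
 * the device address is 8-byte aligned, 64-bit writes for the bulk of the
 * data (with the swap mode toggled around that section), then byte writes
 * for any remaining tail.
 */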
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
		     u8 *hostbuf, u32 len)
{
	/* Byte writes until the mapped address is 8-byte aligned. */
	while (len && ((unsigned long)mapped_addr & 7)) {
		writeb(*(hostbuf++), mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Bulk copy with 64-bit writes. */
	while (len >= 8) {
		writeq(*((u64 *)hostbuf), mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	/* Restore the original swap mode for the byte-wide tail. */
	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		writeb(*(hostbuf++), mapped_addr++);
}

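/* Counterpart of octeon_pci_fastwrite(): copy from a BAR1-mapped device
 * window into a host buffer using the same head/bulk/tail scheme.
 */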
static void
octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
		    u8 *hostbuf, u32 len)
{
	/* Byte reads until the mapped address is 8-byte aligned. */
	while (len && ((unsigned long)mapped_addr & 7)) {
		*(hostbuf++) = readb(mapped_addr++);
		len--;
	}

	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	/* Bulk copy with 64-bit reads. */
	while (len >= 8) {
		*((u64 *)hostbuf) = readq(mapped_addr);
		mapped_addr += 8;
		hostbuf += 8;
		len -= 8;
	}

	/* Restore the original swap mode for the byte-wide tail. */
	octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);

	while (len--)
		*(hostbuf++) = readb(mapped_addr++);
}

/* Core memory read/write using a temporary BAR1 mapping (MEMOPS_IDX).
 * The transfer runs under oct->mem_access_lock and the original BAR1
 * index register value is restored when it completes.
 * op = 1 to read from the core, op = 0 to write to it.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;

	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index reg value. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		/* Point the MEMOPS_IDX BAR1 window at the 4MB region that
		 * contains 'addr'.
		 */
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
		    + (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If the operation crosses a 4MB boundary, split the
		 * transfer at the boundary: copy only as many bytes as
		 * remain in the current 4MB window.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
				   (1 << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) {	/* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

	/* Restore the original BAR1 index register value. */
	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}

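/* Read 'len' bytes of Octeon core memory starting at 'coreaddr' into 'buf'. */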
void
octeon_pci_read_core_mem(struct octeon_device *oct,
			 u64 coreaddr,
			 u8 *buf,
			 u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
}

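/* Write 'len' bytes from 'buf' to Octeon core memory starting at 'coreaddr'. */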
void
octeon_pci_write_core_mem(struct octeon_device *oct,
			  u64 coreaddr,
			  u8 *buf,
			  u32 len)
{
	__octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
}

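/* Read a 64-bit word from core memory and convert it from the device's
 * big-endian byte order to host byte order.
 */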
u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
{
	__be64 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);

	return be64_to_cpu(ret);
}

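/* Read a 32-bit word from core memory and convert it to host byte order. */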
u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
{
	__be32 ret;

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);

	return be32_to_cpu(ret);
}

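/* Write a 32-bit host-order value to core memory in the device's
 * big-endian byte order.
 */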
void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
			       u32 val)
{
	__be32 t = cpu_to_be32(val);

	__octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
}
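
/* Illustrative usage (a hypothetical caller; 'oct' is assumed to be an
 * initialized octeon_device and 'coreaddr' a valid core DRAM address):
 *
 *	u8 scratch[64];
 *	u32 word = octeon_read_device_mem32(oct, coreaddr);
 *
 *	octeon_pci_read_core_mem(oct, coreaddr, scratch, sizeof(scratch));
 *	octeon_write_device_mem32(oct, coreaddr, word + 1);
 */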