This source file includes the following definitions:
- mlx4_icm_first
- mlx4_icm_last
- mlx4_icm_next
- mlx4_icm_addr
- mlx4_icm_size
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34 #ifndef MLX4_ICM_H
35 #define MLX4_ICM_H
36
37 #include <linux/list.h>
38 #include <linux/pci.h>
39 #include <linux/mutex.h>
40
/*
 * Entries per chunk, sized so one struct mlx4_icm_chunk (list head,
 * two ints, and the sg[]/buf[] array) fits within 256 bytes.
 */
#define MLX4_ICM_CHUNK_LEN						\
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) /		\
	 (sizeof(struct scatterlist)))
44
/* ICM pages handed to the HCA are always 4 KB, independent of PAGE_SIZE. */
enum {
	MLX4_ICM_PAGE_SHIFT	= 12,
	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
};
49
/* One coherent DMA allocation backing a piece of ICM. */
struct mlx4_icm_buf {
	void			*addr;		/* CPU virtual address */
	size_t			size;		/* allocation size in bytes */
	dma_addr_t		dma_addr;	/* bus address for the device */
};
55
/*
 * A chunk of ICM memory: either a scatterlist of streaming-DMA pages
 * or an array of coherent buffers, selected by 'coherent'.
 */
struct mlx4_icm_chunk {
	struct list_head	list;		/* link on mlx4_icm.chunk_list */
	int			npages;		/* pages allocated in this chunk */
	int			nsg;		/* valid entries in sg[] / buf[] */
	bool			coherent;	/* true: use buf[]; false: use sg[] */
	union {
		struct scatterlist	sg[MLX4_ICM_CHUNK_LEN];
		struct mlx4_icm_buf	buf[MLX4_ICM_CHUNK_LEN];
	};
};
66
/* An ICM area: a list of chunks plus a reference count. */
struct mlx4_icm {
	struct list_head	chunk_list;
	int			refcount;
};
71
/* Cursor for walking the DMA-mapped entries of an ICM area. */
struct mlx4_icm_iter {
	struct mlx4_icm		*icm;
	struct mlx4_icm_chunk	*chunk;		/* NULL once iteration is done */
	int			page_idx;	/* index into chunk->sg[]/buf[] */
};
77
78 struct mlx4_dev;
79
80 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
81 gfp_t gfp_mask, int coherent);
82 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
83
84 int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
85 void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
86 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
87 u32 start, u32 end);
88 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
89 u32 start, u32 end);
90 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
91 u64 virt, int obj_size, u32 nobj, int reserved,
92 int use_lowmem, int use_coherent);
93 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
94 void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
95
96 static inline void mlx4_icm_first(struct mlx4_icm *icm,
97 struct mlx4_icm_iter *iter)
98 {
99 iter->icm = icm;
100 iter->chunk = list_empty(&icm->chunk_list) ?
101 NULL : list_entry(icm->chunk_list.next,
102 struct mlx4_icm_chunk, list);
103 iter->page_idx = 0;
104 }
105
106 static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
107 {
108 return !iter->chunk;
109 }
110
111 static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
112 {
113 if (++iter->page_idx >= iter->chunk->nsg) {
114 if (iter->chunk->list.next == &iter->icm->chunk_list) {
115 iter->chunk = NULL;
116 return;
117 }
118
119 iter->chunk = list_entry(iter->chunk->list.next,
120 struct mlx4_icm_chunk, list);
121 iter->page_idx = 0;
122 }
123 }
124
125 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
126 {
127 if (iter->chunk->coherent)
128 return iter->chunk->buf[iter->page_idx].dma_addr;
129 else
130 return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
131 }
132
133 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
134 {
135 if (iter->chunk->coherent)
136 return iter->chunk->buf[iter->page_idx].size;
137 else
138 return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
139 }
140
/* Firmware commands to map/unmap the auxiliary ICM area. */
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
143
144 #endif