Searched refs:ip (Results 1 - 200 of 1347) sorted by relevance


/linux-4.4.14/sound/soc/sti/
uniperif.h
19 #define GET_UNIPERIF_REG(ip, offset, shift, mask) \
20 ((readl_relaxed(ip->base + offset) >> shift) & mask)
21 #define SET_UNIPERIF_REG(ip, offset, shift, mask, value) \
22 writel_relaxed(((readl_relaxed(ip->base + offset) & \
23 ~(mask << shift)) | (((value) & mask) << shift)), ip->base + offset)
24 #define SET_UNIPERIF_BIT_REG(ip, offset, shift, mask, value) \
25 writel_relaxed((((value) & mask) << shift), ip->base + offset)
31 #define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
32 #define GET_UNIPERIF_SOFT_RST(ip) \
33 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
34 readl_relaxed(ip->base + UNIPERIF_SOFT_RST_OFFSET(ip)) : 0)
35 #define SET_UNIPERIF_SOFT_RST(ip, value) \
36 writel_relaxed(value, ip->base + UNIPERIF_SOFT_RST_OFFSET(ip))
39 #define UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip) 0x0
40 #define UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip) 0x1
41 #define SET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
42 SET_UNIPERIF_BIT_REG(ip, \
43 UNIPERIF_SOFT_RST_OFFSET(ip), \
44 UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
45 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip), 1)
46 #define GET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
47 GET_UNIPERIF_REG(ip, \
48 UNIPERIF_SOFT_RST_OFFSET(ip), \
49 UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
50 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
56 #define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
57 #define SET_UNIPERIF_DATA(ip, value) \
58 writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
64 #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
65 #define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
66 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
67 #define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
68 writel_relaxed(value, ip->base + \
69 UNIPERIF_CHANNEL_STA_REGN(ip, n))
71 #define UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip) 0x0060
72 #define GET_UNIPERIF_CHANNEL_STA_REG0(ip) \
73 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
74 #define SET_UNIPERIF_CHANNEL_STA_REG0(ip, value) \
75 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
77 #define UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip) 0x0064
78 #define GET_UNIPERIF_CHANNEL_STA_REG1(ip) \
79 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
80 #define SET_UNIPERIF_CHANNEL_STA_REG1(ip, value) \
81 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
83 #define UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip) 0x0068
84 #define GET_UNIPERIF_CHANNEL_STA_REG2(ip) \
85 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
86 #define SET_UNIPERIF_CHANNEL_STA_REG2(ip, value) \
87 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
89 #define UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip) 0x006C
90 #define GET_UNIPERIF_CHANNEL_STA_REG3(ip) \
91 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
92 #define SET_UNIPERIF_CHANNEL_STA_REG3(ip, value) \
93 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
95 #define UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip) 0x0070
96 #define GET_UNIPERIF_CHANNEL_STA_REG4(ip) \
97 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
98 #define SET_UNIPERIF_CHANNEL_STA_REG4(ip, value) \
99 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
101 #define UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip) 0x0074
102 #define GET_UNIPERIF_CHANNEL_STA_REG5(ip) \
103 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
104 #define SET_UNIPERIF_CHANNEL_STA_REG5(ip, value) \
105 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
111 #define UNIPERIF_ITS_OFFSET(ip) 0x000C
112 #define GET_UNIPERIF_ITS(ip) \
113 readl_relaxed(ip->base + UNIPERIF_ITS_OFFSET(ip))
116 #define UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip) 5
117 #define UNIPERIF_ITS_MEM_BLK_READ_MASK(ip) \
118 (BIT(UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip)))
121 #define UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip) \
122 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
123 #define UNIPERIF_ITS_FIFO_ERROR_MASK(ip) \
124 (BIT(UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip)))
127 #define UNIPERIF_ITS_DMA_ERROR_SHIFT(ip) 9
128 #define UNIPERIF_ITS_DMA_ERROR_MASK(ip) \
129 (BIT(UNIPERIF_ITS_DMA_ERROR_SHIFT(ip)))
132 #define UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip) \
133 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
134 #define UNIPERIF_ITS_UNDERFLOW_REC_DONE_MASK(ip) \
135 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
136 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip))))
139 #define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip) \
140 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
141 #define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_MASK(ip) \
142 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
143 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
150 #define UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip) \
151 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
152 #define UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip) \
153 (BIT(UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip)))
154 #define SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(ip) \
155 SET_UNIPERIF_ITS_BCLR(ip, \
156 UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip))
158 #define UNIPERIF_ITS_BCLR_OFFSET(ip) 0x0010
159 #define SET_UNIPERIF_ITS_BCLR(ip, value) \
160 writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
166 #define UNIPERIF_ITM_OFFSET(ip) 0x0018
167 #define GET_UNIPERIF_ITM(ip) \
168 readl_relaxed(ip->base + UNIPERIF_ITM_OFFSET(ip))
171 #define UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip) \
172 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
173 #define UNIPERIF_ITM_FIFO_ERROR_MASK(ip) \
174 (BIT(UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip)))
177 #define UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip) \
178 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
179 #define UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(ip) \
180 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
181 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip))))
184 #define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip) \
185 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
186 #define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(ip) \
187 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
188 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
194 #define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
195 #define SET_UNIPERIF_ITM_BCLR(ip, value) \
196 writel_relaxed(value, ip->base + UNIPERIF_ITM_BCLR_OFFSET(ip))
199 #define UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip) \
200 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
201 #define UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip) \
202 (BIT(UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip)))
203 #define SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(ip) \
204 SET_UNIPERIF_ITM_BCLR(ip, \
205 UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip))
208 #define UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip) 9
209 #define UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip) \
210 (BIT(UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip)))
211 #define SET_UNIPERIF_ITM_BCLR_DMA_ERROR(ip) \
212 SET_UNIPERIF_ITM_BCLR(ip, \
213 UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
219 #define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
220 #define SET_UNIPERIF_ITM_BSET(ip, value) \
221 writel_relaxed(value, ip->base + UNIPERIF_ITM_BSET_OFFSET(ip))
224 #define UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip) \
225 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
226 #define UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip) \
227 (BIT(UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip)))
228 #define SET_UNIPERIF_ITM_BSET_FIFO_ERROR(ip) \
229 SET_UNIPERIF_ITM_BSET(ip, \
230 UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip))
233 #define UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip) 5
234 #define UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip) \
235 (BIT(UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip)))
236 #define SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(ip) \
237 SET_UNIPERIF_ITM_BSET(ip, \
238 UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip))
241 #define UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip) 9
242 #define UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip) \
243 (BIT(UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip)))
244 #define SET_UNIPERIF_ITM_BSET_DMA_ERROR(ip) \
245 SET_UNIPERIF_ITM_BSET(ip, \
246 UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip))
249 #define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip) \
250 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
251 #define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip) \
252 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
253 0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip))))
254 #define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(ip) \
255 SET_UNIPERIF_ITM_BSET(ip, \
256 UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip))
259 #define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip) \
260 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
261 #define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip) \
262 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
263 0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip))))
264 #define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(ip) \
265 SET_UNIPERIF_ITM_BSET(ip, \
266 UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip))
272 #define UNIPERIF_CONFIG_OFFSET(ip) 0x0040
273 #define GET_UNIPERIF_CONFIG(ip) \
274 readl_relaxed(ip->base + UNIPERIF_CONFIG_OFFSET(ip))
275 #define SET_UNIPERIF_CONFIG(ip, value) \
276 writel_relaxed(value, ip->base + UNIPERIF_CONFIG_OFFSET(ip))
279 #define UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip) 0
280 #define UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip) 0x1
281 #define GET_UNIPERIF_CONFIG_PARITY_CNTR(ip) \
282 GET_UNIPERIF_REG(ip, \
283 UNIPERIF_CONFIG_OFFSET(ip), \
284 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
285 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip))
286 #define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(ip) \
287 SET_UNIPERIF_REG(ip, \
288 UNIPERIF_CONFIG_OFFSET(ip), \
289 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
290 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 0)
291 #define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_SW(ip) \
292 SET_UNIPERIF_REG(ip, \
293 UNIPERIF_CONFIG_OFFSET(ip), \
294 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
295 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 1)
298 #define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip) 1
299 #define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip) 0x1
300 #define GET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR(ip) \
301 GET_UNIPERIF_REG(ip, \
302 UNIPERIF_CONFIG_OFFSET(ip), \
303 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
304 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip))
305 #define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_SW(ip) \
306 SET_UNIPERIF_REG(ip, \
307 UNIPERIF_CONFIG_OFFSET(ip), \
308 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
309 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 0)
310 #define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(ip) \
311 SET_UNIPERIF_REG(ip, \
312 UNIPERIF_CONFIG_OFFSET(ip), \
313 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
314 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 1)
317 #define UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip) 2
318 #define UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip) 0x1
319 #define GET_UNIPERIF_CONFIG_USER_DAT_CNTR(ip) \
320 GET_UNIPERIF_REG(ip, \
321 UNIPERIF_CONFIG_OFFSET(ip), \
322 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
323 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip))
324 #define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(ip) \
325 SET_UNIPERIF_REG(ip, \
326 UNIPERIF_CONFIG_OFFSET(ip), \
327 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
328 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 1)
329 #define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_SW(ip) \
330 SET_UNIPERIF_REG(ip, \
331 UNIPERIF_CONFIG_OFFSET(ip), \
332 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
333 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 0)
336 #define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip) 3
337 #define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip) 0x1
338 #define GET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR(ip) \
339 GET_UNIPERIF_REG(ip, \
340 UNIPERIF_CONFIG_OFFSET(ip), \
341 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
342 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip))
343 #define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_SW(ip) \
344 SET_UNIPERIF_REG(ip, \
345 UNIPERIF_CONFIG_OFFSET(ip), \
346 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
347 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 0)
348 #define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(ip) \
349 SET_UNIPERIF_REG(ip, \
350 UNIPERIF_CONFIG_OFFSET(ip), \
351 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
352 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 1)
355 #define UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip) 4
356 #define UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip) 0x1
357 #define GET_UNIPERIF_CONFIG_ONE_BIT_AUD(ip) \
358 GET_UNIPERIF_REG(ip, \
359 UNIPERIF_CONFIG_OFFSET(ip), \
360 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
361 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip))
362 #define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(ip) \
363 SET_UNIPERIF_REG(ip, \
364 UNIPERIF_CONFIG_OFFSET(ip), \
365 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
366 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 0)
367 #define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_ENABLE(ip) \
368 SET_UNIPERIF_REG(ip, \
369 UNIPERIF_CONFIG_OFFSET(ip), \
370 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
371 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 1)
374 #define UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip) 5
375 #define UNIPERIF_CONFIG_MEM_FMT_MASK(ip) 0x1
376 #define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) 0
377 #define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) 1
378 #define GET_UNIPERIF_CONFIG_MEM_FMT(ip) \
379 GET_UNIPERIF_REG(ip, \
380 UNIPERIF_CONFIG_OFFSET(ip), \
381 UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
382 UNIPERIF_CONFIG_MEM_FMT_MASK(ip))
383 #define SET_UNIPERIF_CONFIG_MEM_FMT(ip, value) \
384 SET_UNIPERIF_REG(ip, \
385 UNIPERIF_CONFIG_OFFSET(ip), \
386 UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
387 UNIPERIF_CONFIG_MEM_FMT_MASK(ip), value)
388 #define SET_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) \
389 SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
390 VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip))
391 #define SET_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) \
392 SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
393 VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip))
396 #define UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip) 6
397 #define UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip) 0x1
398 #define GET_UNIPERIF_CONFIG_REPEAT_CHL_STS(ip) \
399 GET_UNIPERIF_REG(ip, \
400 UNIPERIF_CONFIG_OFFSET(ip), \
401 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
402 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip))
403 #define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(ip) \
404 SET_UNIPERIF_REG(ip, \
405 UNIPERIF_CONFIG_OFFSET(ip), \
406 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
407 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 0)
408 #define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_DISABLE(ip) \
409 SET_UNIPERIF_REG(ip, \
410 UNIPERIF_CONFIG_OFFSET(ip), \
411 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
412 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 1)
415 #define UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip) \
416 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 7 : -1)
417 #define UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip) 0x1
418 #define GET_UNIPERIF_CONFIG_BACK_STALL_REQ(ip) \
419 GET_UNIPERIF_REG(ip, \
420 UNIPERIF_CONFIG_OFFSET(ip), \
421 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
422 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip))
423 #define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(ip) \
424 SET_UNIPERIF_REG(ip, \
425 UNIPERIF_CONFIG_OFFSET(ip), \
426 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
427 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 0)
428 #define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_ENABLE(ip) \
429 SET_UNIPERIF_REG(ip, \
430 UNIPERIF_CONFIG_OFFSET(ip), \
431 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
432 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 1)
435 #define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip) 8
436 #define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip) 0x7F
437 #define GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip) \
438 GET_UNIPERIF_REG(ip, \
439 UNIPERIF_CONFIG_OFFSET(ip), \
440 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
441 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip))
442 #define SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, value) \
443 SET_UNIPERIF_REG(ip, \
444 UNIPERIF_CONFIG_OFFSET(ip), \
445 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
446 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip), value)
449 #define UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip) \
450 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
451 #define UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip) 0x1
452 #define GET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
453 GET_UNIPERIF_REG(ip, \
454 UNIPERIF_CONFIG_OFFSET(ip), \
455 UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
456 UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip))
457 #define SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
458 SET_UNIPERIF_REG(ip, \
459 UNIPERIF_CONFIG_OFFSET(ip), \
460 UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
461 UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip), 1)
464 #define UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip) 18
465 #define UNIPERIF_CONFIG_IDLE_MOD_MASK(ip) 0x1
466 #define GET_UNIPERIF_CONFIG_IDLE_MOD(ip) \
467 GET_UNIPERIF_REG(ip, \
468 UNIPERIF_CONFIG_OFFSET(ip), \
469 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
470 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip))
471 #define SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(ip) \
472 SET_UNIPERIF_REG(ip, \
473 UNIPERIF_CONFIG_OFFSET(ip), \
474 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
475 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 0)
476 #define SET_UNIPERIF_CONFIG_IDLE_MOD_ENABLE(ip) \
477 SET_UNIPERIF_REG(ip, \
478 UNIPERIF_CONFIG_OFFSET(ip), \
479 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
480 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 1)
483 #define UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip) 19
484 #define UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip) 0x1
485 #define GET_UNIPERIF_CONFIG_SUBFRAME_SEL(ip) \
486 GET_UNIPERIF_REG(ip, \
487 UNIPERIF_CONFIG_OFFSET(ip), \
488 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
489 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip))
490 #define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(ip) \
491 SET_UNIPERIF_REG(ip, \
492 UNIPERIF_CONFIG_OFFSET(ip), \
493 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
494 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 1)
495 #define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF0_SUBF1(ip) \
496 SET_UNIPERIF_REG(ip, \
497 UNIPERIF_CONFIG_OFFSET(ip), \
498 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
499 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 0)
502 #define UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip) 20
503 #define UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip) 0x1
504 #define GET_UNIPERIF_CONFIG_SPDIF_SW_CTRL(ip) \
505 GET_UNIPERIF_REG(ip, \
506 UNIPERIF_CONFIG_OFFSET(ip), \
507 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
508 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip))
509 #define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_ENABLE(ip) \
510 SET_UNIPERIF_REG(ip, \
511 UNIPERIF_CONFIG_OFFSET(ip), \
512 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
513 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 1)
514 #define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(ip) \
515 SET_UNIPERIF_REG(ip, \
516 UNIPERIF_CONFIG_OFFSET(ip), \
517 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
518 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 0)
521 #define UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip) \
522 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 24 : -1)
523 #define UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip) 0x1
524 #define GET_UNIPERIF_CONFIG_MSTR_CLKEDGE(ip) \
525 GET_UNIPERIF_REG(ip, \
526 UNIPERIF_CONFIG_OFFSET(ip), \
527 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
528 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip))
529 #define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_FALLING(ip) \
530 SET_UNIPERIF_REG(ip, \
531 UNIPERIF_CONFIG_OFFSET(ip), \
532 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
533 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 1)
534 #define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_RISING(ip) \
535 SET_UNIPERIF_REG(ip, \
536 UNIPERIF_CONFIG_OFFSET(ip), \
537 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
538 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 0)
544 #define UNIPERIF_CTRL_OFFSET(ip) 0x0044
545 #define GET_UNIPERIF_CTRL(ip) \
546 readl_relaxed(ip->base + UNIPERIF_CTRL_OFFSET(ip))
547 #define SET_UNIPERIF_CTRL(ip, value) \
548 writel_relaxed(value, ip->base + UNIPERIF_CTRL_OFFSET(ip))
551 #define UNIPERIF_CTRL_OPERATION_SHIFT(ip) 0
552 #define UNIPERIF_CTRL_OPERATION_MASK(ip) 0x7
553 #define GET_UNIPERIF_CTRL_OPERATION(ip) \
554 GET_UNIPERIF_REG(ip, \
555 UNIPERIF_CTRL_OFFSET(ip), \
556 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
557 UNIPERIF_CTRL_OPERATION_MASK(ip))
558 #define VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip) 0
559 #define SET_UNIPERIF_CTRL_OPERATION_OFF(ip) \
560 SET_UNIPERIF_REG(ip, \
561 UNIPERIF_CTRL_OFFSET(ip), \
562 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
563 UNIPERIF_CTRL_OPERATION_MASK(ip), \
564 VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip))
565 #define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
566 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 1 : -1)
567 #define SET_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
568 SET_UNIPERIF_REG(ip, \
569 UNIPERIF_CTRL_OFFSET(ip), \
570 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
571 UNIPERIF_CTRL_OPERATION_MASK(ip), \
572 VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip))
573 #define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
574 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 2 : -1)
575 #define SET_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
576 SET_UNIPERIF_REG(ip, \
577 UNIPERIF_CTRL_OFFSET(ip), \
578 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
579 UNIPERIF_CTRL_OPERATION_MASK(ip), \
580 VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip))
581 #define VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) 3
582 #define SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) \
583 SET_UNIPERIF_REG(ip, \
584 UNIPERIF_CTRL_OFFSET(ip), \
585 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
586 UNIPERIF_CTRL_OPERATION_MASK(ip), \
587 VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip))
589 #define VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) 3
590 #define SET_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) \
591 SET_UNIPERIF_REG(ip, \
592 UNIPERIF_CTRL_OFFSET(ip), \
593 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
594 UNIPERIF_CTRL_OPERATION_MASK(ip), \
595 VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip))
596 #define VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) 4
597 #define SET_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) \
598 SET_UNIPERIF_REG(ip, \
599 UNIPERIF_CTRL_OFFSET(ip), \
600 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
601 UNIPERIF_CTRL_OPERATION_MASK(ip), \
602 VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip))
603 #define VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
604 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 5 : -1)
605 #define SET_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
606 SET_UNIPERIF_REG(ip, \
607 UNIPERIF_CTRL_OFFSET(ip), \
608 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
609 UNIPERIF_CTRL_OPERATION_MASK(ip), \
610 VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip))
611 #define VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
612 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 7)
613 #define SET_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
614 SET_UNIPERIF_REG(ip, \
615 UNIPERIF_CTRL_OFFSET(ip), \
616 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
617 UNIPERIF_CTRL_OPERATION_MASK(ip), \
618 VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip))
621 #define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip) \
622 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 3)
623 #define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip) 0x1
624 #define GET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK(ip) \
625 GET_UNIPERIF_REG(ip, \
626 UNIPERIF_CTRL_OFFSET(ip), \
627 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
628 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip))
629 #define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(ip) \
630 SET_UNIPERIF_REG(ip, \
631 UNIPERIF_CTRL_OFFSET(ip), \
632 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
633 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 0)
634 #define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(ip) \
635 SET_UNIPERIF_REG(ip, \
636 UNIPERIF_CTRL_OFFSET(ip), \
637 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
638 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 1)
641 #define UNIPERIF_CTRL_ROUNDING_SHIFT(ip) 4
642 #define UNIPERIF_CTRL_ROUNDING_MASK(ip) 0x1
643 #define GET_UNIPERIF_CTRL_ROUNDING(ip) \
644 GET_UNIPERIF_REG(ip, \
645 UNIPERIF_CTRL_OFFSET(ip), \
646 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
647 UNIPERIF_CTRL_ROUNDING_MASK(ip))
648 #define SET_UNIPERIF_CTRL_ROUNDING_OFF(ip) \
649 SET_UNIPERIF_REG(ip, \
650 UNIPERIF_CTRL_OFFSET(ip), \
651 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
652 UNIPERIF_CTRL_ROUNDING_MASK(ip), 0)
653 #define SET_UNIPERIF_CTRL_ROUNDING_ON(ip) \
654 SET_UNIPERIF_REG(ip, \
655 UNIPERIF_CTRL_OFFSET(ip), \
656 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
657 UNIPERIF_CTRL_ROUNDING_MASK(ip), 1)
660 #define UNIPERIF_CTRL_DIVIDER_SHIFT(ip) 5
661 #define UNIPERIF_CTRL_DIVIDER_MASK(ip) 0xff
662 #define GET_UNIPERIF_CTRL_DIVIDER(ip) \
663 GET_UNIPERIF_REG(ip, \
664 UNIPERIF_CTRL_OFFSET(ip), \
665 UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
666 UNIPERIF_CTRL_DIVIDER_MASK(ip))
667 #define SET_UNIPERIF_CTRL_DIVIDER(ip, value) \
668 SET_UNIPERIF_REG(ip, \
669 UNIPERIF_CTRL_OFFSET(ip), \
670 UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
671 UNIPERIF_CTRL_DIVIDER_MASK(ip), value)
674 #define UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip) \
675 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 13 : -1)
676 #define UNIPERIF_CTRL_BYTE_SWP_MASK(ip) 0x1
677 #define GET_UNIPERIF_CTRL_BYTE_SWP(ip) \
678 GET_UNIPERIF_REG(ip, \
679 UNIPERIF_CTRL_OFFSET(ip), \
680 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
681 UNIPERIF_CTRL_BYTE_SWP_MASK(ip))
682 #define SET_UNIPERIF_CTRL_BYTE_SWP_OFF(ip) \
683 SET_UNIPERIF_REG(ip, \
684 UNIPERIF_CTRL_OFFSET(ip), \
685 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
686 UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 0)
687 #define SET_UNIPERIF_CTRL_BYTE_SWP_ON(ip) \
688 SET_UNIPERIF_REG(ip, \
689 UNIPERIF_CTRL_OFFSET(ip), \
690 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
691 UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 1)
694 #define UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip) \
695 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 14 : -1)
696 #define UNIPERIF_CTRL_ZERO_STUFF_MASK(ip) 0x1
697 #define GET_UNIPERIF_CTRL_ZERO_STUFF(ip) \
698 GET_UNIPERIF_REG(ip, \
699 UNIPERIF_CTRL_OFFSET(ip), \
700 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
701 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip))
702 #define SET_UNIPERIF_CTRL_ZERO_STUFF_HW(ip) \
703 SET_UNIPERIF_REG(ip, \
704 UNIPERIF_CTRL_OFFSET(ip), \
705 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
706 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 1)
707 #define SET_UNIPERIF_CTRL_ZERO_STUFF_SW(ip) \
708 SET_UNIPERIF_REG(ip, \
709 UNIPERIF_CTRL_OFFSET(ip), \
710 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
711 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 0)
714 #define UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip) \
715 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
716 #define UNIPERIF_CTRL_SPDIF_LAT_MASK(ip) 0x1
717 #define GET_UNIPERIF_CTRL_SPDIF_LAT(ip) \
718 GET_UNIPERIF_REG(ip, \
719 UNIPERIF_CTRL_OFFSET(ip), \
720 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
721 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip))
722 #define SET_UNIPERIF_CTRL_SPDIF_LAT_ON(ip) \
723 SET_UNIPERIF_REG(ip, \
724 UNIPERIF_CTRL_OFFSET(ip), \
725 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
726 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 1)
727 #define SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(ip) \
728 SET_UNIPERIF_REG(ip, \
729 UNIPERIF_CTRL_OFFSET(ip), \
730 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
731 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 0)
734 #define UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip) 17
735 #define UNIPERIF_CTRL_SPDIF_FMT_MASK(ip) 0x1
736 #define GET_UNIPERIF_CTRL_SPDIF_FMT(ip) \
737 GET_UNIPERIF_REG(ip, \
738 UNIPERIF_CTRL_OFFSET(ip), \
739 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
740 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip))
741 #define SET_UNIPERIF_CTRL_SPDIF_FMT_ON(ip) \
742 SET_UNIPERIF_REG(ip, \
743 UNIPERIF_CTRL_OFFSET(ip), \
744 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
745 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 1)
746 #define SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(ip) \
747 SET_UNIPERIF_REG(ip, \
748 UNIPERIF_CTRL_OFFSET(ip), \
749 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
750 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 0)
753 #define UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip) \
754 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 18 : -1)
755 #define UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip) 0x1
756 #define GET_UNIPERIF_CTRL_READER_OUT_SEL(ip) \
757 GET_UNIPERIF_REG(ip, \
758 UNIPERIF_CTRL_OFFSET(ip), \
759 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
760 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip))
761 #define SET_UNIPERIF_CTRL_READER_OUT_SEL_IN_MEM(ip) \
762 SET_UNIPERIF_REG(ip, \
763 UNIPERIF_CTRL_OFFSET(ip), \
764 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
765 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 0)
766 #define SET_UNIPERIF_CTRL_READER_OUT_SEL_ON_I2S_LINE(ip) \
767 SET_UNIPERIF_REG(ip, \
768 UNIPERIF_CTRL_OFFSET(ip), \
769 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
770 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
773 #define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
774 #define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip) 0xff
775 #define GET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip) \
776 GET_UNIPERIF_REG(ip, \
777 UNIPERIF_CTRL_OFFSET(ip), \
778 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
779 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip))
780 #define SET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip, value) \
781 SET_UNIPERIF_REG(ip, \
782 UNIPERIF_CTRL_OFFSET(ip), \
783 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
784 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip), value)
790 #define UNIPERIF_I2S_FMT_OFFSET(ip) 0x0048
791 #define GET_UNIPERIF_I2S_FMT(ip) \
792 readl_relaxed(ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
793 #define SET_UNIPERIF_I2S_FMT(ip, value) \
794 writel_relaxed(value, ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
797 #define UNIPERIF_I2S_FMT_NBIT_SHIFT(ip) 0
798 #define UNIPERIF_I2S_FMT_NBIT_MASK(ip) 0x1
799 #define GET_UNIPERIF_I2S_FMT_NBIT(ip) \
800 GET_UNIPERIF_REG(ip, \
801 UNIPERIF_I2S_FMT_OFFSET(ip), \
802 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
803 UNIPERIF_I2S_FMT_NBIT_MASK(ip))
804 #define SET_UNIPERIF_I2S_FMT_NBIT_32(ip) \
805 SET_UNIPERIF_REG(ip, \
806 UNIPERIF_I2S_FMT_OFFSET(ip), \
807 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
808 UNIPERIF_I2S_FMT_NBIT_MASK(ip), 0)
809 #define SET_UNIPERIF_I2S_FMT_NBIT_16(ip) \
810 SET_UNIPERIF_REG(ip, \
811 UNIPERIF_I2S_FMT_OFFSET(ip), \
812 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
813 UNIPERIF_I2S_FMT_NBIT_MASK(ip), 1)
816 #define UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip) 1
817 #define UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip) 0x7
818 #define GET_UNIPERIF_I2S_FMT_DATA_SIZE(ip) \
819 GET_UNIPERIF_REG(ip, \
820 UNIPERIF_I2S_FMT_OFFSET(ip), \
821 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
822 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip))
823 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip) \
824 SET_UNIPERIF_REG(ip, \
825 UNIPERIF_I2S_FMT_OFFSET(ip), \
826 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
827 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 0)
828 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_18(ip) \
829 SET_UNIPERIF_REG(ip, \
830 UNIPERIF_I2S_FMT_OFFSET(ip), \
831 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
832 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 1)
833 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_20(ip) \
834 SET_UNIPERIF_REG(ip, \
835 UNIPERIF_I2S_FMT_OFFSET(ip), \
836 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
837 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 2)
838 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(ip) \
839 SET_UNIPERIF_REG(ip, \
840 UNIPERIF_I2S_FMT_OFFSET(ip), \
841 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
842 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 3)
843 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_28(ip) \
844 SET_UNIPERIF_REG(ip, \
845 UNIPERIF_I2S_FMT_OFFSET(ip), \
846 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
847 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 4)
848 #define SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(ip) \
849 SET_UNIPERIF_REG(ip, \
850 UNIPERIF_I2S_FMT_OFFSET(ip), \
851 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
852 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 5)
855 #define UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip) 4
856 #define UNIPERIF_I2S_FMT_LR_POL_MASK(ip) 0x1
857 #define VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) 0x0
858 #define VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) 0x1
859 #define GET_UNIPERIF_I2S_FMT_LR_POL(ip) \
860 GET_UNIPERIF_REG(ip, \
861 UNIPERIF_I2S_FMT_OFFSET(ip), \
862 UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
863 UNIPERIF_I2S_FMT_LR_POL_MASK(ip))
864 #define SET_UNIPERIF_I2S_FMT_LR_POL(ip, value) \
865 SET_UNIPERIF_REG(ip, \
866 UNIPERIF_I2S_FMT_OFFSET(ip), \
867 UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
868 UNIPERIF_I2S_FMT_LR_POL_MASK(ip), value)
869 #define SET_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) \
870 SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
871 VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip))
872 #define SET_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) \
873 SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
874 VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip))
877 #define UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip) 5
878 #define UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip) 0x1
879 #define GET_UNIPERIF_I2S_FMT_SCLK_EDGE(ip) \
880 GET_UNIPERIF_REG(ip, \
881 UNIPERIF_I2S_FMT_OFFSET(ip), \
882 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
883 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip))
884 #define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(ip) \
885 SET_UNIPERIF_REG(ip, \
886 UNIPERIF_I2S_FMT_OFFSET(ip), \
887 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
888 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 0)
889 #define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(ip) \
890 SET_UNIPERIF_REG(ip, \
891 UNIPERIF_I2S_FMT_OFFSET(ip), \
892 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
893 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 1)
896 #define UNIPERIF_I2S_FMT_PADDING_SHIFT(ip) 6
897 #define UNIPERIF_I2S_FMT_PADDING_MASK(ip) 0x1
899 #define VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) 0x0
900 #define VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) 0x1
901 #define GET_UNIPERIF_I2S_FMT_PADDING(ip) \
902 GET_UNIPERIF_REG(ip, \
903 UNIPERIF_I2S_FMT_OFFSET(ip), \
904 UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
905 UNIPERIF_I2S_FMT_PADDING_MASK(ip))
906 #define SET_UNIPERIF_I2S_FMT_PADDING(ip, value) \
907 SET_UNIPERIF_REG(ip, \
908 UNIPERIF_I2S_FMT_OFFSET(ip), \
909 UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
910 UNIPERIF_I2S_FMT_PADDING_MASK(ip), value)
911 #define SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) \
912 SET_UNIPERIF_I2S_FMT_PADDING(ip, \
913 VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip))
914 #define SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) \
915 SET_UNIPERIF_I2S_FMT_PADDING(ip, \
916 VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip))
919 #define UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip) 7
920 #define UNIPERIF_I2S_FMT_ALIGN_MASK(ip) 0x1
921 #define GET_UNIPERIF_I2S_FMT_ALIGN(ip) \
922 GET_UNIPERIF_REG(ip, \
923 UNIPERIF_I2S_FMT_OFFSET(ip), \
924 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
925 UNIPERIF_I2S_FMT_ALIGN_MASK(ip))
926 #define SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(ip) \
927 SET_UNIPERIF_REG(ip, \
928 UNIPERIF_I2S_FMT_OFFSET(ip), \
929 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
930 UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 0)
931 #define SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(ip) \
932 SET_UNIPERIF_REG(ip, \
933 UNIPERIF_I2S_FMT_OFFSET(ip), \
934 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
935 UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 1)
938 #define UNIPERIF_I2S_FMT_ORDER_SHIFT(ip) 8
939 #define UNIPERIF_I2S_FMT_ORDER_MASK(ip) 0x1
940 #define GET_UNIPERIF_I2S_FMT_ORDER(ip) \
941 GET_UNIPERIF_REG(ip, \
942 UNIPERIF_I2S_FMT_OFFSET(ip), \
943 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
944 UNIPERIF_I2S_FMT_ORDER_MASK(ip))
945 #define SET_UNIPERIF_I2S_FMT_ORDER_LSB(ip) \
946 SET_UNIPERIF_REG(ip, \
947 UNIPERIF_I2S_FMT_OFFSET(ip), \
948 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
949 UNIPERIF_I2S_FMT_ORDER_MASK(ip), 0)
950 #define SET_UNIPERIF_I2S_FMT_ORDER_MSB(ip) \
951 SET_UNIPERIF_REG(ip, \
952 UNIPERIF_I2S_FMT_OFFSET(ip), \
953 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
954 UNIPERIF_I2S_FMT_ORDER_MASK(ip), 1)
957 #define UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip) 9
958 #define UNIPERIF_I2S_FMT_NUM_CH_MASK(ip) 0x7
959 #define GET_UNIPERIF_I2S_FMT_NUM_CH(ip) \
960 GET_UNIPERIF_REG(ip, \
961 UNIPERIF_I2S_FMT_OFFSET(ip), \
962 UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
963 UNIPERIF_I2S_FMT_NUM_CH_MASK(ip))
964 #define SET_UNIPERIF_I2S_FMT_NUM_CH(ip, value) \
965 SET_UNIPERIF_REG(ip, \
966 UNIPERIF_I2S_FMT_OFFSET(ip), \
967 UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
968 UNIPERIF_I2S_FMT_NUM_CH_MASK(ip), value)
971 #define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip) 12
972 #define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip) 0xfffff
973 #define GET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip) \
974 GET_UNIPERIF_REG(ip, \
975 UNIPERIF_I2S_FMT_OFFSET(ip), \
976 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
977 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip))
978 #define SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip, value) \
979 SET_UNIPERIF_REG(ip, \
980 UNIPERIF_I2S_FMT_OFFSET(ip), \
981 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
982 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip), value)
988 #define UNIPERIF_BIT_CONTROL_OFFSET(ip) \
989 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0x004c)
990 #define GET_UNIPERIF_BIT_CONTROL(ip) \
991 readl_relaxed(ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
992 #define SET_UNIPERIF_BIT_CONTROL(ip, value) \
993 writel_relaxed(value, ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
996 #define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip) 0
997 #define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip) 0x1
998 #define GET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
999 GET_UNIPERIF_REG(ip, \
1000 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1001 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
1002 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip))
1003 #define SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
1004 SET_UNIPERIF_REG(ip, \
1005 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1006 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
1007 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip), 1)
1010 #define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip) 1
1011 #define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip) 0x1
1012 #define GET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
1013 GET_UNIPERIF_REG(ip, \
1014 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1015 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
1016 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip))
1017 #define SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
1018 SET_UNIPERIF_BIT_REG(ip, \
1019 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1020 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
1021 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip), 1)
1027 #define UNIPERIF_STATUS_1_OFFSET(ip) 0x0050
1028 #define GET_UNIPERIF_STATUS_1(ip) \
1029 readl_relaxed(ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
1030 #define SET_UNIPERIF_STATUS_1(ip, value) \
1031 writel_relaxed(value, ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
1034 #define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip) \
1035 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
1036 #define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip) 0xff
1037 #define GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip) \
1038 GET_UNIPERIF_REG(ip, \
1039 UNIPERIF_STATUS_1_OFFSET(ip), \
1040 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
1041 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip))
1042 #define SET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip, value) \
1043 SET_UNIPERIF_REG(ip, \
1044 UNIPERIF_STATUS_1_OFFSET(ip), \
1045 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
1046 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
1052 #define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
1053 #define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
1054 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
1055 #define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
1056 writel_relaxed(value, ip->base + \
1057 UNIPERIF_CHANNEL_STA_REGN(ip, n))
1063 #define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
1064 #define GET_UNIPERIF_USER_VALIDITY(ip) \
1065 readl_relaxed(ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
1066 #define SET_UNIPERIF_USER_VALIDITY(ip, value) \
1067 writel_relaxed(value, ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
1070 #define UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip) 0
1071 #define UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip) 0x3
1072 #define GET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip) \
1073 GET_UNIPERIF_REG(ip, \
1074 UNIPERIF_USER_VALIDITY_OFFSET(ip), \
1075 UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
1076 UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip))
1077 #define SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip, value) \
1078 SET_UNIPERIF_REG(ip, \
1079 UNIPERIF_USER_VALIDITY_OFFSET(ip), \
1080 UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
1081 UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip), \
1087 #define UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip) 0x0150
1088 #define UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip) \
1089 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
1090 #define UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip) \
1091 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 0xFFFFFF)
1092 #define GET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip) \
1093 GET_UNIPERIF_REG(ip, \
1094 UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
1095 UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
1096 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip))
1097 #define SET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip, value) \
1098 SET_UNIPERIF_REG(ip, \
1099 UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
1100 UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
1101 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
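
The GET_UNIPERIF_REG/SET_UNIPERIF_REG helpers above are a plain shift-and-mask read-modify-write over a memory-mapped register. A minimal user-space sketch of the same pattern, with a uint32_t variable standing in for the readl_relaxed()/writel_relaxed() MMIO access (the REG_GET/REG_SET names and the demo field are illustrative, not taken from the header):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a memory-mapped register; the kernel macros instead use
 * readl_relaxed()/writel_relaxed() on ip->base + offset. */
static uint32_t fake_reg;

/* Read a bit field: shift the register right, then mask. */
#define REG_GET(reg, shift, mask)  (((reg) >> (shift)) & (mask))

/* Write a bit field: clear the field, then OR in the new value. */
#define REG_SET(reg, shift, mask, value) \
        ((reg) = ((reg) & ~((mask) << (shift))) | (((value) & (mask)) << (shift)))

int main(void)
{
        /* Same shape as e.g. UNIPERIF_CONFIG_DMA_TRIG_LIMIT: shift 8, mask 0x7F. */
        REG_SET(fake_reg, 8, 0x7F, 10);
        printf("field = %u, raw = 0x%08x\n",
               (unsigned)REG_GET(fake_reg, 8, 0x7F), (unsigned)fake_reg);
        return 0;
}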
/linux-4.4.14/arch/arm/lib/
io-readsw-armv4.S
21 .Linsw_align: movs ip, r1, lsl #31
23 ldrh ip, [r0]
25 strh ip, [r1], #2
47 ldrh ip, [r0]
48 pack r5, r5, ip
50 ldrh ip, [r0]
52 pack ip, ip, lr
55 stmia r1!, {r3 - r5, ip}
66 ldrh ip, [r0]
67 pack r4, r4, ip
75 ldrh ip, [r0]
76 pack r3, r3, ip
97 ldrccb ip, [r1, #-1]!
100 ldrh ip, [r0]
102 _BE_ONLY_( mov ip, ip, ror #8 )
103 strb ip, [r1], #1
104 _LE_ONLY_( mov ip, ip, lsr #8 )
105 _BE_ONLY_( mov ip, ip, lsr #24 )
109 _BE_ONLY_( mov ip, ip, lsl #24 )
114 orr ip, ip, r3, lsl #8
115 orr ip, ip, r4, push_hbyte0
116 str ip, [r1], #4
117 mov ip, r4, pull_hbyte1
120 _BE_ONLY_( mov ip, ip, lsr #24 )
123 strb ip, [r1], #1
124 ldrneh ip, [r0]
125 _BE_ONLY_( movne ip, ip, ror #8 )
126 strneb ip, [r1], #1
127 _LE_ONLY_( movne ip, ip, lsr #8 )
128 _BE_ONLY_( movne ip, ip, lsr #24 )
129 strneb ip, [r1]
io-writesw-armv4.S
25 .Loutsw_align: movs ip, r1, lsl #31
43 .Loutsw_8_lp: ldmia r1!, {r3, r4, r5, ip}
48 outword ip
54 ldmia r1!, {r3, ip}
56 outword ip
87 1: mov ip, r3, lsr #8
88 strh ip, [r0]
89 2: mov ip, r3, pull_hbyte0
92 orr ip, ip, r3, push_hbyte1
93 strh ip, [r0]
97 3: movne ip, r3, lsr #8
98 strneh ip, [r0]
div64.S
45 * Clobbered regs: xl, ip
52 subs ip, r4, #1
54 tst ip, r4
64 @ The bit position is stored in ip.
69 clz ip, xh
70 sub yl, yl, ip
71 mov ip, #1
72 mov ip, ip, lsl yl
78 mov ip, #1
82 movcc ip, ip, lsl #1
90 orrcs yh, yh, ip
92 movnes ip, ip, lsr #1
106 mov ip, #0x80000000
111 5: orrcs yl, yl, ip
113 movs ip, ip, lsr #1
131 mov ip, ip, lsr xh
136 mov ip, ip, lsr #1
145 movs ip, ip, lsr #1
155 clz ip, r4
156 rsb ip, ip, #31
162 mov ip, #0
164 movhs ip, #16
168 addhs ip, ip, #8
172 addhs ip, ip, #4
175 addhi ip, ip, #3
176 addls ip, ip, yl, lsr #1
180 mov yh, xh, lsr ip
181 mov yl, xl, lsr ip
182 rsb ip, ip, #32
183 ARM( orr yl, yl, xh, lsl ip )
184 THUMB( lsl xh, xh, ip )
186 mov xh, xl, lsl ip
187 mov xh, xh, lsr ip
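
div64.S above does 64-bit-by-32-bit division by aligning the divisor with clz and then running a shift-and-subtract loop, tracking the current quotient bit in ip. A rough C sketch of that restoring-division idea, assuming only the algorithm matters here (the real __do_div64 has a different register-based calling convention, and div64_32_sketch is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Shift-and-subtract division: the quotient bit walks down from the
 * highest position where the divisor still fits under the dividend,
 * mirroring the clz + lsl setup and the orrcs/movs loop in div64.S. */
static uint64_t div64_32_sketch(uint64_t n, uint32_t base, uint32_t *rem)
{
        uint64_t d = base;
        uint64_t q = 0, bit = 1;

        /* Align the divisor with the dividend (the asm uses clz for this). */
        while (d <= (n >> 1) && bit < (1ULL << 63)) {
                d <<= 1;
                bit <<= 1;
        }
        /* Subtract the shifted divisor wherever it fits, setting that bit. */
        while (bit) {
                if (n >= d) {
                        n -= d;
                        q |= bit;
                }
                d >>= 1;
                bit >>= 1;
        }
        *rem = (uint32_t)n;
        return q;
}

int main(void)
{
        uint32_t r;
        uint64_t q = div64_32_sketch(1000000000123ULL, 7, &r);

        printf("q=%llu r=%u\n", (unsigned long long)q, (unsigned)r);
        return 0;
}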
muldi3.S
33 mov ip, xl, lsr #16
35 bic xl, xl, ip, lsl #16
37 mla xh, yh, ip, xh
40 mul ip, yl, ip
43 adds xl, xl, ip, lsl #16
44 adc xh, xh, ip, lsr #16
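
muldi3.S builds a 64-bit product out of 32-bit multiplies by splitting the low words into 16-bit halves (the lsr #16 / bic / mla / mul sequence above) so that no partial product overflows. A hedged C sketch of that decomposition; mul64_sketch is an illustrative name, not the libgcc entry point:

#include <stdint.h>
#include <stdio.h>

/* Multiply two 64-bit values using only 32-bit multiplies, the way
 * muldi3.S does: xh*yl and xl*yh contribute only to the high word,
 * and xl*yl is split into 16-bit halves to capture its carry. */
static uint64_t mul64_sketch(uint64_t x, uint64_t y)
{
        uint32_t xl = (uint32_t)x, xh = (uint32_t)(x >> 32);
        uint32_t yl = (uint32_t)y, yh = (uint32_t)(y >> 32);

        uint32_t xl_lo = xl & 0xffff, xl_hi = xl >> 16;
        uint32_t yl_lo = yl & 0xffff, yl_hi = yl >> 16;

        /* Cross terms of xl*yl; each one fits in 32 bits. */
        uint32_t lo_lo = xl_lo * yl_lo;
        uint32_t lo_hi = xl_lo * yl_hi;
        uint32_t hi_lo = xl_hi * yl_lo;
        uint32_t hi_hi = xl_hi * yl_hi;

        uint64_t low = (uint64_t)lo_lo + ((uint64_t)lo_hi << 16)
                     + ((uint64_t)hi_lo << 16);
        uint32_t high = xh * yl + xl * yh + hi_hi + (uint32_t)(low >> 32);

        return ((uint64_t)high << 32) | (uint32_t)low;
}

int main(void)
{
        uint64_t a = 0x123456789abcdef0ULL, b = 0x0fedcba987654321ULL;

        printf("%d\n", mul64_sketch(a, b) == a * b);    /* prints 1 */
        return 0;
}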
bitops.h
8 ands ip, r1, #3 variable
9 strneb r1, [ip] @ assert word-aligned
33 ands ip, r1, #3 variable
34 strneb r1, [ip] @ assert word-aligned
49 strex ip, r2, [r1] variable
50 cmp ip, #0 variable
63 ands ip, r1, #3
64 strneb r1, [ip] @ assert word-aligned
69 save_and_disable_irqs ip
73 restore_irqs ip
90 ands ip, r1, #3
91 strneb r1, [ip] @ assert word-aligned
94 save_and_disable_irqs ip
101 restore_irqs ip
csumpartialcopygeneric.S
39 load1b ip
41 adcs sum, sum, ip, put_byte_1 @ update checksum
42 strb ip, [dst], #1
46 .Ldst_16bit: load2b r8, ip
50 adcs sum, sum, ip, put_byte_1
51 strb ip, [dst], #1
66 load1b ip
68 adcs sum, sum, ip, put_byte_1 @ update checksum
69 strb ip, [dst], #1
73 1: load2b r8, ip
77 adcs sum, sum, ip, put_byte_1
78 strb ip, [dst], #1
111 bics ip, len, #15
120 sub ip, ip, #16
121 teq ip, #0
124 2: ands ip, len, #12
126 tst ip, #8
132 tst ip, #4
169 and ip, src, #3
172 cmp ip, #2
176 bics ip, len, #15
192 sub ip, ip, #16
193 teq ip, #0
195 2: ands ip, len, #12
197 tst ip, #8
207 tst ip, #4
228 bics ip, len, #15
244 sub ip, ip, #16
245 teq ip, #0
247 2: ands ip, len, #12
249 tst ip, #8
259 tst ip, #4
282 bics ip, len, #15
298 sub ip, ip, #16
299 teq ip, #0
301 2: ands ip, len, #12
303 tst ip, #8
313 tst ip, #4
putuser.S
36 check_uaccess r0, 1, r1, ip, __put_user_bad
43 check_uaccess r0, 2, r1, ip, __put_user_bad
44 mov ip, r2, lsr #8
48 3: TUSER(strb) ip, [r0, #1]
50 2: TUSER(strb) ip, [r0]
56 3: TUSER(strb) ip, [r0]
58 2: TUSER(strb) ip, [r0], #1
67 check_uaccess r0, 4, r1, ip, __put_user_bad
74 check_uaccess r0, 8, r1, ip, __put_user_bad
copy_page.S
32 ldmia r1!, {r3, r4, ip, lr} @ 4+1
37 stmia r0!, {r3, r4, ip, lr} @ 4
38 ldmia r1!, {r3, r4, ip, lr} @ 4
41 stmia r0!, {r3, r4, ip, lr} @ 4
42 ldmgtia r1!, {r3, r4, ip, lr} @ 4
44 PLD( ldmeqia r1!, {r3, r4, ip, lr} )
csumpartial.S
91 ldrneb ip, [buf], #1
94 orrne td0, td0, ip, lsl #8
96 orrne td0, ip, td0, lsl #8
114 1: bics ip, len, #31
128 sub ip, ip, #32
129 teq ip, #0
csumpartialcopyuser.S
22 mrc p15, 0, ip, c3, c0, 0
23 stmfd sp!, {r1, r2, r4 - r8, ip, lr}
24 uaccess_enable ip
28 ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
29 mcr p15, 0, ip, c3, c0, 0
/linux-4.4.14/arch/arm/mach-davinci/
sleep.S
55 ldr ip, CACHE_FLUSH
56 blx ip
65 ldr ip, [r0, #DDR2_SDRCR_OFFSET]
66 bic ip, ip, #DDR2_SRPD_BIT
67 orr ip, ip, #DDR2_LPMODEN_BIT
68 str ip, [r0, #DDR2_SDRCR_OFFSET]
70 ldr ip, [r0, #DDR2_SDRCR_OFFSET]
71 orr ip, ip, #DDR2_MCLKSTOPEN_BIT
72 str ip, [r0, #DDR2_SDRCR_OFFSET]
74 mov ip, #PHYRDY_CYCLES
75 1: subs ip, ip, #0x1
85 ldr ip, [r3, #PLLDIV1]
86 bic ip, ip, #PLLDIV_EN
87 str ip, [r3, #PLLDIV1]
90 ldr ip, [r3, #PLLCTL]
91 bic ip, ip, #PLLCTL_PLLENSRC
92 bic ip, ip, #PLLCTL_PLLEN
93 str ip, [r3, #PLLCTL]
96 mov ip, #PLL_BYPASS_CYCLES
97 2: subs ip, ip, #0x1
101 ldr ip, [r3, #PLLCTL]
102 orr ip, ip, #PLLCTL_PLLPWRDN
103 str ip, [r3, #PLLCTL]
106 ldr ip, [r4]
107 orr ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
109 str ip, [r4]
114 ldr ip, [r4]
115 bic ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
116 str ip, [r4]
121 ldr ip, [r3, #PLLCTL]
122 bic ip, ip, #PLLCTL_PLLRST
123 str ip, [r3, #PLLCTL]
126 ldr ip, [r3, #PLLCTL]
127 bic ip, ip, #PLLCTL_PLLPWRDN
128 str ip, [r3, #PLLCTL]
130 mov ip, #PLL_RESET_CYCLES
131 3: subs ip, ip, #0x1
135 ldr ip, [r3, #PLLCTL]
136 orr ip, ip, #PLLCTL_PLLRST
137 str ip, [r3, #PLLCTL]
140 mov ip, #PLL_LOCK_CYCLES
141 4: subs ip, ip, #0x1
145 ldr ip, [r3, #PLLCTL]
146 bic ip, ip, #PLLCTL_PLLENSRC
147 orr ip, ip, #PLLCTL_PLLEN
148 str ip, [r3, #PLLCTL]
152 ldr ip, [r3, #PLLDIV1]
153 orr ip, ip, #PLLDIV_EN
154 str ip, [r3, #PLLDIV1]
166 ldr ip, [r0, #DDR2_SDRCR_OFFSET]
167 bic ip, ip, #DDR2_MCLKSTOPEN_BIT
168 str ip, [r0, #DDR2_SDRCR_OFFSET]
170 ldr ip, [r0, #DDR2_SDRCR_OFFSET]
171 bic ip, ip, #DDR2_LPMODEN_BIT
172 str ip, [r0, #DDR2_SDRCR_OFFSET]
190 ldr ip, [r1, r6]
191 bic ip, ip, #MDSTAT_STATE_MASK
192 orr ip, ip, r0
193 str ip, [r1, r6]
196 ldr ip, [r1, #PTCMD]
197 orr ip, ip, #0x1
198 str ip, [r1, #PTCMD]
202 ldr ip, [r1, #PTSTAT]
203 and ip, ip, #0x1
204 cmp ip, #0x0
211 ldr ip, [r1, r6]
212 and ip, ip, #MDSTAT_STATE_MASK
213 cmp ip, r0
/linux-4.4.14/include/net/netfilter/
nf_tables_ipv4.h
5 #include <net/ip.h>
12 struct iphdr *ip; nft_set_pktinfo_ipv4() local
16 ip = ip_hdr(pkt->skb); nft_set_pktinfo_ipv4()
17 pkt->tprot = ip->protocol; nft_set_pktinfo_ipv4()
19 pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; nft_set_pktinfo_ipv4()
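
nft_set_pktinfo_ipv4() above caches the transport protocol and the 13-bit fragment offset straight out of the IPv4 header. A small user-space sketch of the same field extraction from a raw header buffer (the struct name and sample packet bytes are illustrative; the kernel reads struct iphdr via ip_hdr()):

#include <stdint.h>
#include <stdio.h>

#define IP_OFFSET 0x1FFF        /* low 13 bits of frag_off, as in the kernel */

/* Minimal view of the fields nft_set_pktinfo_ipv4() records. */
struct ipv4_hdr_view {
        uint8_t  protocol;
        uint16_t frag_off;      /* host byte order, flags stripped */
};

static struct ipv4_hdr_view parse_ipv4(const uint8_t *hdr)
{
        struct ipv4_hdr_view v;

        v.protocol = hdr[9];                            /* IPv4 protocol field */
        v.frag_off = ((uint16_t)hdr[6] << 8) | hdr[7];  /* ntohs(frag_off) */
        v.frag_off &= IP_OFFSET;                        /* drop DF/MF flag bits */
        return v;
}

int main(void)
{
        /* 20-byte IPv4 header carrying UDP (protocol 17), not fragmented. */
        const uint8_t pkt[20] = {
                0x45, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x40, 0x00,
                0x40, 0x11, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x01,
                0x7f, 0x00, 0x00, 0x01,
        };
        struct ipv4_hdr_view v = parse_ipv4(pkt);

        printf("protocol=%u fragoff=%u\n", (unsigned)v.protocol, (unsigned)v.frag_off);
        return 0;
}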
/linux-4.4.14/lib/lz4/
lz4_compress.c
59 const u8 *ip = (u8 *)source; lz4_compressctx() local
61 const BYTE * const base = ip; lz4_compressctx()
65 const u8 *anchor = ip; lz4_compressctx()
66 const u8 *const iend = ip + isize; lz4_compressctx()
84 hashtable[LZ4_HASH_VALUE(ip)] = ip - base; lz4_compressctx()
85 ip++; lz4_compressctx()
86 forwardh = LZ4_HASH_VALUE(ip); lz4_compressctx()
91 const u8 *forwardip = ip; lz4_compressctx()
99 ip = forwardip; lz4_compressctx()
100 forwardip = ip + step; lz4_compressctx()
107 hashtable[h] = ip - base; lz4_compressctx()
108 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); lz4_compressctx()
111 while ((ip > anchor) && (ref > (u8 *)source) && lz4_compressctx()
112 unlikely(ip[-1] == ref[-1])) { lz4_compressctx()
113 ip--; lz4_compressctx()
118 length = (int)(ip - anchor); lz4_compressctx()
139 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); lz4_compressctx()
142 ip += MINMATCH; lz4_compressctx()
145 anchor = ip; lz4_compressctx()
146 while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) { lz4_compressctx()
148 u64 diff = A64(ref) ^ A64(ip); lz4_compressctx()
150 u32 diff = A32(ref) ^ A32(ip); lz4_compressctx()
153 ip += STEPSIZE; lz4_compressctx()
157 ip += LZ4_NBCOMMONBYTES(diff); lz4_compressctx()
161 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { lz4_compressctx()
162 ip += 4; lz4_compressctx()
166 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { lz4_compressctx()
167 ip += 2; lz4_compressctx()
170 if ((ip < MATCHLIMIT) && (*ref == *ip)) lz4_compressctx()
171 ip++; lz4_compressctx()
174 length = (int)(ip - anchor); lz4_compressctx()
194 if (ip > mflimit) { lz4_compressctx()
195 anchor = ip; lz4_compressctx()
200 hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; lz4_compressctx()
203 ref = base + hashtable[LZ4_HASH_VALUE(ip)]; lz4_compressctx()
204 hashtable[LZ4_HASH_VALUE(ip)] = ip - base; lz4_compressctx()
205 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { lz4_compressctx()
212 anchor = ip++; lz4_compressctx()
213 forwardh = LZ4_HASH_VALUE(ip); lz4_compressctx()
245 const u8 *ip = (u8 *) source; lz4_compress64kctx() local
246 const u8 *anchor = ip; lz4_compress64kctx()
247 const u8 *const base = ip; lz4_compress64kctx()
248 const u8 *const iend = ip + isize; lz4_compress64kctx()
266 ip++; lz4_compress64kctx()
267 forwardh = LZ4_HASH64K_VALUE(ip); lz4_compress64kctx()
272 const u8 *forwardip = ip; lz4_compress64kctx()
280 ip = forwardip; lz4_compress64kctx()
281 forwardip = ip + step; lz4_compress64kctx()
288 hashtable[h] = (u16)(ip - base); lz4_compress64kctx()
289 } while (A32(ref) != A32(ip)); lz4_compress64kctx()
292 while ((ip > anchor) && (ref > (u8 *)source) lz4_compress64kctx()
293 && (ip[-1] == ref[-1])) { lz4_compress64kctx()
294 ip--; lz4_compress64kctx()
299 length = (int)(ip - anchor); lz4_compress64kctx()
319 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); lz4_compress64kctx()
322 ip += MINMATCH; lz4_compress64kctx()
325 anchor = ip; lz4_compress64kctx()
327 while (ip < MATCHLIMIT - (STEPSIZE - 1)) { lz4_compress64kctx()
329 u64 diff = A64(ref) ^ A64(ip); lz4_compress64kctx()
331 u32 diff = A32(ref) ^ A32(ip); lz4_compress64kctx()
335 ip += STEPSIZE; lz4_compress64kctx()
339 ip += LZ4_NBCOMMONBYTES(diff); lz4_compress64kctx()
343 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { lz4_compress64kctx()
344 ip += 4; lz4_compress64kctx()
348 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) { lz4_compress64kctx()
349 ip += 2; lz4_compress64kctx()
352 if ((ip < MATCHLIMIT) && (*ref == *ip)) lz4_compress64kctx()
353 ip++; lz4_compress64kctx()
357 len = (int)(ip - anchor); lz4_compress64kctx()
377 if (ip > mflimit) { lz4_compress64kctx()
378 anchor = ip; lz4_compress64kctx()
383 hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base); lz4_compress64kctx()
386 ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; lz4_compress64kctx()
387 hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base); lz4_compress64kctx()
388 if (A32(ref) == A32(ip)) { lz4_compress64kctx()
395 anchor = ip++; lz4_compress64kctx()
396 forwardh = LZ4_HASH64K_VALUE(ip); lz4_compress64kctx()
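
The compressor above finds back-references by hashing the next four input bytes into a table of earlier positions (hashtable[LZ4_HASH_VALUE(ip)] = ip - base) and then confirming the candidate with a 4-byte compare. A simplified sketch of that lookup, assuming a multiplicative hash and a small table (the real lz4 constants, table sizes and distance limits differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASH_LOG  12
#define HASH_SIZE (1 << HASH_LOG)

/* Read 4 bytes and hash them down to a table index. */
static uint32_t read32(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
static uint32_t hash32(uint32_t v) { return (v * 2654435761u) >> (32 - HASH_LOG); }

int main(void)
{
        const uint8_t *in = (const uint8_t *)"abcdefgh_abcdefgh_abcdefgh";
        size_t len = strlen((const char *)in);
        int32_t table[HASH_SIZE];
        size_t pos;

        /* -1 marks "no earlier occurrence recorded for this hash". */
        for (pos = 0; pos < HASH_SIZE; pos++)
                table[pos] = -1;

        for (pos = 0; pos + 4 <= len; pos++) {
                uint32_t h = hash32(read32(in + pos));
                int32_t ref = table[h];

                /* Verify the candidate really matches (hashes can collide). */
                if (ref >= 0 && read32(in + ref) == read32(in + pos))
                        printf("match: pos %zu refers back to pos %d\n", pos, (int)ref);
                table[h] = (int32_t)pos;        /* remember the newest position */
        }
        return 0;
}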
lz4hc_compress.c
64 /* Update chains up to ip (excluded) */ lz4hc_insert()
65 static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) lz4hc_insert() argument
75 while (hc4->nexttoupdate < ip) { lz4hc_insert()
122 const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) lz4hc_insertandfindbestmatch()
137 lz4hc_insert(hc4, ip); lz4hc_insertandfindbestmatch()
138 ref = hashtable[HASH_VALUE(ip)] + base; lz4hc_insertandfindbestmatch()
141 if (ref >= ip-4) { lz4hc_insertandfindbestmatch()
143 if (A32(ref) == A32(ip)) { lz4hc_insertandfindbestmatch()
144 delta = (u16)(ip-ref); lz4hc_insertandfindbestmatch()
145 repl = ml = lz4hc_commonlength(ip + MINMATCH, lz4hc_insertandfindbestmatch()
152 while ((ref >= ip - MAX_DISTANCE) && nbattempts) { lz4hc_insertandfindbestmatch()
154 if (*(ref + ml) == *(ip + ml)) { lz4hc_insertandfindbestmatch()
155 if (A32(ref) == A32(ip)) { lz4hc_insertandfindbestmatch()
157 lz4hc_commonlength(ip + MINMATCH, lz4hc_insertandfindbestmatch()
170 const BYTE *ptr = ip; lz4hc_insertandfindbestmatch()
172 end = ip + repl - (MINMATCH-1); lz4hc_insertandfindbestmatch()
191 const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, lz4hc_insertandgetwidermatch()
203 int delta = (int)(ip - startlimit); lz4hc_insertandgetwidermatch()
206 lz4hc_insert(hc4, ip); lz4hc_insertandgetwidermatch()
207 ref = hashtable[HASH_VALUE(ip)] + base; lz4hc_insertandgetwidermatch()
209 while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) lz4hc_insertandgetwidermatch()
213 if (A32(ref) == A32(ip)) { lz4hc_insertandgetwidermatch()
215 const u8 *ipt = ip + MINMATCH; lz4hc_insertandgetwidermatch()
216 const u8 *startt = ip; lz4hc_insertandgetwidermatch()
269 static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, lz4_encodesequence() argument
276 length = (int)(*ip - *anchor); lz4_encodesequence()
291 LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); lz4_encodesequence()
311 *ip += ml; lz4_encodesequence()
312 *anchor = *ip; lz4_encodesequence()
322 const u8 *ip = (const u8 *)source; lz4_compresshcctx() local
323 const u8 *anchor = ip; lz4_compresshcctx()
324 const u8 *const iend = ip + isize; lz4_compresshcctx()
340 ip++; lz4_compresshcctx()
343 while (ip < mflimit) { lz4_compresshcctx()
344 ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); lz4_compresshcctx()
346 ip++; lz4_compresshcctx()
351 start0 = ip; lz4_compresshcctx()
355 if (ip+ml < mflimit) lz4_compresshcctx()
356 ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, lz4_compresshcctx()
357 ip + 1, matchlimit, ml, &ref2, &start2); lz4_compresshcctx()
362 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
366 if (start0 < ip) { lz4_compresshcctx()
368 if (start2 < ip + ml0) { lz4_compresshcctx()
369 ip = start0; lz4_compresshcctx()
375 * Here, start0==ip lz4_compresshcctx()
378 if ((start2 - ip) < 3) { lz4_compresshcctx()
380 ip = start2; lz4_compresshcctx()
391 if ((start2 - ip) < OPTIMAL_ML) { lz4_compresshcctx()
396 if (ip + new_ml > start2 + ml2 - MINMATCH) lz4_compresshcctx()
397 new_ml = (int)(start2 - ip) + ml2 - MINMATCH; lz4_compresshcctx()
398 correction = new_ml - (int)(start2 - ip); lz4_compresshcctx()
406 * Now, we have start2 = ip+new_ml, lz4_compresshcctx()
418 /* ip & ref are known; Now for ml */ lz4_compresshcctx()
419 if (start2 < ip+ml) lz4_compresshcctx()
420 ml = (int)(start2 - ip); lz4_compresshcctx()
423 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
424 ip = start2; lz4_compresshcctx()
425 lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); lz4_compresshcctx()
430 if (start3 < ip + ml + 3) { lz4_compresshcctx()
435 if (start3 >= (ip + ml)) { lz4_compresshcctx()
436 if (start2 < ip + ml) { lz4_compresshcctx()
438 (int)(ip + ml - start2); lz4_compresshcctx()
449 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
450 ip = start3; lz4_compresshcctx()
468 * the first one ip & ref are known; Now for ml lz4_compresshcctx()
470 if (start2 < ip + ml) { lz4_compresshcctx()
471 if ((start2 - ip) < (int)ML_MASK) { lz4_compresshcctx()
475 if (ip + ml > start2 + ml2 - MINMATCH) lz4_compresshcctx()
476 ml = (int)(start2 - ip) + ml2 lz4_compresshcctx()
478 correction = ml - (int)(start2 - ip); lz4_compresshcctx()
485 ml = (int)(start2 - ip); lz4_compresshcctx()
487 lz4_encodesequence(&ip, &op, &anchor, ml, ref); lz4_compresshcctx()
489 ip = start2; lz4_compresshcctx()
121 lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) lz4hc_insertandfindbestmatch() argument
190 lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, const u8 **matchpos, const u8 **startpos) lz4hc_insertandgetwidermatch() argument
lz4_decompress.c
57 const BYTE *ip = (const BYTE *) source; lz4_uncompress() local
68 token = *ip++; lz4_uncompress()
73 len = *ip++; lz4_uncompress()
75 len = *ip++; lz4_uncompress()
91 memcpy(op, ip, length); lz4_uncompress()
92 ip += length; lz4_uncompress()
95 LZ4_WILDCOPY(ip, op, cpy); lz4_uncompress()
96 ip -= (op - cpy); lz4_uncompress()
100 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); lz4_uncompress()
101 ip += 2; lz4_uncompress()
110 for (; *ip == 255; length += 255) lz4_uncompress()
111 ip++; lz4_uncompress()
112 if (unlikely(length > (size_t)(length + *ip))) lz4_uncompress()
114 length += *ip++; lz4_uncompress()
166 return (int) (((char *)ip) - source); lz4_uncompress()
176 const BYTE *ip = (const BYTE *) source; lz4_uncompress_unknownoutputsize() local
177 const BYTE *const iend = ip + isize; lz4_uncompress_unknownoutputsize()
186 while (ip < iend) { lz4_uncompress_unknownoutputsize()
192 token = *ip++; lz4_uncompress_unknownoutputsize()
196 while ((ip < iend) && (s == 255)) { lz4_uncompress_unknownoutputsize()
197 s = *ip++; lz4_uncompress_unknownoutputsize()
206 (ip + length > iend - COPYLENGTH)) { lz4_uncompress_unknownoutputsize()
211 if (ip + length != iend) lz4_uncompress_unknownoutputsize()
217 memcpy(op, ip, length); lz4_uncompress_unknownoutputsize()
221 LZ4_WILDCOPY(ip, op, cpy); lz4_uncompress_unknownoutputsize()
222 ip -= (op - cpy); lz4_uncompress_unknownoutputsize()
226 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); lz4_uncompress_unknownoutputsize()
227 ip += 2; lz4_uncompress_unknownoutputsize()
238 while (ip < iend) { lz4_uncompress_unknownoutputsize()
239 int s = *ip++; lz4_uncompress_unknownoutputsize()
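
lz4_uncompress() walks the same layout in reverse: read a token, extend any length that hits the 4-bit maximum with bytes of 255 (the `*ip == 255` loops above), copy the literals, read the 16-bit little-endian offset via LZ4_READ_LITTLEENDIAN_16, then copy the match, which may overlap the output. A hedged sketch of just the token and length decoding, with bounds checks omitted and made-up helper names:

    #include <stdint.h>
    #include <stddef.h>

    /* Decode one token: returns the literal length and leaves the raw
     * 4-bit match length (MINMATCH not yet added back) in *match_len. */
    static size_t decode_token(const uint8_t **ip, size_t *match_len)
    {
        uint8_t token = *(*ip)++;
        size_t lit = token >> 4;

        if (lit == 15) {                 /* extended literal length */
            uint8_t s;
            do {
                s = *(*ip)++;
                lit += s;
            } while (s == 255);
        }
        *match_len = token & 15;         /* extended after the offset */
        return lit;
    }

    /* Little-endian 16-bit offset, as LZ4_READ_LITTLEENDIAN_16() reads it. */
    static uint16_t read_offset(const uint8_t **ip)
    {
        uint16_t off = (uint16_t)((*ip)[0] | ((*ip)[1] << 8));
        *ip += 2;
        return off;
    }
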
/linux-4.4.14/lib/lzo/
H A Dlzo1x_decompress_safe.c22 #define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
42 const unsigned char *ip; lzo1x_decompress_safe() local
50 ip = in; lzo1x_decompress_safe()
54 if (*ip > 17) { lzo1x_decompress_safe()
55 t = *ip++ - 17; lzo1x_decompress_safe()
64 t = *ip++; lzo1x_decompress_safe()
69 const unsigned char *ip_last = ip; lzo1x_decompress_safe()
71 while (unlikely(*ip == 0)) { lzo1x_decompress_safe()
72 ip++; lzo1x_decompress_safe()
75 offset = ip - ip_last; lzo1x_decompress_safe()
80 t += offset + 15 + *ip++; lzo1x_decompress_safe()
86 const unsigned char *ie = ip + t; lzo1x_decompress_safe()
89 COPY8(op, ip); lzo1x_decompress_safe()
91 ip += 8; lzo1x_decompress_safe()
92 COPY8(op, ip); lzo1x_decompress_safe()
94 ip += 8; lzo1x_decompress_safe()
95 } while (ip < ie); lzo1x_decompress_safe()
96 ip = ie; lzo1x_decompress_safe()
104 *op++ = *ip++; lzo1x_decompress_safe()
113 m_pos -= *ip++ << 2; lzo1x_decompress_safe()
124 m_pos -= *ip++ << 2; lzo1x_decompress_safe()
131 m_pos -= *ip++ << 3; lzo1x_decompress_safe()
137 const unsigned char *ip_last = ip; lzo1x_decompress_safe()
139 while (unlikely(*ip == 0)) { lzo1x_decompress_safe()
140 ip++; lzo1x_decompress_safe()
143 offset = ip - ip_last; lzo1x_decompress_safe()
148 t += offset + 31 + *ip++; lzo1x_decompress_safe()
152 next = get_unaligned_le16(ip); lzo1x_decompress_safe()
153 ip += 2; lzo1x_decompress_safe()
162 const unsigned char *ip_last = ip; lzo1x_decompress_safe()
164 while (unlikely(*ip == 0)) { lzo1x_decompress_safe()
165 ip++; lzo1x_decompress_safe()
168 offset = ip - ip_last; lzo1x_decompress_safe()
173 t += offset + 7 + *ip++; lzo1x_decompress_safe()
176 next = get_unaligned_le16(ip); lzo1x_decompress_safe()
177 ip += 2; lzo1x_decompress_safe()
200 COPY4(op, ip); lzo1x_decompress_safe()
202 ip += next; lzo1x_decompress_safe()
229 COPY4(op, ip); lzo1x_decompress_safe()
231 ip += t; lzo1x_decompress_safe()
238 *op++ = *ip++; lzo1x_decompress_safe()
247 ip == ip_end ? LZO_E_OK : lzo1x_decompress_safe()
248 ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); lzo1x_decompress_safe()
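
The pattern repeated through these lzo1x_decompress_safe.c hits -- count a run of zero bytes via `ip - ip_last`, then `t += offset + 15 + *ip++` (or + 31, + 7 depending on state) -- is LZO's length encoding: each zero byte stands for 255, a state-dependent base is added, and a final non-zero byte tops it off. (In the full source the zero-byte count is scaled by 255 on a line that does not contain `ip`, so it does not appear in this listing.) A small sketch of that step, assuming the caller has already bounds-checked the input; the function name is made up:

    #include <assert.h>
    #include <stddef.h>

    /* LZO-style length extension: zero bytes each worth 255, a final
     * non-zero byte, all added to a state-dependent base (15, 31 or 7). */
    static size_t extend_length(const unsigned char **ip, size_t base)
    {
        const unsigned char *p = *ip;
        size_t t = base;

        while (*p == 0) {        /* each zero byte contributes 255 */
            t += 255;
            p++;
        }
        t += *p++;               /* the terminating byte adds its value */
        *ip = p;
        return t;
    }

    int main(void)
    {
        const unsigned char buf[] = { 0, 0, 200 };
        const unsigned char *p = buf;

        assert(extend_length(&p, 15) == 15 + 255 + 255 + 200);
        return 0;
    }
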
H A Dlzo1x_compress.c25 const unsigned char *ip; lzo1x_1_do_compress() local
33 ip = in; lzo1x_1_do_compress()
34 ii = ip; lzo1x_1_do_compress()
35 ip += ti < 4 ? 4 - ti : 0; lzo1x_1_do_compress()
42 ip += 1 + ((ip - ii) >> 5); lzo1x_1_do_compress()
44 if (unlikely(ip >= ip_end)) lzo1x_1_do_compress()
46 dv = get_unaligned_le32(ip); lzo1x_1_do_compress()
49 dict[t] = (lzo_dict_t) (ip - in); lzo1x_1_do_compress()
55 t = ip - ii; lzo1x_1_do_compress()
95 v = get_unaligned((const u64 *) (ip + m_len)) ^ lzo1x_1_do_compress()
100 v = get_unaligned((const u64 *) (ip + m_len)) ^ lzo1x_1_do_compress()
102 if (unlikely(ip + m_len >= ip_end)) lzo1x_1_do_compress()
115 v = get_unaligned((const u32 *) (ip + m_len)) ^ lzo1x_1_do_compress()
120 v = get_unaligned((const u32 *) (ip + m_len)) ^ lzo1x_1_do_compress()
125 v = get_unaligned((const u32 *) (ip + m_len)) ^ lzo1x_1_do_compress()
127 if (unlikely(ip + m_len >= ip_end)) lzo1x_1_do_compress()
139 if (unlikely(ip[m_len] == m_pos[m_len])) { lzo1x_1_do_compress()
142 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
145 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
148 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
151 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
154 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
157 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
160 if (ip[m_len] != m_pos[m_len]) lzo1x_1_do_compress()
163 if (unlikely(ip + m_len >= ip_end)) lzo1x_1_do_compress()
165 } while (ip[m_len] == m_pos[m_len]); lzo1x_1_do_compress()
171 m_off = ip - m_pos; lzo1x_1_do_compress()
172 ip += m_len; lzo1x_1_do_compress()
173 ii = ip; lzo1x_1_do_compress()
220 const unsigned char *ip = in; lzo1x_1_compress() local
227 uintptr_t ll_end = (uintptr_t) ip + ll; lzo1x_1_compress()
232 t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem); lzo1x_1_compress()
233 ip += ll; lzo1x_1_compress()
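
One detail worth calling out in lzo1x_1_do_compress(): the hit `ip += 1 + ((ip - ii) >> 5)` is the compressor's skip acceleration. `ii` marks where the current literal run started (it is reset to `ip` after each emitted match), so the farther the scan gets without finding a match, the larger the forward step, which keeps incompressible input cheap. A one-function sketch of just that stepping rule (hypothetical name, not the kernel loop):

    #include <stddef.h>

    /* Advance at least one byte, plus one extra byte for every 32 bytes
     * of literals accumulated since the last match. */
    static const unsigned char *next_probe(const unsigned char *ip,
                                           const unsigned char *run_start)
    {
        return ip + 1 + ((size_t)(ip - run_start) >> 5);
    }
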
/linux-4.4.14/include/linux/
H A Dbottom_half.h7 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
9 static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) __local_bh_disable_ip() argument
22 extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
24 static inline void local_bh_enable_ip(unsigned long ip) local_bh_enable_ip() argument
26 __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); local_bh_enable_ip()
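
In bottom_half.h the `ip` argument is an instruction pointer, not anything network related: callers pass a code address down so lockdep and the tracers can attribute a softirq-disable section to its call site, and the plain local_bh_disable()/local_bh_enable() wrappers in this header hand `_THIS_IP_` through together with SOFTIRQ_DISABLE_OFFSET. A userspace stand-in for that idea, using a GCC/clang-only macro in the spirit of the kernel's `_THIS_IP_` (everything here is illustrative, not the kernel header):

    #include <stdio.h>

    /* GCC/clang-only stand-in for the kernel's _THIS_IP_: the address of
     * the current code location, via the labels-as-values extension. */
    #define THIS_IP ({ __label__ __here; __here: (unsigned long)&&__here; })

    /* Records which call site "disabled bottom halves" in this toy model. */
    static void toy_bh_disable_ip(unsigned long ip)
    {
        printf("bh disabled from %#lx\n", ip);
    }

    int main(void)
    {
        toy_bh_disable_ip(THIS_IP);   /* attribute the section to this line */
        return 0;
    }
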
H A Dpercpu-rwsem.h40 bool read, unsigned long ip) percpu_rwsem_release()
42 lock_release(&sem->rw_sem.dep_map, 1, ip); percpu_rwsem_release()
50 bool read, unsigned long ip) percpu_rwsem_acquire()
52 lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip); percpu_rwsem_acquire()
39 percpu_rwsem_release(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) percpu_rwsem_release() argument
49 percpu_rwsem_acquire(struct percpu_rw_semaphore *sem, bool read, unsigned long ip) percpu_rwsem_acquire() argument
H A Derrqueue.h5 #include <net/ip.h>
H A Dif_tunnel.h4 #include <linux/ip.h>
H A Dprofile.h52 void profile_hits(int type, void *ip, unsigned int nr_hits);
57 static inline void profile_hit(int type, void *ip) profile_hit() argument
63 profile_hits(type, ip, 1); profile_hit()
102 static inline void profile_hits(int type, void *ip, unsigned int nr_hits) profile_hits() argument
107 static inline void profile_hit(int type, void *ip) profile_hit() argument
H A Dip.h8 * Version: @(#)ip.h 1.0.2 04/28/93
21 #include <uapi/linux/ip.h>
/linux-4.4.14/sound/oss/
H A Dvidc_fill.S20 mov ip, #0xff00
25 and r4, ip, r4, lsl #8
33 mov ip, #0xff00
37 and r5, r4, ip
38 and r4, ip, r4, lsl #8
47 mov ip, #0xff00
51 and r4, ip, r4, lsl #8
59 mov ip, #0xff00
63 and r5, r4, ip
64 and r4, ip, r4, lsl #8
73 mov ip, #0xff00
74 orr ip, ip, ip, lsr #8
78 and r4, r5, ip
83 andlt r4, r5, ip, lsl #16
91 mov ip, #0xff00
92 orr ip, ip, ip, lsr #8
133 * ip = corrupted
144 mov ip, #IOMD_BASE & 0xff000000
145 orr ip, ip, #IOMD_BASE & 0x00ff0000
146 ldrb r6, [ip, #IOMD_SD0ST]
154 mov pc, r4 @ Call fill routine (uses r4, ip)
170 mov ip, #IOMD_BASE & 0xff000000
171 orr ip, ip, #IOMD_BASE & 0x00ff0000
172 streq r4, [ip, #IOMD_SD0CURB]
173 strne r5, [ip, #IOMD_SD0CURA]
174 streq r2, [ip, #IOMD_SD0ENDB]
175 strne r2, [ip, #IOMD_SD0ENDA]
176 ldr lr, [ip, #IOMD_SD0ST]
180 strne r4, [ip, #IOMD_SD0CURB]
181 streq r5, [ip, #IOMD_SD0CURA]
182 strne r2, [ip, #IOMD_SD0ENDB]
183 streq r2, [ip, #IOMD_SD0ENDA]
186 strneb r0, [ip, #IOMD_SD0CR]
/linux-4.4.14/arch/arm/boot/compressed/
H A Dll_char_wr.S38 @ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc)
47 adr ip, LC0
48 ldmia ip, {r3, r4, r5, r6, lr}
49 sub ip, ip, r3
50 add r6, r6, ip
51 add lr, lr, ip
52 ldr r4, [r4, ip]
53 ldr r5, [r5, ip]
65 @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
83 @ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
86 mov ip, r7, lsr #4
87 ldr ip, [lr, ip, lsl #2]
88 mul r4, r2, ip
89 and ip, r7, #15 @ avoid r4
90 ldr ip, [lr, ip, lsl #2] @ avoid r4
91 mul ip, r2, ip @ avoid r4
92 sub r1, r1, #1 @ avoid ip
93 sub r0, r0, r5 @ avoid ip
94 stmia r0, {r4, ip}
96 mov ip, r7, lsr #4
97 ldr ip, [lr, ip, lsl #2]
98 mul r4, r2, ip
99 and ip, r7, #15 @ avoid r4
100 ldr ip, [lr, ip, lsl #2] @ avoid r4
101 mul ip, r2, ip @ avoid r4
102 tst r1, #7 @ avoid ip
103 sub r0, r0, r5 @ avoid ip
104 stmia r0, {r4, ip}
111 @ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
/linux-4.4.14/arch/sparc/kernel/
H A Dftrace.c14 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) ftrace_call_replace() argument
19 off = ((s32)addr - (s32)ip); ftrace_call_replace()
25 static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) ftrace_modify_code() argument
31 "1: cas [%[ip]], %[old], %[new]\n" ftrace_modify_code()
32 " flush %[ip]\n" ftrace_modify_code()
46 : [new] "0" (new), [old] "r" (old), [ip] "r" (ip) ftrace_modify_code()
57 unsigned long ip = rec->ip; ftrace_make_nop() local
60 old = ftrace_call_replace(ip, addr); ftrace_make_nop()
62 return ftrace_modify_code(ip, old, new); ftrace_make_nop()
67 unsigned long ip = rec->ip; ftrace_make_call() local
71 new = ftrace_call_replace(ip, addr); ftrace_make_call()
72 return ftrace_modify_code(ip, old, new); ftrace_make_call()
77 unsigned long ip = (unsigned long)(&ftrace_call); ftrace_update_ftrace_func() local
81 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
82 return ftrace_modify_code(ip, old, new); ftrace_update_ftrace_func()
98 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_enable_ftrace_graph_caller() local
102 new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); ftrace_enable_ftrace_graph_caller()
103 return ftrace_modify_code(ip, old, new); ftrace_enable_ftrace_graph_caller()
108 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_disable_ftrace_graph_caller() local
112 new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); ftrace_disable_ftrace_graph_caller()
114 return ftrace_modify_code(ip, old, new); ftrace_disable_ftrace_graph_caller()
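
The sparc ftrace.c hits show the usual dynamic-ftrace pattern: ftrace_call_replace() computes a signed byte displacement `addr - ip` and packs it into a SPARC `call` instruction, which ftrace_modify_code() then patches in with the cas/flush sequence shown above. The call format itself is fixed by the ISA: op = 01 in the top two bits, a 30-bit signed word displacement below. A small sketch of that encoding in isolation (not the kernel function):

    #include <inttypes.h>
    #include <stdio.h>

    /* Encode a SPARC "call": op=01 in bits 31..30, 30-bit signed word
     * displacement from the call site to the target in bits 29..0. */
    static uint32_t sparc_call_insn(uint32_t site, uint32_t target)
    {
        int32_t byte_off = (int32_t)(target - site);
        return 0x40000000u | (((uint32_t)byte_off >> 2) & 0x3fffffffu);
    }

    int main(void)
    {
        /* A target 64 bytes ahead gives a displacement field of 16 words. */
        printf("%08" PRIx32 "\n", sparc_call_insn(0x1000, 0x1040));
        return 0;
    }
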
/linux-4.4.14/fs/gfs2/
H A Dquota.h21 extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
22 extern void gfs2_quota_unhold(struct gfs2_inode *ip);
24 extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
25 extern void gfs2_quota_unlock(struct gfs2_inode *ip);
27 extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
29 extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
41 static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, gfs2_quota_lock_check() argument
44 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_lock_check()
48 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); gfs2_quota_lock_check()
53 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap); gfs2_quota_lock_check()
55 gfs2_quota_unlock(ip); gfs2_quota_lock_check()
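
gfs2_quota_lock_check() above captures the quota half of a pattern that recurs later in this listing (ea_alloc_skeleton(), do_grow(), __gfs2_fallocate()): lock and check quotas, reserve blocks in place, open a transaction, then unwind in reverse order on error. A condensed sketch of that ordering, stitched together from those hits; kernel/gfs2 context is assumed, error handling is trimmed to the ordering, and the struct gfs2_alloc_parms type name comes from the gfs2 headers rather than from the lines shown here:

    /* Sketch of the allocation call ordering used by gfs2 write paths.
     * Not a real function in the tree. */
    static int gfs2_alloc_pattern_sketch(struct gfs2_inode *ip,
                                         struct gfs2_alloc_parms *ap,
                                         unsigned int blks)
    {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        error = gfs2_quota_lock_check(ip, ap);        /* lock + check */
        if (error)
            return error;
        error = gfs2_inplace_reserve(ip, ap);         /* block reservation */
        if (error)
            goto out_quota;
        error = gfs2_trans_begin(sdp, blks + RES_DINODE, 0);
        if (error)
            goto out_reserve;

        /* ... allocate blocks, dirty metadata, gfs2_dinode_out() ... */

        gfs2_trans_end(sdp);
    out_reserve:
        gfs2_inplace_release(ip);
    out_quota:
        gfs2_quota_unlock(ip);
        return error;
    }
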
H A Dxattr.c75 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
79 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, ea_foreach_i() argument
85 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA)) ea_foreach_i()
97 error = ea_call(ip, bh, ea, prev, data); ea_foreach_i()
112 gfs2_consist_inode(ip); ea_foreach_i()
116 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) ea_foreach() argument
122 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh); ea_foreach()
126 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) { ea_foreach()
127 error = ea_foreach_i(ip, bh, ea_call, data); ea_foreach()
131 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) { ea_foreach()
137 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; ea_foreach()
146 error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh); ea_foreach()
149 error = ea_foreach_i(ip, eabh, ea_call, data); ea_foreach()
166 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh, ea_find_i() argument
190 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, gfs2_ea_find() argument
203 error = ea_foreach(ip, ea_find_i, &ef); gfs2_ea_find()
212 * @ip:
225 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, ea_dealloc_unstuffed() argument
230 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_dealloc_unstuffed()
261 gfs2_consist_inode(ip); ea_dealloc_unstuffed()
274 gfs2_trans_add_meta(ip->i_gl, bh); ea_dealloc_unstuffed()
286 gfs2_free_meta(ip, bstart, blen); ea_dealloc_unstuffed()
292 gfs2_add_inode_blocks(&ip->i_inode, -1); ea_dealloc_unstuffed()
295 gfs2_free_meta(ip, bstart, blen); ea_dealloc_unstuffed()
310 error = gfs2_meta_inode_buffer(ip, &dibh); ea_dealloc_unstuffed()
312 ip->i_inode.i_ctime = CURRENT_TIME; ea_dealloc_unstuffed()
313 gfs2_trans_add_meta(ip->i_gl, dibh); ea_dealloc_unstuffed()
314 gfs2_dinode_out(ip, dibh->b_data); ea_dealloc_unstuffed()
325 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, ea_remove_unstuffed() argument
331 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); ea_remove_unstuffed()
335 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); ea_remove_unstuffed()
339 error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); ea_remove_unstuffed()
341 gfs2_quota_unhold(ip); ea_remove_unstuffed()
365 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, ea_list_i() argument
423 struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); gfs2_listxattr() local
434 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); gfs2_listxattr()
438 if (ip->i_eattr) { gfs2_listxattr()
441 error = ea_foreach(ip, ea_list_i, &ei); gfs2_listxattr()
454 * @ip: The GFS2 inode
462 static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, gfs2_iter_unstuffed() argument
465 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_iter_unstuffed()
480 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, gfs2_iter_unstuffed()
513 gfs2_trans_add_meta(ip->i_gl, bh[x]); gfs2_iter_unstuffed()
527 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, gfs2_ea_get_copy() argument
539 ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data); gfs2_ea_get_copy()
545 int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata) gfs2_xattr_acl_get() argument
552 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el); gfs2_xattr_acl_get()
566 error = gfs2_ea_get_copy(ip, &el, data, len); gfs2_xattr_acl_get()
590 struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); gfs2_xattr_get() local
595 if (!ip->i_eattr) gfs2_xattr_get()
600 error = gfs2_ea_find(ip, type, name, &el); gfs2_xattr_get()
606 error = gfs2_ea_get_copy(ip, &el, buffer, size); gfs2_xattr_get()
616 * @ip: A pointer to the inode that's getting extended attributes
622 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp) ea_alloc_blk() argument
624 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_alloc_blk()
630 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); ea_alloc_blk()
634 *bhp = gfs2_meta_new(ip->i_gl, block); ea_alloc_blk()
635 gfs2_trans_add_meta(ip->i_gl, *bhp); ea_alloc_blk()
645 gfs2_add_inode_blocks(&ip->i_inode, 1); ea_alloc_blk()
653 * @ip: inode that is being modified
662 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea, ea_write() argument
665 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_write()
692 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); ea_write()
696 bh = gfs2_meta_new(ip->i_gl, block); ea_write()
697 gfs2_trans_add_meta(ip->i_gl, bh); ea_write()
700 gfs2_add_inode_blocks(&ip->i_inode, 1); ea_write()
722 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
725 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, ea_alloc_skeleton() argument
733 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); ea_alloc_skeleton()
737 error = gfs2_quota_lock_check(ip, &ap); ea_alloc_skeleton()
741 error = gfs2_inplace_reserve(ip, &ap); ea_alloc_skeleton()
745 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), ea_alloc_skeleton()
746 blks + gfs2_rg_blocks(ip, blks) + ea_alloc_skeleton()
751 error = skeleton_call(ip, er, private); ea_alloc_skeleton()
755 error = gfs2_meta_inode_buffer(ip, &dibh); ea_alloc_skeleton()
757 ip->i_inode.i_ctime = CURRENT_TIME; ea_alloc_skeleton()
758 gfs2_trans_add_meta(ip->i_gl, dibh); ea_alloc_skeleton()
759 gfs2_dinode_out(ip, dibh->b_data); ea_alloc_skeleton()
764 gfs2_trans_end(GFS2_SB(&ip->i_inode)); ea_alloc_skeleton()
766 gfs2_inplace_release(ip); ea_alloc_skeleton()
768 gfs2_quota_unlock(ip); ea_alloc_skeleton()
772 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er, ea_init_i() argument
778 error = ea_alloc_blk(ip, &bh); ea_init_i()
782 ip->i_eattr = bh->b_blocknr; ea_init_i()
783 error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er); ea_init_i()
792 * @ip:
798 static int ea_init(struct gfs2_inode *ip, int type, const char *name, ea_init() argument
802 unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize; ea_init()
814 return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL); ea_init()
834 static void ea_set_remove_stuffed(struct gfs2_inode *ip, ea_set_remove_stuffed() argument
841 gfs2_trans_add_meta(ip->i_gl, el->el_bh); ea_set_remove_stuffed()
848 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea); ea_set_remove_stuffed()
868 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, ea_set_simple_noalloc() argument
875 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0); ea_set_simple_noalloc()
879 gfs2_trans_add_meta(ip->i_gl, bh); ea_set_simple_noalloc()
884 ea_write(ip, ea, er); ea_set_simple_noalloc()
887 ea_set_remove_stuffed(ip, es->es_el); ea_set_simple_noalloc()
889 error = gfs2_meta_inode_buffer(ip, &dibh); ea_set_simple_noalloc()
892 ip->i_inode.i_ctime = CURRENT_TIME; ea_set_simple_noalloc()
893 gfs2_trans_add_meta(ip->i_gl, dibh); ea_set_simple_noalloc()
894 gfs2_dinode_out(ip, dibh->b_data); ea_set_simple_noalloc()
897 gfs2_trans_end(GFS2_SB(&ip->i_inode)); ea_set_simple_noalloc()
901 static int ea_set_simple_alloc(struct gfs2_inode *ip, ea_set_simple_alloc() argument
908 gfs2_trans_add_meta(ip->i_gl, es->es_bh); ea_set_simple_alloc()
913 error = ea_write(ip, ea, er); ea_set_simple_alloc()
918 ea_set_remove_stuffed(ip, es->es_el); ea_set_simple_alloc()
923 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh, ea_set_simple() argument
932 stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len, ea_set_simple()
939 error = ea_remove_unstuffed(ip, bh, ea, prev, 1); ea_set_simple()
950 error = ea_set_simple_noalloc(ip, bh, ea, es); ea_set_simple()
959 GFS2_SB(&ip->i_inode)->sd_jbsize); ea_set_simple()
961 error = ea_alloc_skeleton(ip, es->es_er, blks, ea_set_simple()
970 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, ea_set_block() argument
973 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_set_block()
979 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { ea_set_block()
982 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, ea_set_block()
1004 gfs2_trans_add_meta(ip->i_gl, indbh); ea_set_block()
1008 error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL); ea_set_block()
1012 indbh = gfs2_meta_new(ip->i_gl, blk); ea_set_block()
1013 gfs2_trans_add_meta(ip->i_gl, indbh); ea_set_block()
1018 *eablk = cpu_to_be64(ip->i_eattr); ea_set_block()
1019 ip->i_eattr = blk; ea_set_block()
1020 ip->i_diskflags |= GFS2_DIF_EA_INDIRECT; ea_set_block()
1021 gfs2_add_inode_blocks(&ip->i_inode, 1); ea_set_block()
1026 error = ea_alloc_blk(ip, &newbh); ea_set_block()
1031 error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er); ea_set_block()
1037 ea_set_remove_stuffed(ip, private); ea_set_block()
1044 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name, ea_set_i() argument
1062 error = ea_foreach(ip, ea_set_simple, &es); ea_set_i()
1068 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) ea_set_i()
1070 if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize) ea_set_i()
1071 blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize); ea_set_i()
1073 return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el); ea_set_i()
1076 static int ea_set_remove_unstuffed(struct gfs2_inode *ip, ea_set_remove_unstuffed() argument
1081 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), ea_set_remove_unstuffed()
1085 return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0); ea_set_remove_unstuffed()
1088 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) ea_remove_stuffed() argument
1095 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); ea_remove_stuffed()
1099 gfs2_trans_add_meta(ip->i_gl, el->el_bh); ea_remove_stuffed()
1113 error = gfs2_meta_inode_buffer(ip, &dibh); ea_remove_stuffed()
1115 ip->i_inode.i_ctime = CURRENT_TIME; ea_remove_stuffed()
1116 gfs2_trans_add_meta(ip->i_gl, dibh); ea_remove_stuffed()
1117 gfs2_dinode_out(ip, dibh->b_data); ea_remove_stuffed()
1121 gfs2_trans_end(GFS2_SB(&ip->i_inode)); ea_remove_stuffed()
1128 * @ip: The inode
1139 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name) gfs2_xattr_remove() argument
1144 if (!ip->i_eattr) gfs2_xattr_remove()
1147 error = gfs2_ea_find(ip, type, name, &el); gfs2_xattr_remove()
1154 error = ea_remove_stuffed(ip, &el); gfs2_xattr_remove()
1156 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0); gfs2_xattr_remove()
1165 * @ip: The inode
1180 struct gfs2_inode *ip = GFS2_I(inode); __gfs2_xattr_set() local
1192 return gfs2_xattr_remove(ip, type, name); __gfs2_xattr_set()
1197 if (!ip->i_eattr) { __gfs2_xattr_set()
1200 return ea_init(ip, type, name, value, size); __gfs2_xattr_set()
1203 error = gfs2_ea_find(ip, type, name, &el); __gfs2_xattr_set()
1208 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) { __gfs2_xattr_set()
1216 error = ea_set_i(ip, type, name, value, size, &el); __gfs2_xattr_set()
1218 ea_set_remove_unstuffed(ip, &el); __gfs2_xattr_set()
1227 error = ea_set_i(ip, type, name, value, size, NULL); __gfs2_xattr_set()
1241 static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip, ea_acl_chmod_unstuffed() argument
1244 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_acl_chmod_unstuffed()
1253 ret = gfs2_iter_unstuffed(ip, ea, data, NULL); ea_acl_chmod_unstuffed()
1259 int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) gfs2_xattr_acl_chmod() argument
1261 struct inode *inode = &ip->i_inode; gfs2_xattr_acl_chmod()
1266 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el); gfs2_xattr_acl_chmod()
1273 gfs2_trans_add_meta(ip->i_gl, el.el_bh); gfs2_xattr_acl_chmod()
1278 error = ea_acl_chmod_unstuffed(ip, el.el_ea, data); gfs2_xattr_acl_chmod()
1290 static int ea_dealloc_indirect(struct gfs2_inode *ip) ea_dealloc_indirect() argument
1292 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_dealloc_indirect()
1309 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh); ea_dealloc_indirect()
1332 gfs2_rlist_add(ip, &rlist, bstart); ea_dealloc_indirect()
1339 gfs2_rlist_add(ip, &rlist, bstart); ea_dealloc_indirect()
1360 gfs2_trans_add_meta(ip->i_gl, indbh); ea_dealloc_indirect()
1377 gfs2_free_meta(ip, bstart, blen); ea_dealloc_indirect()
1383 gfs2_add_inode_blocks(&ip->i_inode, -1); ea_dealloc_indirect()
1386 gfs2_free_meta(ip, bstart, blen); ea_dealloc_indirect()
1388 ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT; ea_dealloc_indirect()
1390 error = gfs2_meta_inode_buffer(ip, &dibh); ea_dealloc_indirect()
1392 gfs2_trans_add_meta(ip->i_gl, dibh); ea_dealloc_indirect()
1393 gfs2_dinode_out(ip, dibh->b_data); ea_dealloc_indirect()
1408 static int ea_dealloc_block(struct gfs2_inode *ip) ea_dealloc_block() argument
1410 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); ea_dealloc_block()
1420 rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1); ea_dealloc_block()
1422 gfs2_consist_inode(ip); ea_dealloc_block()
1435 gfs2_free_meta(ip, ip->i_eattr, 1); ea_dealloc_block()
1437 ip->i_eattr = 0; ea_dealloc_block()
1438 gfs2_add_inode_blocks(&ip->i_inode, -1); ea_dealloc_block()
1440 error = gfs2_meta_inode_buffer(ip, &dibh); ea_dealloc_block()
1442 gfs2_trans_add_meta(ip->i_gl, dibh); ea_dealloc_block()
1443 gfs2_dinode_out(ip, dibh->b_data); ea_dealloc_block()
1456 * @ip: the inode
1461 int gfs2_ea_dealloc(struct gfs2_inode *ip) gfs2_ea_dealloc() argument
1465 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); gfs2_ea_dealloc()
1469 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); gfs2_ea_dealloc()
1473 error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); gfs2_ea_dealloc()
1477 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { gfs2_ea_dealloc()
1478 error = ea_dealloc_indirect(ip); gfs2_ea_dealloc()
1483 error = ea_dealloc_block(ip); gfs2_ea_dealloc()
1486 gfs2_quota_unhold(ip); gfs2_ea_dealloc()
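
Most of the xattr.c hits above are callbacks of the same shape: ea_foreach() walks every extended-attribute header in the inode's EA block (and, when GFS2_DIF_EA_INDIRECT is set, every block named by the indirect block) and hands each header to an ea_call_t -- ea_find_i(), ea_list_i(), ea_dealloc_unstuffed() and ea_set_simple() are all instances. A generic, userspace-only sketch of that iterate-with-callback shape (the types here are illustrative, not gfs2's):

    #include <stddef.h>

    struct ea_header {
        struct ea_header *next;
        /* ... name, type, payload ... */
    };

    /* A non-zero return stops the walk, mirroring how ea_foreach_i()
     * propagates the callback's error or "found it" result. */
    typedef int (*ea_call_t)(struct ea_header *ea, void *data);

    static int ea_foreach_sketch(struct ea_header *first, ea_call_t cb, void *data)
    {
        int err = 0;

        for (struct ea_header *ea = first; ea && !err; ea = ea->next)
            err = cb(ea, data);    /* find, list, or deallocate */
        return err;
    }
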
H A Dglops.c207 struct gfs2_inode *ip = gl->gl_object; inode_go_sync() local
211 if (ip && !S_ISREG(ip->i_inode.i_mode)) inode_go_sync()
212 ip = NULL; inode_go_sync()
213 if (ip) { inode_go_sync()
214 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) inode_go_sync()
215 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); inode_go_sync()
216 inode_dio_wait(&ip->i_inode); inode_go_sync()
225 if (ip) { inode_go_sync()
226 struct address_space *mapping = ip->i_inode.i_mapping; inode_go_sync()
255 struct gfs2_inode *ip = gl->gl_object; inode_go_inval() local
262 if (ip) { inode_go_inval()
263 set_bit(GIF_INVALID, &ip->i_flags); inode_go_inval()
264 forget_all_cached_acls(&ip->i_inode); inode_go_inval()
265 gfs2_dir_hash_inval(ip); inode_go_inval()
269 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { inode_go_inval()
273 if (ip && S_ISREG(ip->i_inode.i_mode)) inode_go_inval()
274 truncate_inode_pages(ip->i_inode.i_mapping, 0); inode_go_inval()
327 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) gfs2_dinode_in() argument
333 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) gfs2_dinode_in()
335 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); gfs2_dinode_in()
336 ip->i_inode.i_mode = be32_to_cpu(str->di_mode); gfs2_dinode_in()
337 ip->i_inode.i_rdev = 0; gfs2_dinode_in()
338 switch (ip->i_inode.i_mode & S_IFMT) { gfs2_dinode_in()
341 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major), gfs2_dinode_in()
346 i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid)); gfs2_dinode_in()
347 i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid)); gfs2_dinode_in()
348 gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink)); gfs2_dinode_in()
349 i_size_write(&ip->i_inode, be64_to_cpu(str->di_size)); gfs2_dinode_in()
350 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); gfs2_dinode_in()
353 if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0) gfs2_dinode_in()
354 ip->i_inode.i_atime = atime; gfs2_dinode_in()
355 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); gfs2_dinode_in()
356 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); gfs2_dinode_in()
357 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); gfs2_dinode_in()
358 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); gfs2_dinode_in()
360 ip->i_goal = be64_to_cpu(str->di_goal_meta); gfs2_dinode_in()
361 ip->i_generation = be64_to_cpu(str->di_generation); gfs2_dinode_in()
363 ip->i_diskflags = be32_to_cpu(str->di_flags); gfs2_dinode_in()
364 ip->i_eattr = be64_to_cpu(str->di_eattr); gfs2_dinode_in()
366 gfs2_set_inode_flags(&ip->i_inode); gfs2_dinode_in()
370 ip->i_height = (u8)height; gfs2_dinode_in()
375 ip->i_depth = (u8)depth; gfs2_dinode_in()
376 ip->i_entries = be32_to_cpu(str->di_entries); gfs2_dinode_in()
378 if (S_ISREG(ip->i_inode.i_mode)) gfs2_dinode_in()
379 gfs2_set_aops(&ip->i_inode); gfs2_dinode_in()
383 gfs2_consist_inode(ip); gfs2_dinode_in()
389 * @ip: The GFS2 inode
394 int gfs2_inode_refresh(struct gfs2_inode *ip) gfs2_inode_refresh() argument
399 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_inode_refresh()
403 error = gfs2_dinode_in(ip, dibh->b_data); gfs2_inode_refresh()
405 clear_bit(GIF_INVALID, &ip->i_flags); gfs2_inode_refresh()
422 struct gfs2_inode *ip = gl->gl_object; inode_go_lock() local
425 if (!ip || (gh->gh_flags & GL_SKIP)) inode_go_lock()
428 if (test_bit(GIF_INVALID, &ip->i_flags)) { inode_go_lock()
429 error = gfs2_inode_refresh(ip); inode_go_lock()
435 inode_dio_wait(&ip->i_inode); inode_go_lock()
437 if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && inode_go_lock()
441 if (list_empty(&ip->i_trunc_list)) inode_go_lock()
442 list_add(&sdp->sd_trunc_list, &ip->i_trunc_list); inode_go_lock()
454 * @ip: the inode
460 const struct gfs2_inode *ip = gl->gl_object; inode_go_dump() local
461 if (ip == NULL) inode_go_dump()
464 (unsigned long long)ip->i_no_formal_ino, inode_go_dump()
465 (unsigned long long)ip->i_no_addr, inode_go_dump()
466 IF2DT(ip->i_inode.i_mode), ip->i_flags, inode_go_dump()
467 (unsigned int)ip->i_diskflags, inode_go_dump()
468 (unsigned long long)i_size_read(&ip->i_inode)); inode_go_dump()
506 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); freeze_go_xmote_bh() local
507 struct gfs2_glock *j_gl = ip->i_gl; freeze_go_xmote_bh()
549 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; iopen_go_callback() local
556 gl->gl_state == LM_ST_SHARED && ip) { iopen_go_callback()
H A Dinode.h19 extern int gfs2_internal_read(struct gfs2_inode *ip,
23 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip) gfs2_is_stuffed() argument
25 return !ip->i_height; gfs2_is_stuffed()
28 static inline int gfs2_is_jdata(const struct gfs2_inode *ip) gfs2_is_jdata() argument
30 return ip->i_diskflags & GFS2_DIF_JDATA; gfs2_is_jdata()
33 static inline int gfs2_is_writeback(const struct gfs2_inode *ip) gfs2_is_writeback() argument
35 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_is_writeback()
36 return (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK) && !gfs2_is_jdata(ip); gfs2_is_writeback()
39 static inline int gfs2_is_ordered(const struct gfs2_inode *ip) gfs2_is_ordered() argument
41 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_is_ordered()
42 return (sdp->sd_args.ar_data == GFS2_DATA_ORDERED) && !gfs2_is_jdata(ip); gfs2_is_ordered()
45 static inline int gfs2_is_dir(const struct gfs2_inode *ip) gfs2_is_dir() argument
47 return S_ISDIR(ip->i_inode.i_mode); gfs2_is_dir()
69 static inline int gfs2_check_inum(const struct gfs2_inode *ip, u64 no_addr, gfs2_check_inum() argument
72 return ip->i_no_addr == no_addr && ip->i_no_formal_ino == no_formal_ino; gfs2_check_inum()
75 static inline void gfs2_inum_out(const struct gfs2_inode *ip, gfs2_inum_out() argument
78 dent->de_inum.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); gfs2_inum_out()
79 dent->de_inum.no_addr = cpu_to_be64(ip->i_no_addr); gfs2_inum_out()
104 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
111 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
H A Dbmap.h22 * @ip: the file
29 static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip, gfs2_write_calc_reserv() argument
34 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_write_calc_reserv()
37 BUG_ON(gfs2_is_dir(ip)); gfs2_write_calc_reserv()
47 extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
54 extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
55 extern int gfs2_file_dealloc(struct gfs2_inode *ip);
56 extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
H A Dbmap.c48 * @ip: the inode
56 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, gfs2_unstuffer_page() argument
59 struct inode *inode = &ip->i_inode; gfs2_unstuffer_page()
94 if (!gfs2_is_jdata(ip)) gfs2_unstuffer_page()
96 if (!gfs2_is_writeback(ip)) gfs2_unstuffer_page()
97 gfs2_trans_add_data(ip->i_gl, bh); gfs2_unstuffer_page()
109 * @ip: The GFS2 inode to unstuff
118 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) gfs2_unstuff_dinode() argument
123 int isdir = gfs2_is_dir(ip); gfs2_unstuff_dinode()
126 down_write(&ip->i_rw_mutex); gfs2_unstuff_dinode()
128 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_unstuff_dinode()
132 if (i_size_read(&ip->i_inode)) { gfs2_unstuff_dinode()
137 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); gfs2_unstuff_dinode()
141 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1); gfs2_unstuff_dinode()
142 error = gfs2_dir_get_new_buffer(ip, block, &bh); gfs2_unstuff_dinode()
149 error = gfs2_unstuffer_page(ip, dibh, block, page); gfs2_unstuff_dinode()
157 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_unstuff_dinode()
161 if (i_size_read(&ip->i_inode)) { gfs2_unstuff_dinode()
163 gfs2_add_inode_blocks(&ip->i_inode, 1); gfs2_unstuff_dinode()
164 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); gfs2_unstuff_dinode()
167 ip->i_height = 1; gfs2_unstuff_dinode()
173 up_write(&ip->i_rw_mutex); gfs2_unstuff_dinode()
186 * through the metadata of inode "ip" to get to block "block".
189 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
299 * @ip: The inode
314 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp) lookup_metapath() argument
316 unsigned int end_of_metadata = ip->i_height - 1; lookup_metapath()
328 ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]); lookup_metapath()
333 return ip->i_height; lookup_metapath()
383 static inline void bmap_lock(struct gfs2_inode *ip, int create) bmap_lock() argument
386 down_write(&ip->i_rw_mutex); bmap_lock()
388 down_read(&ip->i_rw_mutex); bmap_lock()
391 static inline void bmap_unlock(struct gfs2_inode *ip, int create) bmap_unlock() argument
394 up_write(&ip->i_rw_mutex); bmap_unlock()
396 up_read(&ip->i_rw_mutex); bmap_unlock()
454 struct gfs2_inode *ip = GFS2_I(inode); gfs2_bmap_alloc() local
472 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_bmap_alloc()
488 if (height == ip->i_height) { gfs2_bmap_alloc()
495 iblks = height - ip->i_height; gfs2_bmap_alloc()
508 error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); gfs2_bmap_alloc()
512 if (state != ALLOC_DATA || gfs2_is_jdata(ip)) gfs2_bmap_alloc()
522 for (; i - 1 < height - ip->i_height && n > 0; i++, n--) gfs2_bmap_alloc()
523 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); gfs2_bmap_alloc()
524 if (i - 1 == height - ip->i_height) { gfs2_bmap_alloc()
549 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); gfs2_bmap_alloc()
551 gfs2_indirect_init(mp, ip->i_gl, i, gfs2_bmap_alloc()
561 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); gfs2_bmap_alloc()
580 ip->i_height = height; gfs2_bmap_alloc()
581 gfs2_add_inode_blocks(&ip->i_inode, alloced); gfs2_bmap_alloc()
582 gfs2_dinode_out(ip, mp->mp_bh[0]->b_data); gfs2_bmap_alloc()
606 struct gfs2_inode *ip = GFS2_I(inode); gfs2_block_map() local
623 bmap_lock(ip, create); gfs2_block_map()
627 trace_gfs2_bmap(ip, bh_map, lblock, create, 1); gfs2_block_map()
628 if (gfs2_is_dir(ip)) { gfs2_block_map()
633 ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]); gfs2_block_map()
637 height = ip->i_height; gfs2_block_map()
643 if (height > ip->i_height || gfs2_is_stuffed(ip)) gfs2_block_map()
645 ret = lookup_metapath(ip, &mp); gfs2_block_map()
648 if (ret != ip->i_height) gfs2_block_map()
650 ptr = metapointer(ip->i_height - 1, &mp); gfs2_block_map()
654 bh = mp.mp_bh[ip->i_height - 1]; gfs2_block_map()
662 trace_gfs2_bmap(ip, bh_map, lblock, create, ret); gfs2_block_map()
663 bmap_unlock(ip, create); gfs2_block_map()
669 BUG_ON(gfs2_is_stuffed(ip)); gfs2_block_map()
705 * @ip: the inode
716 static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, do_strip() argument
720 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); do_strip()
746 metadata = (height != ip->i_height - 1); do_strip()
749 else if (ip->i_depth) do_strip()
766 gfs2_rlist_add(ip, &rlist, bstart); do_strip()
774 gfs2_rlist_add(ip, &rlist, bstart); do_strip()
790 if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */ do_strip()
791 gfs2_rs_deltree(ip->i_res); do_strip()
799 down_write(&ip->i_rw_mutex); do_strip()
801 gfs2_trans_add_meta(ip->i_gl, dibh); do_strip()
802 gfs2_trans_add_meta(ip->i_gl, bh); do_strip()
818 __gfs2_free_blocks(ip, bstart, blen, metadata); do_strip()
827 gfs2_add_inode_blocks(&ip->i_inode, -1); do_strip()
830 __gfs2_free_blocks(ip, bstart, blen, metadata); do_strip()
835 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, do_strip()
836 ip->i_inode.i_gid); do_strip()
838 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; do_strip()
840 gfs2_dinode_out(ip, dibh->b_data); do_strip()
842 up_write(&ip->i_rw_mutex); do_strip()
856 * @ip: the inode
870 static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, recursive_scan() argument
874 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); recursive_scan()
882 error = gfs2_meta_inode_buffer(ip, &bh); recursive_scan()
890 error = gfs2_meta_indirect_buffer(ip, height, block, &bh); recursive_scan()
900 error = do_strip(ip, dibh, bh, top, bottom, height, sm); recursive_scan()
904 if (height < ip->i_height - 1) { recursive_scan()
906 gfs2_metapath_ra(ip->i_gl, bh, top); recursive_scan()
914 error = recursive_scan(ip, dibh, mp, height + 1, bn, recursive_scan()
934 struct gfs2_inode *ip = GFS2_I(inode); gfs2_block_truncate_page() local
985 if (!gfs2_is_writeback(ip)) gfs2_block_truncate_page()
986 gfs2_trans_add_data(ip->i_gl, bh); gfs2_block_truncate_page()
1033 struct gfs2_inode *ip = GFS2_I(inode); trunc_start() local
1037 int journaled = gfs2_is_jdata(ip); trunc_start()
1047 error = gfs2_meta_inode_buffer(ip, &dibh); trunc_start()
1051 gfs2_trans_add_meta(ip->i_gl, dibh); trunc_start()
1053 if (gfs2_is_stuffed(ip)) { trunc_start()
1061 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; trunc_start()
1065 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; trunc_start()
1066 gfs2_dinode_out(ip, dibh->b_data); trunc_start()
1085 static int trunc_dealloc(struct gfs2_inode *ip, u64 size) trunc_dealloc() argument
1087 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); trunc_dealloc()
1088 unsigned int height = ip->i_height; trunc_dealloc()
1098 find_metapath(sdp, lblock, &mp, ip->i_height); trunc_dealloc()
1103 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); trunc_dealloc()
1112 error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm); trunc_dealloc()
1117 gfs2_quota_unhold(ip); trunc_dealloc()
1122 static int trunc_end(struct gfs2_inode *ip) trunc_end() argument
1124 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); trunc_end()
1132 down_write(&ip->i_rw_mutex); trunc_end()
1134 error = gfs2_meta_inode_buffer(ip, &dibh); trunc_end()
1138 if (!i_size_read(&ip->i_inode)) { trunc_end()
1139 ip->i_height = 0; trunc_end()
1140 ip->i_goal = ip->i_no_addr; trunc_end()
1142 gfs2_ordered_del_inode(ip); trunc_end()
1144 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; trunc_end()
1145 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG; trunc_end()
1147 gfs2_trans_add_meta(ip->i_gl, dibh); trunc_end()
1148 gfs2_dinode_out(ip, dibh->b_data); trunc_end()
1152 up_write(&ip->i_rw_mutex); trunc_end()
1171 struct gfs2_inode *ip = GFS2_I(inode); do_shrink() local
1177 if (gfs2_is_stuffed(ip)) do_shrink()
1180 error = trunc_dealloc(ip, newsize); do_shrink()
1182 error = trunc_end(ip); do_shrink()
1218 struct gfs2_inode *ip = GFS2_I(inode); do_grow() local
1225 if (gfs2_is_stuffed(ip) && do_grow()
1227 error = gfs2_quota_lock_check(ip, &ap); do_grow()
1231 error = gfs2_inplace_reserve(ip, &ap); do_grow()
1244 error = gfs2_unstuff_dinode(ip, NULL); do_grow()
1249 error = gfs2_meta_inode_buffer(ip, &dibh); do_grow()
1254 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; do_grow()
1255 gfs2_trans_add_meta(ip->i_gl, dibh); do_grow()
1256 gfs2_dinode_out(ip, dibh->b_data); do_grow()
1263 gfs2_inplace_release(ip); do_grow()
1265 gfs2_quota_unlock(ip); do_grow()
1284 struct gfs2_inode *ip = GFS2_I(inode); gfs2_setattr_size() local
1300 ret = gfs2_rs_alloc(ip); gfs2_setattr_size()
1310 gfs2_rs_deltree(ip->i_res); gfs2_setattr_size()
1317 int gfs2_truncatei_resume(struct gfs2_inode *ip) gfs2_truncatei_resume() argument
1320 error = trunc_dealloc(ip, i_size_read(&ip->i_inode)); gfs2_truncatei_resume()
1322 error = trunc_end(ip); gfs2_truncatei_resume()
1326 int gfs2_file_dealloc(struct gfs2_inode *ip) gfs2_file_dealloc() argument
1328 return trunc_dealloc(ip, 0); gfs2_file_dealloc()
1405 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); gfs2_map_journal_extents() local
1427 lblock += (bh.b_size >> ip->i_inode.i_blkbits); gfs2_map_journal_extents()
1448 * @ip: the file being written to
1455 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, gfs2_write_alloc_required() argument
1458 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_write_alloc_required()
1467 if (gfs2_is_stuffed(ip)) { gfs2_write_alloc_required()
1475 BUG_ON(gfs2_is_dir(ip)); gfs2_write_alloc_required()
1476 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; gfs2_write_alloc_required()
1486 gfs2_block_map(&ip->i_inode, lblock, &bh, 0); gfs2_write_alloc_required()
1490 lblock += (bh.b_size >> ip->i_inode.i_blkbits); gfs2_write_alloc_required()
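
The bmap.c comment quoted above ("through the metadata of inode \"ip\" to get to block \"block\"", the height 3 example) describes the metapath idea: a logical block number is decomposed into one index per indirection level, and lookup_metapath() then reads one indirect buffer per level up to ip->i_height. A generic sketch of that radix decomposition, assuming a uniform number of pointers per indirect block (the real find_metapath() also has to account for the dinode's smaller pointer array, which this ignores):

    #include <inttypes.h>
    #include <stdio.h>

    /* Split a logical block number into per-level indirect indices,
     * most significant level first, for indirect blocks that each hold
     * ptrs_per_blk pointers.  Illustrative only. */
    static void metapath_sketch(uint64_t lblock, unsigned height,
                                uint64_t ptrs_per_blk, uint64_t *idx)
    {
        for (unsigned level = height; level-- > 0; ) {
            idx[level] = lblock % ptrs_per_blk;
            lblock /= ptrs_per_blk;
        }
    }

    int main(void)
    {
        uint64_t idx[3];

        /* e.g. roughly 509 pointers fit a 4 KiB block after its header */
        metapath_sketch(1000000, 3, 509, idx);
        printf("%" PRIu64 " %" PRIu64 " %" PRIu64 "\n", idx[0], idx[1], idx[2]);
        return 0;
    }
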
H A Drgrp.h42 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
45 extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
47 extern void gfs2_inplace_release(struct gfs2_inode *ip);
49 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
52 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
54 extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
55 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
56 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
57 extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
69 extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
86 extern void check_and_update_goal(struct gfs2_inode *ip);
H A Dinode.c48 struct gfs2_inode *ip = GFS2_I(inode); iget_test() local
51 if (ip->i_no_addr == data->no_addr) { iget_test()
64 struct gfs2_inode *ip = GFS2_I(inode); iget_set() local
70 ip->i_no_addr = data->no_addr; iget_set()
144 struct gfs2_inode *ip; gfs2_inode_lookup() local
149 ip = GFS2_I(inode); gfs2_inode_lookup()
156 ip->i_no_formal_ino = no_formal_ino; gfs2_inode_lookup()
158 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); gfs2_inode_lookup()
161 ip->i_gl->gl_object = ip; gfs2_inode_lookup()
167 set_bit(GIF_INVALID, &ip->i_flags); gfs2_inode_lookup()
168 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); gfs2_inode_lookup()
172 ip->i_iopen_gh.gh_gl->gl_object = ip; gfs2_inode_lookup()
192 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_inode_lookup()
193 ip->i_iopen_gh.gh_gl->gl_object = NULL; gfs2_inode_lookup()
194 gfs2_glock_dq_uninit(&ip->i_iopen_gh); gfs2_inode_lookup()
199 ip->i_gl->gl_object = NULL; gfs2_inode_lookup()
200 gfs2_glock_put(ip->i_gl); gfs2_inode_lookup()
379 static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks) alloc_dinode() argument
381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); alloc_dinode()
385 error = gfs2_quota_lock_check(ip, &ap); alloc_dinode()
389 error = gfs2_inplace_reserve(ip, &ap); alloc_dinode()
397 error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation); alloc_dinode()
398 ip->i_no_formal_ino = ip->i_generation; alloc_dinode()
399 ip->i_inode.i_ino = ip->i_no_addr; alloc_dinode()
400 ip->i_goal = ip->i_no_addr; alloc_dinode()
405 gfs2_inplace_release(ip); alloc_dinode()
407 gfs2_quota_unlock(ip); alloc_dinode()
431 * @ip: The inode in question
437 static void gfs2_init_xattr(struct gfs2_inode *ip) gfs2_init_xattr() argument
439 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_init_xattr()
443 bh = gfs2_meta_new(ip->i_gl, ip->i_eattr); gfs2_init_xattr()
444 gfs2_trans_add_meta(ip->i_gl, bh); gfs2_init_xattr()
459 * @ip: The inode
465 static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip, init_dinode() argument
471 dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr); init_dinode()
472 gfs2_trans_add_meta(ip->i_gl, dibh); init_dinode()
474 gfs2_dinode_out(ip, di); init_dinode()
476 di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev)); init_dinode()
477 di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev)); init_dinode()
485 switch(ip->i_inode.i_mode & S_IFMT) { init_dinode()
490 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, ip->i_inode.i_size); init_dinode()
521 struct gfs2_inode *ip, struct gfs2_diradd *da) link_dinode()
545 error = gfs2_dir_add(&dip->i_inode, name, ip, da); link_dinode()
593 struct gfs2_inode *dip = GFS2_I(dir), *ip; gfs2_create_inode() local
655 ip = GFS2_I(inode); gfs2_create_inode()
656 error = gfs2_rs_alloc(ip); gfs2_create_inode()
668 ip->i_goal = dip->i_goal; gfs2_create_inode()
669 ip->i_diskflags = 0; gfs2_create_inode()
670 ip->i_eattr = 0; gfs2_create_inode()
671 ip->i_height = 0; gfs2_create_inode()
672 ip->i_depth = 0; gfs2_create_inode()
673 ip->i_entries = 0; gfs2_create_inode()
679 ip->i_diskflags |= GFS2_DIF_JDATA; gfs2_create_inode()
683 ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA); gfs2_create_inode()
684 ip->i_diskflags |= GFS2_DIF_JDATA; gfs2_create_inode()
685 ip->i_entries = 2; gfs2_create_inode()
697 error = alloc_dinode(ip, aflags, &blocks); gfs2_create_inode()
703 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); gfs2_create_inode()
707 ip->i_gl->gl_object = ip; gfs2_create_inode()
708 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); gfs2_create_inode()
717 ip->i_eattr = ip->i_no_addr + 1; gfs2_create_inode()
718 gfs2_init_xattr(ip); gfs2_create_inode()
720 init_dinode(dip, ip, symname); gfs2_create_inode()
723 error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); gfs2_create_inode()
727 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); gfs2_create_inode()
731 ip->i_iopen_gh.gh_gl->gl_object = ip; gfs2_create_inode()
749 error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name, gfs2_create_inode()
754 error = link_dinode(dip, name, ip, &da); gfs2_create_inode()
770 if (ip->i_gl) gfs2_create_inode()
771 gfs2_glock_put(ip->i_gl); gfs2_create_inode()
777 if (ip->i_gl) gfs2_create_inode()
778 gfs2_glock_put(ip->i_gl); gfs2_create_inode()
779 gfs2_rs_delete(ip, NULL); gfs2_create_inode()
892 struct gfs2_inode *ip = GFS2_I(inode); gfs2_link() local
906 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); gfs2_link()
944 if (!ip->i_inode.i_nlink) gfs2_link()
947 if (ip->i_inode.i_nlink == (u32)-1) gfs2_link()
973 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_link()
977 error = gfs2_dir_add(dir, &dentry->d_name, ip, &da); gfs2_link()
981 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_link()
982 inc_nlink(&ip->i_inode); gfs2_link()
983 ip->i_inode.i_ctime = CURRENT_TIME; gfs2_link()
1013 * @ip: the inode
1021 const struct gfs2_inode *ip) gfs2_unlink_ok()
1025 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) gfs2_unlink_ok()
1030 !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER)) gfs2_unlink_ok()
1040 return gfs2_dir_check(&dip->i_inode, name, ip); gfs2_unlink_ok()
1059 struct gfs2_inode *ip = GFS2_I(inode); gfs2_unlink_inode() local
1066 ip->i_entries = 0; gfs2_unlink_inode()
1095 struct gfs2_inode *ip = GFS2_I(inode); gfs2_unlink() local
1107 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); gfs2_unlink()
1109 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); gfs2_unlink()
1130 if (ip->i_entries > 2 || inode->i_nlink > 2) gfs2_unlink()
1138 error = gfs2_unlink_ok(dip, &dentry->d_name, ip); gfs2_unlink()
1311 * @ip: The inode being moved
1313 * @dir_rename: True if ip is a directory
1318 static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip, update_moved_ino() argument
1325 return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR); update_moved_ino()
1327 error = gfs2_meta_inode_buffer(ip, &dibh); update_moved_ino()
1330 ip->i_inode.i_ctime = CURRENT_TIME; update_moved_ino()
1331 gfs2_trans_add_meta(ip->i_gl, dibh); update_moved_ino()
1332 gfs2_dinode_out(ip, dibh->b_data); update_moved_ino()
1353 struct gfs2_inode *ip = GFS2_I(d_inode(odentry)); gfs2_rename() local
1366 if (ip == nip) gfs2_rename()
1384 if (S_ISDIR(ip->i_inode.i_mode)) { gfs2_rename()
1387 error = gfs2_ok_to_move(ip, ndip); gfs2_rename()
1399 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); gfs2_rename()
1421 if (ip->i_inode.i_nlink == 0) gfs2_rename()
1426 error = gfs2_unlink_ok(odip, &odentry->d_name, ip); gfs2_rename()
1478 if (S_ISDIR(ip->i_inode.i_mode) && gfs2_rename()
1526 error = update_moved_ino(ip, ndip, dir_rename); gfs2_rename()
1534 error = gfs2_dir_add(ndir, &ndentry->d_name, ip, &da); gfs2_rename()
1726 struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); gfs2_follow_link() local
1733 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); gfs2_follow_link()
1740 size = (unsigned int)i_size_read(&ip->i_inode); gfs2_follow_link()
1742 gfs2_consist_inode(ip); gfs2_follow_link()
1747 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_follow_link()
1781 struct gfs2_inode *ip; gfs2_permission() local
1787 ip = GFS2_I(inode); gfs2_permission()
1788 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) { gfs2_permission()
1791 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); gfs2_permission()
1816 * @ip:
1840 struct gfs2_inode *ip = GFS2_I(inode); setattr_chown() local
1861 error = gfs2_rs_alloc(ip); setattr_chown()
1869 error = gfs2_quota_lock(ip, nuid, ngid); setattr_chown()
1873 ap.target = gfs2_get_inode_blocks(&ip->i_inode); setattr_chown()
1877 error = gfs2_quota_check(ip, nuid, ngid, &ap); setattr_chown()
1892 gfs2_quota_change(ip, -(s64)ap.target, ouid, ogid); setattr_chown()
1893 gfs2_quota_change(ip, ap.target, nuid, ngid); setattr_chown()
1899 gfs2_quota_unlock(ip); setattr_chown()
1919 struct gfs2_inode *ip = GFS2_I(inode); gfs2_setattr() local
1923 error = gfs2_rs_alloc(ip); gfs2_setattr()
1927 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); gfs2_setattr()
1975 struct gfs2_inode *ip = GFS2_I(inode); gfs2_getattr() local
1980 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) { gfs2_getattr()
1981 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); gfs2_getattr()
1998 struct gfs2_inode *ip = GFS2_I(inode); gfs2_setxattr() local
2002 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_setxattr()
2005 ret = gfs2_rs_alloc(ip); gfs2_setxattr()
2018 struct gfs2_inode *ip = GFS2_I(inode); gfs2_getxattr() local
2023 if (gfs2_glock_is_locked_by_me(ip->i_gl)) gfs2_getxattr()
2026 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); gfs2_getxattr()
2039 struct gfs2_inode *ip = GFS2_I(inode); gfs2_removexattr() local
2043 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_removexattr()
2046 ret = gfs2_rs_alloc(ip); gfs2_removexattr()
2058 struct gfs2_inode *ip = GFS2_I(inode); gfs2_fiemap() local
2068 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_fiemap()
2072 if (gfs2_is_stuffed(ip)) { gfs2_fiemap()
2073 u64 phys = ip->i_no_addr << inode->i_blkbits; gfs2_fiemap()
520 link_dinode(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip, struct gfs2_diradd *da) link_dinode() argument
1020 gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, const struct gfs2_inode *ip) gfs2_unlink_ok() argument
H A Dfile.c58 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); gfs2_llseek() local
66 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, gfs2_llseek()
155 struct gfs2_inode *ip = GFS2_I(inode); gfs2_get_flags() local
160 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_get_flags()
165 fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); gfs2_get_flags()
166 if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) gfs2_get_flags()
178 struct gfs2_inode *ip = GFS2_I(inode); gfs2_set_inode_flags() local
182 if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode)) gfs2_set_inode_flags()
184 if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) gfs2_set_inode_flags()
186 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) gfs2_set_inode_flags()
188 if (ip->i_diskflags & GFS2_DIF_NOATIME) gfs2_set_inode_flags()
190 if (ip->i_diskflags & GFS2_DIF_SYNC) gfs2_set_inode_flags()
215 struct gfs2_inode *ip = GFS2_I(inode); do_gfs2_set_flags() local
226 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); do_gfs2_set_flags()
235 flags = ip->i_diskflags; do_gfs2_set_flags()
259 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); do_gfs2_set_flags()
270 error = gfs2_meta_inode_buffer(ip, &bh); do_gfs2_set_flags()
273 gfs2_trans_add_meta(ip->i_gl, bh); do_gfs2_set_flags()
274 ip->i_diskflags = new_flags; do_gfs2_set_flags()
275 gfs2_dinode_out(ip, bh->b_data); do_gfs2_set_flags()
335 struct gfs2_inode *ip = GFS2_I(inode); gfs2_size_hint() local
339 if (hint > atomic_read(&ip->i_res->rs_sizehint)) gfs2_size_hint()
340 atomic_set(&ip->i_res->rs_sizehint, hint); gfs2_size_hint()
385 struct gfs2_inode *ip = GFS2_I(inode); gfs2_page_mkwrite() local
404 ret = gfs2_rs_alloc(ip); gfs2_page_mkwrite()
410 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_page_mkwrite()
415 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); gfs2_page_mkwrite()
416 set_bit(GIF_SW_PAGED, &ip->i_flags); gfs2_page_mkwrite()
418 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { gfs2_page_mkwrite()
431 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); gfs2_page_mkwrite()
433 ret = gfs2_quota_lock_check(ip, &ap); gfs2_page_mkwrite()
436 ret = gfs2_inplace_reserve(ip, &ap); gfs2_page_mkwrite()
441 if (gfs2_is_jdata(ip)) gfs2_page_mkwrite()
445 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); gfs2_page_mkwrite()
468 if (gfs2_is_stuffed(ip)) gfs2_page_mkwrite()
469 ret = gfs2_unstuff_dinode(ip, page); gfs2_page_mkwrite()
478 gfs2_inplace_release(ip); gfs2_page_mkwrite()
480 gfs2_quota_unlock(ip); gfs2_page_mkwrite()
516 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); gfs2_mmap() local
519 !IS_NOATIME(&ip->i_inode)) { gfs2_mmap()
523 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, gfs2_mmap()
587 struct gfs2_inode *ip = GFS2_I(inode); gfs2_open() local
592 if (S_ISREG(ip->i_inode.i_mode)) { gfs2_open()
593 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, gfs2_open()
618 struct gfs2_inode *ip = GFS2_I(inode); gfs2_release() local
626 gfs2_rs_delete(ip, &inode->i_writecount); gfs2_release()
657 struct gfs2_inode *ip = GFS2_I(inode); gfs2_fsync() local
666 if (!gfs2_is_jdata(ip)) gfs2_fsync()
675 if (gfs2_is_jdata(ip)) gfs2_fsync()
677 gfs2_ail_flush(ip->i_gl, 1); gfs2_fsync()
703 struct gfs2_inode *ip = GFS2_I(file_inode(file)); gfs2_file_write_iter() local
706 ret = gfs2_rs_alloc(ip); gfs2_file_write_iter()
715 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_file_write_iter()
727 struct gfs2_inode *ip = GFS2_I(inode); fallocate_chunk() local
733 error = gfs2_meta_inode_buffer(ip, &dibh); fallocate_chunk()
737 gfs2_trans_add_meta(ip->i_gl, dibh); fallocate_chunk()
739 if (gfs2_is_stuffed(ip)) { fallocate_chunk()
740 error = gfs2_unstuff_dinode(ip, NULL); fallocate_chunk()
770 * @ip: The inode in question.
778 static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len, calc_max_reserv() argument
783 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); calc_max_reserv()
796 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); calc_max_reserv()
804 struct gfs2_inode *ip = GFS2_I(inode); __gfs2_fallocate() local
829 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); __gfs2_fallocate()
835 if (!gfs2_write_alloc_required(ip, offset, bytes)) { __gfs2_fallocate()
851 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); __gfs2_fallocate()
854 error = gfs2_quota_lock_check(ip, &ap); __gfs2_fallocate()
862 error = gfs2_inplace_reserve(ip, &ap); __gfs2_fallocate()
873 calc_max_reserv(ip, &max_bytes, &data_blocks, __gfs2_fallocate()
877 RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks); __gfs2_fallocate()
878 if (gfs2_is_jdata(ip)) __gfs2_fallocate()
894 gfs2_inplace_release(ip); __gfs2_fallocate()
895 gfs2_quota_unlock(ip); __gfs2_fallocate()
907 gfs2_inplace_release(ip); __gfs2_fallocate()
909 gfs2_quota_unlock(ip); __gfs2_fallocate()
916 struct gfs2_inode *ip = GFS2_I(inode); gfs2_fallocate() local
920 if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip)) gfs2_fallocate()
925 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_fallocate()
941 ret = gfs2_rs_alloc(ip); gfs2_fallocate()
947 gfs2_rs_deltree(ip->i_res); gfs2_fallocate()
963 struct gfs2_inode *ip = GFS2_I(out->f_mapping->host); gfs2_file_splice_write() local
965 error = gfs2_rs_alloc(ip); gfs2_file_splice_write()
987 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); gfs2_lock() local
993 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) gfs2_lock()
1007 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); gfs2_lock()
1009 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); gfs2_lock()
1011 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); gfs2_lock()
1018 struct gfs2_inode *ip = GFS2_I(file_inode(file)); do_flock() local
1039 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, do_flock()
1060 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); do_flock()
H A Dexport.c36 struct gfs2_inode *ip = GFS2_I(inode); gfs2_encode_fh() local
46 fh[0] = cpu_to_be32(ip->i_no_formal_ino >> 32); gfs2_encode_fh()
47 fh[1] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); gfs2_encode_fh()
48 fh[2] = cpu_to_be32(ip->i_no_addr >> 32); gfs2_encode_fh()
49 fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); gfs2_encode_fh()
55 ip = GFS2_I(parent); gfs2_encode_fh()
57 fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32); gfs2_encode_fh()
58 fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); gfs2_encode_fh()
59 fh[6] = cpu_to_be32(ip->i_no_addr >> 32); gfs2_encode_fh()
60 fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); gfs2_encode_fh()
93 struct gfs2_inode *dip, *ip; gfs2_get_name() local
109 ip = GFS2_I(inode); gfs2_get_name()
112 gnfd.inum.no_addr = ip->i_no_addr; gfs2_get_name()
113 gnfd.inum.no_formal_ino = ip->i_no_formal_ino; gfs2_get_name()
H A Dquota.c371 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); bh_get() local
387 bh_map.b_size = 1 << ip->i_inode.i_blkbits; bh_get()
388 error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0); bh_get()
391 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh); bh_get()
530 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) gfs2_quota_hold() argument
532 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_hold()
536 if (ip->i_res == NULL) { gfs2_quota_hold()
537 error = gfs2_rs_alloc(ip); gfs2_quota_hold()
542 qd = ip->i_res->rs_qa_qd; gfs2_quota_hold()
544 if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) || gfs2_quota_hold()
545 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) gfs2_quota_hold()
551 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); gfs2_quota_hold()
554 ip->i_res->rs_qa_qd_num++; gfs2_quota_hold()
557 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); gfs2_quota_hold()
560 ip->i_res->rs_qa_qd_num++; gfs2_quota_hold()
564 !uid_eq(uid, ip->i_inode.i_uid)) { gfs2_quota_hold()
568 ip->i_res->rs_qa_qd_num++; gfs2_quota_hold()
573 !gid_eq(gid, ip->i_inode.i_gid)) { gfs2_quota_hold()
577 ip->i_res->rs_qa_qd_num++; gfs2_quota_hold()
583 gfs2_quota_unhold(ip); gfs2_quota_hold()
587 void gfs2_quota_unhold(struct gfs2_inode *ip) gfs2_quota_unhold() argument
589 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_unhold()
592 if (ip->i_res == NULL) gfs2_quota_unhold()
594 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); gfs2_quota_unhold()
596 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { gfs2_quota_unhold()
597 qdsb_put(ip->i_res->rs_qa_qd[x]); gfs2_quota_unhold()
598 ip->i_res->rs_qa_qd[x] = NULL; gfs2_quota_unhold()
600 ip->i_res->rs_qa_qd_num = 0; gfs2_quota_unhold()
618 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); do_qc() local
623 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); do_qc()
657 static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, gfs2_write_buf_to_page() argument
660 struct inode *inode = &ip->i_inode; gfs2_write_buf_to_page()
705 gfs2_trans_add_data(ip->i_gl, bh); gfs2_write_buf_to_page()
733 static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, gfs2_write_disk_quota() argument
753 error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr, gfs2_write_disk_quota()
757 error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0, gfs2_write_disk_quota()
765 * @ip: The quota inode
777 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, gfs2_adjust_quota() argument
781 struct inode *inode = &ip->i_inode; gfs2_adjust_quota()
787 if (gfs2_is_stuffed(ip)) { gfs2_adjust_quota()
788 err = gfs2_unstuff_dinode(ip, NULL); gfs2_adjust_quota()
794 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); gfs2_adjust_quota()
819 err = gfs2_write_disk_quota(ip, &q, loc); gfs2_adjust_quota()
835 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); do_sync() local
846 error = gfs2_rs_alloc(ip); do_sync()
850 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), do_sync()
858 mutex_lock(&ip->i_inode.i_mutex); do_sync()
866 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); do_sync()
872 if (gfs2_write_alloc_required(ip, offset, do_sync()
890 error = gfs2_inplace_reserve(ip, &ap); do_sync()
895 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; do_sync()
904 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL); do_sync()
917 gfs2_inplace_release(ip); do_sync()
923 mutex_unlock(&ip->i_inode.i_mutex); do_sync()
925 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH); do_sync()
931 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); update_qd() local
939 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); update_qd()
958 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); do_glock() local
979 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); do_glock()
1002 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) gfs2_quota_lock() argument
1004 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_lock()
1009 error = gfs2_quota_hold(ip, uid, gid); gfs2_quota_lock()
1017 sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num, gfs2_quota_lock()
1020 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { gfs2_quota_lock()
1021 qd = ip->i_res->rs_qa_qd[x]; gfs2_quota_lock()
1022 error = do_glock(qd, NO_FORCE, &ip->i_res->rs_qa_qd_ghs[x]); gfs2_quota_lock()
1028 set_bit(GIF_QD_LOCKED, &ip->i_flags); gfs2_quota_lock()
1031 gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]); gfs2_quota_lock()
1032 gfs2_quota_unhold(ip); gfs2_quota_lock()
1074 void gfs2_quota_unlock(struct gfs2_inode *ip) gfs2_quota_unlock() argument
1076 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_unlock()
1082 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) gfs2_quota_unlock()
1085 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { gfs2_quota_unlock()
1089 qd = ip->i_res->rs_qa_qd[x]; gfs2_quota_unlock()
1092 gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]); gfs2_quota_unlock()
1121 gfs2_quota_unhold(ip); gfs2_quota_unlock()
1140 * @ip: The inode for which this check is being performed
1155 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, gfs2_quota_check() argument
1158 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_quota_check()
1165 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) gfs2_quota_check()
1171 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { gfs2_quota_check()
1172 qd = ip->i_res->rs_qa_qd[x]; gfs2_quota_check()
1215 void gfs2_quota_change(struct gfs2_inode *ip, s64 change, gfs2_quota_change() argument
1221 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change)) gfs2_quota_change()
1223 if (ip->i_diskflags & GFS2_DIF_SYSTEM) gfs2_quota_change()
1226 for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) { gfs2_quota_change()
1227 qd = ip->i_res->rs_qa_qd[x]; gfs2_quota_change()
1302 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); gfs2_quota_init() local
1334 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen); gfs2_quota_init()
1339 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); gfs2_quota_init()
1462 struct gfs2_inode *ip; quotad_check_trunc_list() local
1465 ip = NULL; quotad_check_trunc_list()
1468 ip = list_entry(sdp->sd_trunc_list.next, quotad_check_trunc_list()
1470 list_del_init(&ip->i_trunc_list); quotad_check_trunc_list()
1473 if (ip == NULL) quotad_check_trunc_list()
1475 gfs2_glock_finish_truncate(ip); quotad_check_trunc_list()
1615 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); gfs2_set_dqblk() local
1638 error = gfs2_rs_alloc(ip); gfs2_set_dqblk()
1642 mutex_lock(&ip->i_inode.i_mutex); gfs2_set_dqblk()
1646 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); gfs2_set_dqblk()
1672 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); gfs2_set_dqblk()
1673 if (gfs2_is_stuffed(ip)) gfs2_set_dqblk()
1677 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), gfs2_set_dqblk()
1681 error = gfs2_inplace_reserve(ip, &ap); gfs2_set_dqblk()
1684 blocks += gfs2_rg_blocks(ip, blocks); gfs2_set_dqblk()
1694 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); gfs2_set_dqblk()
1701 gfs2_inplace_release(ip); gfs2_set_dqblk()
1707 mutex_unlock(&ip->i_inode.i_mutex); gfs2_set_dqblk()
H A Dsuper.c348 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); gfs2_jdesc_check() local
357 if (gfs2_write_alloc_required(ip, 0, size)) { gfs2_jdesc_check()
358 gfs2_consist_inode(ip); gfs2_jdesc_check()
401 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); gfs2_make_fs_rw() local
402 struct gfs2_glock *j_gl = ip->i_gl; gfs2_make_fs_rw()
645 struct gfs2_inode *ip; gfs2_lock_fs_check_clean() local
658 ip = GFS2_I(jd->jd_inode); gfs2_lock_fs_check_clean()
659 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh); gfs2_lock_fs_check_clean()
696 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) gfs2_dinode_out() argument
703 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); gfs2_dinode_out()
704 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); gfs2_dinode_out()
705 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); gfs2_dinode_out()
706 str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode)); gfs2_dinode_out()
707 str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode)); gfs2_dinode_out()
708 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); gfs2_dinode_out()
709 str->di_size = cpu_to_be64(i_size_read(&ip->i_inode)); gfs2_dinode_out()
710 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); gfs2_dinode_out()
711 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); gfs2_dinode_out()
712 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); gfs2_dinode_out()
713 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec); gfs2_dinode_out()
715 str->di_goal_meta = cpu_to_be64(ip->i_goal); gfs2_dinode_out()
716 str->di_goal_data = cpu_to_be64(ip->i_goal); gfs2_dinode_out()
717 str->di_generation = cpu_to_be64(ip->i_generation); gfs2_dinode_out()
719 str->di_flags = cpu_to_be32(ip->i_diskflags); gfs2_dinode_out()
720 str->di_height = cpu_to_be16(ip->i_height); gfs2_dinode_out()
721 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && gfs2_dinode_out()
722 !(ip->i_diskflags & GFS2_DIF_EXHASH) ? gfs2_dinode_out()
724 str->di_depth = cpu_to_be16(ip->i_depth); gfs2_dinode_out()
725 str->di_entries = cpu_to_be32(ip->i_entries); gfs2_dinode_out()
727 str->di_eattr = cpu_to_be64(ip->i_eattr); gfs2_dinode_out()
728 str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); gfs2_dinode_out()
729 str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec); gfs2_dinode_out()
730 str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec); gfs2_dinode_out()
743 struct gfs2_inode *ip = GFS2_I(inode); gfs2_write_inode() local
745 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); gfs2_write_inode()
750 gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH); gfs2_write_inode()
777 struct gfs2_inode *ip = GFS2_I(inode); gfs2_dirty_inode() local
788 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { gfs2_dirty_inode()
789 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_dirty_inode()
795 } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) gfs2_dirty_inode()
807 ret = gfs2_meta_inode_buffer(ip, &bh); gfs2_dirty_inode()
809 gfs2_trans_add_meta(ip->i_gl, bh); gfs2_dirty_inode()
810 gfs2_dinode_out(ip, bh->b_data); gfs2_dirty_inode()
1300 struct gfs2_inode *ip = GFS2_I(inode); gfs2_drop_inode() local
1302 if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) && inode->i_nlink) { gfs2_drop_inode()
1303 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; gfs2_drop_inode()
1425 static void gfs2_final_release_pages(struct gfs2_inode *ip) gfs2_final_release_pages() argument
1427 struct inode *inode = &ip->i_inode; gfs2_final_release_pages()
1428 struct gfs2_glock *gl = ip->i_gl; gfs2_final_release_pages()
1430 truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0); gfs2_final_release_pages()
1439 static int gfs2_dinode_dealloc(struct gfs2_inode *ip) gfs2_dinode_dealloc() argument
1441 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_dinode_dealloc()
1446 if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { gfs2_dinode_dealloc()
1447 gfs2_consist_inode(ip); gfs2_dinode_dealloc()
1455 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); gfs2_dinode_dealloc()
1459 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); gfs2_dinode_dealloc()
1461 gfs2_consist_inode(ip); gfs2_dinode_dealloc()
1475 gfs2_free_di(rgd, ip); gfs2_dinode_dealloc()
1477 gfs2_final_release_pages(ip); gfs2_dinode_dealloc()
1484 gfs2_quota_unhold(ip); gfs2_dinode_dealloc()
1513 struct gfs2_inode *ip = GFS2_I(inode); gfs2_evict_inode() local
1517 if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) { gfs2_evict_inode()
1526 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); gfs2_evict_inode()
1528 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_evict_inode()
1529 gfs2_glock_dq_uninit(&ip->i_iopen_gh); gfs2_evict_inode()
1533 if (!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) { gfs2_evict_inode()
1534 error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); gfs2_evict_inode()
1539 if (test_bit(GIF_INVALID, &ip->i_flags)) { gfs2_evict_inode()
1540 error = gfs2_inode_refresh(ip); gfs2_evict_inode()
1545 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_evict_inode()
1546 gfs2_glock_dq_wait(&ip->i_iopen_gh); gfs2_evict_inode()
1547 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); gfs2_evict_inode()
1548 error = gfs2_glock_nq(&ip->i_iopen_gh); gfs2_evict_inode()
1555 (ip->i_diskflags & GFS2_DIF_EXHASH)) { gfs2_evict_inode()
1556 error = gfs2_dir_exhash_dealloc(ip); gfs2_evict_inode()
1561 if (ip->i_eattr) { gfs2_evict_inode()
1562 error = gfs2_ea_dealloc(ip); gfs2_evict_inode()
1567 if (!gfs2_is_stuffed(ip)) { gfs2_evict_inode()
1568 error = gfs2_file_dealloc(ip); gfs2_evict_inode()
1573 error = gfs2_dinode_dealloc(ip); gfs2_evict_inode()
1577 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); gfs2_evict_inode()
1578 if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) { gfs2_evict_inode()
1579 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); gfs2_evict_inode()
1584 gfs2_ail_flush(ip->i_gl, 0); gfs2_evict_inode()
1596 if (gfs2_rs_active(ip->i_res)) gfs2_evict_inode()
1597 gfs2_rs_deltree(ip->i_res); gfs2_evict_inode()
1599 if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) { gfs2_evict_inode()
1600 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_evict_inode()
1601 gfs2_glock_dq(&ip->i_iopen_gh); gfs2_evict_inode()
1603 gfs2_holder_uninit(&ip->i_iopen_gh); gfs2_evict_inode()
1610 gfs2_rs_delete(ip, NULL); gfs2_evict_inode()
1611 gfs2_ordered_del_inode(ip); gfs2_evict_inode()
1613 gfs2_dir_hash_inval(ip); gfs2_evict_inode()
1614 ip->i_gl->gl_object = NULL; gfs2_evict_inode()
1615 flush_delayed_work(&ip->i_gl->gl_work); gfs2_evict_inode()
1616 gfs2_glock_add_to_lru(ip->i_gl); gfs2_evict_inode()
1617 gfs2_glock_put(ip->i_gl); gfs2_evict_inode()
1618 ip->i_gl = NULL; gfs2_evict_inode()
1619 if (ip->i_iopen_gh.gh_gl) { gfs2_evict_inode()
1620 ip->i_iopen_gh.gh_gl->gl_object = NULL; gfs2_evict_inode()
1621 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_evict_inode()
1622 gfs2_glock_dq_uninit(&ip->i_iopen_gh); gfs2_evict_inode()
1628 struct gfs2_inode *ip; gfs2_alloc_inode() local
1630 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL); gfs2_alloc_inode()
1631 if (ip) { gfs2_alloc_inode()
1632 ip->i_flags = 0; gfs2_alloc_inode()
1633 ip->i_gl = NULL; gfs2_alloc_inode()
1634 ip->i_rgd = NULL; gfs2_alloc_inode()
1635 ip->i_res = NULL; gfs2_alloc_inode()
1637 return &ip->i_inode; gfs2_alloc_inode()
H A Daops.c41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page, gfs2_page_add_databufs() argument
54 if (gfs2_is_jdata(ip)) gfs2_page_add_databufs()
56 gfs2_trans_add_data(ip->i_gl, bh); gfs2_page_add_databufs()
101 struct gfs2_inode *ip = GFS2_I(inode); gfs2_writepage_common() local
107 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) gfs2_writepage_common()
157 struct gfs2_inode *ip = GFS2_I(inode); __gfs2_jdata_writepage() local
166 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); __gfs2_jdata_writepage()
428 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_jdata_writepages() local
434 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); gfs2_jdata_writepages()
442 * @ip: the inode
448 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) stuffed_readpage() argument
451 u64 dsize = i_size_read(&ip->i_inode); stuffed_readpage()
466 error = gfs2_meta_inode_buffer(ip, &dibh); stuffed_readpage()
497 struct gfs2_inode *ip = GFS2_I(page->mapping->host); __gfs2_readpage() local
501 if (gfs2_is_stuffed(ip)) { __gfs2_readpage()
502 error = stuffed_readpage(ip, page); __gfs2_readpage()
527 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_readpage() local
532 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_readpage()
552 * @ip: The gfs2 inode
559 int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, gfs2_internal_read() argument
562 struct address_space *mapping = ip->i_inode.i_mapping; gfs2_internal_read()
611 struct gfs2_inode *ip = GFS2_I(inode); gfs2_readpages() local
616 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); gfs2_readpages()
620 if (!gfs2_is_stuffed(ip)) gfs2_readpages()
647 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_write_begin() local
658 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); gfs2_write_begin()
659 error = gfs2_glock_nq(&ip->i_gh); gfs2_write_begin()
662 if (&ip->i_inode == sdp->sd_rindex) { gfs2_write_begin()
666 gfs2_glock_dq(&ip->i_gh); gfs2_write_begin()
671 alloc_required = gfs2_write_alloc_required(ip, pos, len); gfs2_write_begin()
673 if (alloc_required || gfs2_is_jdata(ip)) gfs2_write_begin()
674 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); gfs2_write_begin()
680 error = gfs2_quota_lock_check(ip, &ap); gfs2_write_begin()
684 error = gfs2_inplace_reserve(ip, &ap); gfs2_write_begin()
690 if (gfs2_is_jdata(ip)) gfs2_write_begin()
694 if (&ip->i_inode == sdp->sd_rindex) gfs2_write_begin()
697 rblocks += gfs2_rg_blocks(ip, requested); gfs2_write_begin()
711 if (gfs2_is_stuffed(ip)) { gfs2_write_begin()
714 error = gfs2_unstuff_dinode(ip, page); gfs2_write_begin()
718 error = stuffed_readpage(ip, page); gfs2_write_begin()
733 if (pos + len > ip->i_inode.i_size) gfs2_write_begin()
734 gfs2_trim_blocks(&ip->i_inode); gfs2_write_begin()
741 gfs2_inplace_release(ip); gfs2_write_begin()
743 gfs2_quota_unlock(ip); gfs2_write_begin()
746 if (&ip->i_inode == sdp->sd_rindex) { gfs2_write_begin()
750 gfs2_glock_dq(&ip->i_gh); gfs2_write_begin()
752 gfs2_holder_uninit(&ip->i_gh); gfs2_write_begin()
813 struct gfs2_inode *ip = GFS2_I(inode); gfs2_stuffed_write_end() local
849 gfs2_glock_dq(&ip->i_gh); gfs2_stuffed_write_end()
850 gfs2_holder_uninit(&ip->i_gh); gfs2_stuffed_write_end()
876 struct gfs2_inode *ip = GFS2_I(inode); gfs2_write_end() local
886 BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL); gfs2_write_end()
888 ret = gfs2_meta_inode_buffer(ip, &dibh); gfs2_write_end()
895 if (gfs2_is_stuffed(ip)) gfs2_write_end()
898 if (!gfs2_is_writeback(ip)) gfs2_write_end()
899 gfs2_page_add_databufs(ip, page, from, to); gfs2_write_end()
905 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_write_end()
916 gfs2_inplace_release(ip); gfs2_write_end()
917 if (ip->i_res->rs_qa_qd_num) gfs2_write_end()
918 gfs2_quota_unlock(ip); gfs2_write_end()
923 gfs2_glock_dq(&ip->i_gh); gfs2_write_end()
924 gfs2_holder_uninit(&ip->i_gh); gfs2_write_end()
951 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_bmap() local
956 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); gfs2_bmap()
960 if (!gfs2_is_stuffed(ip)) gfs2_bmap()
1022 * @ip: The inode
1028 static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset) gfs2_ok_for_dio() argument
1035 if (gfs2_is_stuffed(ip)) gfs2_ok_for_dio()
1038 if (offset >= i_size_read(&ip->i_inode)) gfs2_ok_for_dio()
1051 struct gfs2_inode *ip = GFS2_I(inode); gfs2_direct_IO() local
1063 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); gfs2_direct_IO()
1067 rv = gfs2_ok_for_dio(ip, offset); gfs2_direct_IO()
1092 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) gfs2_direct_IO()
1093 unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len); gfs2_direct_IO()
1222 struct gfs2_inode *ip = GFS2_I(inode); gfs2_set_aops() local
1224 if (gfs2_is_writeback(ip)) gfs2_set_aops()
1226 else if (gfs2_is_ordered(ip)) gfs2_set_aops()
1228 else if (gfs2_is_jdata(ip)) gfs2_set_aops()
H A Dlog.h51 static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip) gfs2_ordered_add_inode() argument
53 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_ordered_add_inode()
55 if (!test_bit(GIF_ORDERED, &ip->i_flags)) { gfs2_ordered_add_inode()
57 if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags)) gfs2_ordered_add_inode()
58 list_add(&ip->i_ordered, &sdp->sd_log_le_ordered); gfs2_ordered_add_inode()
62 extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
H A Ddir.c92 int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, gfs2_dir_get_new_buffer() argument
97 bh = gfs2_meta_new(ip->i_gl, block); gfs2_dir_get_new_buffer()
98 gfs2_trans_add_meta(ip->i_gl, bh); gfs2_dir_get_new_buffer()
105 static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block, gfs2_dir_get_existing_buffer() argument
111 error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, &bh); gfs2_dir_get_existing_buffer()
114 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) { gfs2_dir_get_existing_buffer()
122 static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf, gfs2_dir_write_stuffed() argument
128 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_dir_write_stuffed()
132 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_dir_write_stuffed()
134 if (ip->i_inode.i_size < offset + size) gfs2_dir_write_stuffed()
135 i_size_write(&ip->i_inode, offset + size); gfs2_dir_write_stuffed()
136 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dir_write_stuffed()
137 gfs2_dinode_out(ip, dibh->b_data); gfs2_dir_write_stuffed()
148 * @ip: The GFS2 inode
155 static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf, gfs2_dir_write_data() argument
158 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_dir_write_data()
170 if (gfs2_is_stuffed(ip) && gfs2_dir_write_data()
172 return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset, gfs2_dir_write_data()
175 if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) gfs2_dir_write_data()
178 if (gfs2_is_stuffed(ip)) { gfs2_dir_write_data()
179 error = gfs2_unstuff_dinode(ip, NULL); gfs2_dir_write_data()
197 error = gfs2_extent_map(&ip->i_inode, lblock, &new, gfs2_dir_write_data()
207 error = gfs2_dir_get_new_buffer(ip, dblock, &bh); gfs2_dir_write_data()
209 error = gfs2_dir_get_existing_buffer(ip, dblock, &bh); gfs2_dir_write_data()
214 gfs2_trans_add_meta(ip->i_gl, bh); gfs2_dir_write_data()
228 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_dir_write_data()
232 if (ip->i_inode.i_size < offset + copied) gfs2_dir_write_data()
233 i_size_write(&ip->i_inode, offset + copied); gfs2_dir_write_data()
234 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; gfs2_dir_write_data()
236 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_dir_write_data()
237 gfs2_dinode_out(ip, dibh->b_data); gfs2_dir_write_data()
247 static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf, gfs2_dir_read_stuffed() argument
253 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_dir_read_stuffed()
265 * @ip: The GFS2 Inode
271 static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf, gfs2_dir_read_data() argument
274 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_dir_read_data()
281 if (gfs2_is_stuffed(ip)) gfs2_dir_read_data()
282 return gfs2_dir_read_stuffed(ip, buf, size); gfs2_dir_read_data()
284 if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) gfs2_dir_read_data()
301 error = gfs2_extent_map(&ip->i_inode, lblock, &new, gfs2_dir_read_data()
306 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); gfs2_dir_read_data()
308 error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh); gfs2_dir_read_data()
334 * @ip: The inode in question
339 static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) gfs2_dir_get_hash_table() argument
341 struct inode *inode = &ip->i_inode; gfs2_dir_get_hash_table()
346 BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH)); gfs2_dir_get_hash_table()
348 hc = ip->i_hash_cache; gfs2_dir_get_hash_table()
352 hsize = 1 << ip->i_depth; gfs2_dir_get_hash_table()
354 if (hsize != i_size_read(&ip->i_inode)) { gfs2_dir_get_hash_table()
355 gfs2_consist_inode(ip); gfs2_dir_get_hash_table()
366 ret = gfs2_dir_read_data(ip, hc, hsize); gfs2_dir_get_hash_table()
373 if (likely(!ip->i_hash_cache)) { gfs2_dir_get_hash_table()
374 ip->i_hash_cache = hc; gfs2_dir_get_hash_table()
380 return ip->i_hash_cache; gfs2_dir_get_hash_table()
385 * @ip: The directory inode
389 void gfs2_dir_hash_inval(struct gfs2_inode *ip) gfs2_dir_hash_inval() argument
393 spin_lock(&ip->i_inode.i_lock); gfs2_dir_hash_inval()
394 hc = ip->i_hash_cache; gfs2_dir_hash_inval()
395 ip->i_hash_cache = NULL; gfs2_dir_hash_inval()
396 spin_unlock(&ip->i_inode.i_lock); gfs2_dir_hash_inval()
694 struct gfs2_inode *ip = GFS2_I(inode); gfs2_init_dirent() local
702 gfs2_trans_add_meta(ip->i_gl, bh); gfs2_init_dirent()
776 struct gfs2_inode *ip = GFS2_I(inode); gfs2_dirent_search() local
779 if (ip->i_diskflags & GFS2_DIF_EXHASH) { gfs2_dirent_search()
781 unsigned hsize = 1 << ip->i_depth; gfs2_dirent_search()
785 gfs2_consist_inode(ip); gfs2_dirent_search()
789 index = name->hash >> (32 - ip->i_depth); gfs2_dirent_search()
790 error = get_first_leaf(ip, index, &bh); gfs2_dirent_search()
804 error = get_leaf(ip, ln, &bh); gfs2_dirent_search()
811 error = gfs2_meta_inode_buffer(ip, &bh); gfs2_dirent_search()
826 struct gfs2_inode *ip = GFS2_I(inode); new_leaf() local
836 error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); new_leaf()
839 bh = gfs2_meta_new(ip->i_gl, bn); new_leaf()
844 gfs2_trans_add_meta(ip->i_gl, bh); new_leaf()
851 leaf->lf_inode = cpu_to_be64(ip->i_no_addr); new_leaf()
1302 struct gfs2_inode *ip = GFS2_I(inode); gfs2_dir_read_leaf() local
1316 error = get_leaf(ip, lfn, &bh); gfs2_dir_read_leaf()
1347 error = get_leaf(ip, lfn, &bh); gfs2_dir_read_leaf()
1377 error = do_filldir_main(ip, ctx, darr, entries, copied); gfs2_dir_read_leaf()
1397 struct gfs2_inode *ip = GFS2_I(inode); gfs2_dir_readahead() local
1398 struct gfs2_glock *gl = ip->i_gl; gfs2_dir_readahead()
1413 blocknr = be64_to_cpu(ip->i_hash_cache[f_ra->start]); gfs2_dir_readahead()
1520 "ip->i_entries (%u) != g.offset (%u)\n", gfs2_dir_read()
1577 const struct gfs2_inode *ip) gfs2_dir_check()
1587 if (ip) { gfs2_dir_check()
1588 if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr) gfs2_dir_check()
1591 ip->i_no_formal_ino) gfs2_dir_check()
1593 if (unlikely(IF2DT(ip->i_inode.i_mode) != gfs2_dir_check()
1629 struct gfs2_inode *ip = GFS2_I(inode); dir_new_leaf() local
1636 index = name->hash >> (32 - ip->i_depth); dir_new_leaf()
1637 error = get_first_leaf(ip, index, &obh); dir_new_leaf()
1647 error = get_leaf(ip, bn, &obh); dir_new_leaf()
1652 gfs2_trans_add_meta(ip->i_gl, obh); dir_new_leaf()
1664 error = gfs2_meta_inode_buffer(ip, &bh); dir_new_leaf()
1667 gfs2_trans_add_meta(ip->i_gl, bh); dir_new_leaf()
1668 gfs2_add_inode_blocks(&ip->i_inode, 1); dir_new_leaf()
1669 gfs2_dinode_out(ip, bh->b_data); dir_new_leaf()
1674 static u16 gfs2_inode_ra_len(const struct gfs2_inode *ip) gfs2_inode_ra_len() argument
1676 u64 where = ip->i_no_addr + 1; gfs2_inode_ra_len()
1677 if (ip->i_eattr == where) gfs2_inode_ra_len()
1701 struct gfs2_inode *ip = GFS2_I(inode); gfs2_dir_add() local
1721 if (ip->i_diskflags & GFS2_DIF_EXHASH) { gfs2_dir_add()
1730 ip->i_entries++; gfs2_dir_add()
1731 ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv; gfs2_dir_add()
1733 inc_nlink(&ip->i_inode); gfs2_dir_add()
1738 if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) { gfs2_dir_add()
1749 if (ip->i_depth < GFS2_DIR_MAX_DEPTH) { gfs2_dir_add()
1750 error = dir_double_exhash(ip); gfs2_dir_add()
2056 * @ip: the file being written to
2066 struct gfs2_inode *ip = GFS2_I(inode); gfs2_diradd_alloc_required() local
2079 if (!(ip->i_diskflags & GFS2_DIF_EXHASH) && gfs2_diradd_alloc_required()
1576 gfs2_dir_check(struct inode *dir, const struct qstr *name, const struct gfs2_inode *ip) gfs2_dir_check() argument
H A Dmeta_io.h62 extern void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
63 extern int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
66 static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip, gfs2_meta_inode_buffer() argument
69 return gfs2_meta_indirect_buffer(ip, 0, ip->i_no_addr, bhp); gfs2_meta_inode_buffer()
H A Drgrp.c76 const struct gfs2_inode *ip, bool nowrap,
580 void check_and_update_goal(struct gfs2_inode *ip) check_and_update_goal() argument
582 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); check_and_update_goal()
583 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL) check_and_update_goal()
584 ip->i_goal = ip->i_no_addr; check_and_update_goal()
600 * @ip: the inode for this reservation
602 int gfs2_rs_alloc(struct gfs2_inode *ip) gfs2_rs_alloc() argument
606 down_write(&ip->i_rw_mutex); gfs2_rs_alloc()
607 if (ip->i_res) gfs2_rs_alloc()
610 ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS); gfs2_rs_alloc()
611 if (!ip->i_res) { gfs2_rs_alloc()
616 RB_CLEAR_NODE(&ip->i_res->rs_node); gfs2_rs_alloc()
618 up_write(&ip->i_rw_mutex); gfs2_rs_alloc()
682 * @ip: The inode for this reservation
686 void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount) gfs2_rs_delete() argument
688 down_write(&ip->i_rw_mutex); gfs2_rs_delete()
689 if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) { gfs2_rs_delete()
690 gfs2_rs_deltree(ip->i_res); gfs2_rs_delete()
691 BUG_ON(ip->i_res->rs_free); gfs2_rs_delete()
692 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res); gfs2_rs_delete()
693 ip->i_res = NULL; gfs2_rs_delete()
695 up_write(&ip->i_rw_mutex); gfs2_rs_delete()
845 struct gfs2_inode *ip = GFS2_I(inode); gfs2_ri_total() local
854 error = gfs2_internal_read(ip, buf, &pos, gfs2_ri_total()
890 * @ip: Pointer to the rindex inode
895 static int read_rindex_entry(struct gfs2_inode *ip) read_rindex_entry() argument
897 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); read_rindex_entry()
904 if (pos >= i_size_read(&ip->i_inode)) read_rindex_entry()
907 error = gfs2_internal_read(ip, (char *)&buf, &pos, read_rindex_entry()
990 * @ip: pointer to the rindex inode
995 static int gfs2_ri_update(struct gfs2_inode *ip) gfs2_ri_update() argument
997 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_ri_update()
1001 error = read_rindex_entry(ip); gfs2_ri_update()
1032 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); gfs2_rindex_update() local
1033 struct gfs2_glock *gl = ip->i_gl; gfs2_rindex_update()
1047 error = gfs2_ri_update(ip); gfs2_rindex_update()
1452 * @ip: the inode structure
1455 static void rs_insert(struct gfs2_inode *ip) rs_insert() argument
1459 struct gfs2_blkreserv *rs = ip->i_res; rs_insert()
1496 * @ip: pointer to the inode for which we're reserving blocks
1501 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, rg_mblk_search() argument
1506 struct gfs2_blkreserv *rs = ip->i_res; rg_mblk_search()
1510 struct inode *inode = &ip->i_inode; rg_mblk_search()
1522 if (rgrp_contains_block(rgd, ip->i_goal)) rg_mblk_search()
1523 goal = ip->i_goal; rg_mblk_search()
1530 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap); rg_mblk_search()
1534 rs->rs_inum = ip->i_no_addr; rg_mblk_search()
1535 rs_insert(ip); rg_mblk_search()
1547 * @ip: Ignore any reservations for this inode
1557 const struct gfs2_inode *ip) gfs2_next_unreserved_block()
1577 while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) { gfs2_next_unreserved_block()
1593 * @ip: The inode for which we are searching for blocks
1607 const struct gfs2_inode *ip, gfs2_reservation_check_and_update()
1630 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip); gfs2_reservation_check_and_update()
1654 * @ip: If set, check for reservations
1669 const struct gfs2_inode *ip, bool nowrap, gfs2_rbm_find()
1708 if (ip == NULL) gfs2_rbm_find()
1712 ret = gfs2_reservation_check_and_update(rbm, ip, gfs2_rbm_find()
1783 struct gfs2_inode *ip; try_rgrp_unlink() local
1818 ip = gl->gl_object; try_rgrp_unlink()
1820 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) try_rgrp_unlink()
1925 static u32 gfs2_orlov_skip(const struct gfs2_inode *ip) gfs2_orlov_skip() argument
1927 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_orlov_skip()
1969 * @ip: the inode to reserve space for
1983 int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap) gfs2_inplace_reserve() argument
1985 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_inplace_reserve()
1987 struct gfs2_blkreserv *rs = ip->i_res; gfs2_inplace_reserve()
1999 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) { gfs2_inplace_reserve()
2000 rs->rs_rbm.rgd = begin = ip->i_rgd; gfs2_inplace_reserve()
2002 check_and_update_goal(ip); gfs2_inplace_reserve()
2003 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); gfs2_inplace_reserve()
2005 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV)) gfs2_inplace_reserve()
2006 skip = gfs2_orlov_skip(ip); gfs2_inplace_reserve()
2054 rg_mblk_search(rs->rs_rbm.rgd, ip, ap); gfs2_inplace_reserve()
2064 ip->i_rgd = rs->rs_rbm.rgd; gfs2_inplace_reserve()
2065 ap->allowed = ip->i_rgd->rd_free_clone; gfs2_inplace_reserve()
2072 ip->i_no_addr); gfs2_inplace_reserve()
2094 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { gfs2_inplace_reserve()
2095 error = gfs2_ri_update(ip); gfs2_inplace_reserve()
2109 * @ip: the inode the reservation was taken out on
2114 void gfs2_inplace_release(struct gfs2_inode *ip) gfs2_inplace_release() argument
2116 struct gfs2_blkreserv *rs = ip->i_res; gfs2_inplace_release()
2258 * @ip: The inode we have just allocated blocks for
2267 static void gfs2_adjust_reservation(struct gfs2_inode *ip, gfs2_adjust_reservation() argument
2270 struct gfs2_blkreserv *rs = ip->i_res; gfs2_adjust_reservation()
2300 * @ip: The gfs2 inode
2309 const struct gfs2_inode *ip, bool dinode) gfs2_set_alloc_start()
2313 if (gfs2_rs_active(ip->i_res)) { gfs2_set_alloc_start()
2314 *rbm = ip->i_res->rs_rbm; gfs2_set_alloc_start()
2318 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal)) gfs2_set_alloc_start()
2319 goal = ip->i_goal; gfs2_set_alloc_start()
2328 * @ip: the inode to allocate the block for
2337 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, gfs2_alloc_blocks() argument
2340 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_alloc_blocks()
2342 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, }; gfs2_alloc_blocks()
2347 gfs2_set_alloc_start(&rbm, ip, dinode); gfs2_alloc_blocks()
2348 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL); gfs2_alloc_blocks()
2351 gfs2_set_alloc_start(&rbm, ip, dinode); gfs2_alloc_blocks()
2359 (unsigned long long)ip->i_no_addr, error, *nblocks, gfs2_alloc_blocks()
2368 if (gfs2_rs_active(ip->i_res)) gfs2_alloc_blocks()
2369 gfs2_adjust_reservation(ip, &rbm, *nblocks); gfs2_alloc_blocks()
2375 ip->i_goal = block + ndata - 1; gfs2_alloc_blocks()
2376 error = gfs2_meta_inode_buffer(ip, &dibh); gfs2_alloc_blocks()
2380 gfs2_trans_add_meta(ip->i_gl, dibh); gfs2_alloc_blocks()
2382 cpu_to_be64(ip->i_goal); gfs2_alloc_blocks()
2407 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_alloc_blocks()
2410 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks, gfs2_alloc_blocks()
2422 * @ip: the inode these blocks are being freed from
2429 void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) __gfs2_free_blocks() argument
2431 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); __gfs2_free_blocks()
2437 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE); __gfs2_free_blocks()
2445 if (meta || ip->i_depth) __gfs2_free_blocks()
2446 gfs2_meta_wipe(ip, bstart, blen); __gfs2_free_blocks()
2451 * @ip: the inode these blocks are being freed from
2457 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) gfs2_free_meta() argument
2459 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_free_meta()
2461 __gfs2_free_blocks(ip, bstart, blen, 1); gfs2_free_meta()
2463 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_free_meta()
2468 struct gfs2_inode *ip = GFS2_I(inode); gfs2_unlink_di() local
2471 u64 blkno = ip->i_no_addr; gfs2_unlink_di()
2476 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_unlink_di()
2507 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) gfs2_free_di() argument
2509 gfs2_free_uninit_di(rgd, ip->i_no_addr); gfs2_free_di()
2510 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); gfs2_free_di()
2511 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_free_di()
2512 gfs2_meta_wipe(ip, ip->i_no_addr, 1); gfs2_free_di()
2550 * @ip: the inode
2560 void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, gfs2_rlist_add() argument
2563 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_rlist_add()
2572 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block)) gfs2_rlist_add()
2573 rgd = ip->i_rgd; gfs2_rlist_add()
2580 ip->i_rgd = rgd; gfs2_rlist_add()
1555 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block, u32 length, const struct gfs2_inode *ip) gfs2_next_unreserved_block() argument
1606 gfs2_reservation_check_and_update(struct gfs2_rbm *rbm, const struct gfs2_inode *ip, u32 minext, struct gfs2_extent *maxext) gfs2_reservation_check_and_update() argument
1668 gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, const struct gfs2_inode *ip, bool nowrap, const struct gfs2_alloc_parms *ap) gfs2_rbm_find() argument
2308 gfs2_set_alloc_start(struct gfs2_rbm *rbm, const struct gfs2_inode *ip, bool dinode) gfs2_set_alloc_start() argument
H A Ddir.h33 const struct gfs2_inode *ip);
35 const struct gfs2_inode *ip, struct gfs2_diradd *da); gfs2_dir_no_add()
53 extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
55 extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_mmap.c48 struct ipath_mmap_info *ip = ipath_release_mmap_info() local
50 struct ipath_ibdev *dev = to_idev(ip->context->device); ipath_release_mmap_info()
53 list_del(&ip->pending_mmaps); ipath_release_mmap_info()
56 vfree(ip->obj); ipath_release_mmap_info()
57 kfree(ip); ipath_release_mmap_info()
66 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_open() local
68 kref_get(&ip->ref); ipath_vma_open()
73 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_close() local
75 kref_put(&ip->ref, ipath_release_mmap_info); ipath_vma_close()
94 struct ipath_mmap_info *ip, *pp; ipath_mmap() local
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, ipath_mmap()
106 if (context != ip->context || (__u64) offset != ip->offset) ipath_mmap()
109 if (size > ip->size) ipath_mmap()
112 list_del_init(&ip->pending_mmaps); ipath_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); ipath_mmap()
119 vma->vm_private_data = ip; ipath_mmap()
135 struct ipath_mmap_info *ip; ipath_create_mmap_info() local
137 ip = kmalloc(sizeof *ip, GFP_KERNEL); ipath_create_mmap_info()
138 if (!ip) ipath_create_mmap_info()
146 ip->offset = dev->mmap_offset; ipath_create_mmap_info()
150 INIT_LIST_HEAD(&ip->pending_mmaps); ipath_create_mmap_info()
151 ip->size = size; ipath_create_mmap_info()
152 ip->context = context; ipath_create_mmap_info()
153 ip->obj = obj; ipath_create_mmap_info()
154 kref_init(&ip->ref); ipath_create_mmap_info()
157 return ip; ipath_create_mmap_info()
161 struct ipath_mmap_info *ip, ipath_update_mmap_info()
168 ip->offset = dev->mmap_offset; ipath_update_mmap_info()
172 ip->size = size; ipath_update_mmap_info()
173 ip->obj = obj; ipath_update_mmap_info()
160 ipath_update_mmap_info(struct ipath_ibdev *dev, struct ipath_mmap_info *ip, u32 size, void *obj) ipath_update_mmap_info() argument
H A Dipath_srq.c153 srq->ip = ipath_create_srq()
157 if (!srq->ip) { ipath_create_srq()
162 err = ib_copy_to_udata(udata, &srq->ip->offset, ipath_create_srq()
163 sizeof(srq->ip->offset)); ipath_create_srq()
169 srq->ip = NULL; ipath_create_srq()
189 if (srq->ip) { ipath_create_srq()
191 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); ipath_create_srq()
199 kfree(srq->ip); ipath_create_srq()
309 if (srq->ip) { ipath_modify_srq()
310 struct ipath_mmap_info *ip = srq->ip; ipath_modify_srq() local
314 ipath_update_mmap_info(dev, ip, s, wq); ipath_modify_srq()
321 ret = ib_copy_to_udata(udata, &ip->offset, ipath_modify_srq()
322 sizeof(ip->offset)); ipath_modify_srq()
328 if (list_empty(&ip->pending_mmaps)) ipath_modify_srq()
329 list_add(&ip->pending_mmaps, ipath_modify_srq()
373 if (srq->ip) ipath_destroy_srq()
374 kref_put(&srq->ip->ref, ipath_release_mmap_info); ipath_destroy_srq()
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_mmap.c48 struct qib_mmap_info *ip = qib_release_mmap_info() local
50 struct qib_ibdev *dev = to_idev(ip->context->device); qib_release_mmap_info()
53 list_del(&ip->pending_mmaps); qib_release_mmap_info()
56 vfree(ip->obj); qib_release_mmap_info()
57 kfree(ip); qib_release_mmap_info()
66 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_open() local
68 kref_get(&ip->ref); qib_vma_open()
73 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_close() local
75 kref_put(&ip->ref, qib_release_mmap_info); qib_vma_close()
94 struct qib_mmap_info *ip, *pp; qib_mmap() local
103 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, qib_mmap()
106 if (context != ip->context || (__u64) offset != ip->offset) qib_mmap()
109 if (size > ip->size) qib_mmap()
112 list_del_init(&ip->pending_mmaps); qib_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); qib_mmap()
119 vma->vm_private_data = ip; qib_mmap()
135 struct qib_mmap_info *ip; qib_create_mmap_info() local
137 ip = kmalloc(sizeof(*ip), GFP_KERNEL); qib_create_mmap_info()
138 if (!ip) qib_create_mmap_info()
146 ip->offset = dev->mmap_offset; qib_create_mmap_info()
150 INIT_LIST_HEAD(&ip->pending_mmaps); qib_create_mmap_info()
151 ip->size = size; qib_create_mmap_info()
152 ip->context = context; qib_create_mmap_info()
153 ip->obj = obj; qib_create_mmap_info()
154 kref_init(&ip->ref); qib_create_mmap_info()
157 return ip; qib_create_mmap_info()
160 void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, qib_update_mmap_info() argument
168 ip->offset = dev->mmap_offset; qib_update_mmap_info()
172 ip->size = size; qib_update_mmap_info()
173 ip->obj = obj; qib_update_mmap_info()
H A Dqib_srq.c150 srq->ip = qib_create_srq()
153 if (!srq->ip) { qib_create_srq()
158 err = ib_copy_to_udata(udata, &srq->ip->offset, qib_create_srq()
159 sizeof(srq->ip->offset)); qib_create_srq()
165 srq->ip = NULL; qib_create_srq()
185 if (srq->ip) { qib_create_srq()
187 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); qib_create_srq()
195 kfree(srq->ip); qib_create_srq()
305 if (srq->ip) { qib_modify_srq()
306 struct qib_mmap_info *ip = srq->ip; qib_modify_srq() local
310 qib_update_mmap_info(dev, ip, s, wq); qib_modify_srq()
317 ret = ib_copy_to_udata(udata, &ip->offset, qib_modify_srq()
318 sizeof(ip->offset)); qib_modify_srq()
328 if (list_empty(&ip->pending_mmaps)) qib_modify_srq()
329 list_add(&ip->pending_mmaps, qib_modify_srq()
373 if (srq->ip) qib_destroy_srq()
374 kref_put(&srq->ip->ref, qib_release_mmap_info); qib_destroy_srq()
/linux-4.4.14/samples/bpf/
H A Dtracex4_kern.c14 u64 ip; member in struct:pair
40 long ip = 0; bpf_prog2() local
42 /* get ip address of kmem_cache_alloc_node() caller */ bpf_prog2()
43 bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); bpf_prog2()
47 .ip = ip, bpf_prog2()
H A Dtracex4_user.c20 __u64 ip; member in struct:pair
46 printf("obj 0x%llx is %2lldsec old was allocated at ip %llx\n", print_old_objects()
47 next_key, (val - v.val) / 1000000000ll, v.ip); print_old_objects()
/linux-4.4.14/include/trace/events/
H A Dmodule.h71 TP_PROTO(struct module *mod, unsigned long ip),
73 TP_ARGS(mod, ip),
76 __field( unsigned long, ip )
82 __entry->ip = ip;
88 __get_str(name), (void *)__entry->ip, __entry->refcnt)
93 TP_PROTO(struct module *mod, unsigned long ip),
95 TP_ARGS(mod, ip)
100 TP_PROTO(struct module *mod, unsigned long ip),
102 TP_ARGS(mod, ip)
108 TP_PROTO(char *name, bool wait, unsigned long ip),
110 TP_ARGS(name, wait, ip),
113 __field( unsigned long, ip )
119 __entry->ip = ip;
125 __get_str(name), (int)__entry->wait, (void *)__entry->ip)
H A Dlock.h16 struct lockdep_map *next_lock, unsigned long ip),
18 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
40 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
42 TP_ARGS(lock, ip),
59 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
61 TP_ARGS(lock, ip)
68 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
70 TP_ARGS(lock, ip)
75 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
77 TP_ARGS(lock, ip)
H A Drpm.h78 TP_PROTO(struct device *dev, unsigned long ip, int ret),
79 TP_ARGS(dev, ip, ret),
83 __field( unsigned long, ip )
89 __entry->ip = ip;
93 TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
H A Dmce.h23 __field( u64, ip )
41 __entry->ip = m->ip;
58 __entry->cs, __entry->ip,
/linux-4.4.14/arch/ia64/kernel/
H A Dstacktrace.c15 unsigned long ip; ia64_do_save_stack() local
20 unw_get_ip(info, &ip); ia64_do_save_stack()
21 if (ip == 0) ia64_do_save_stack()
24 trace->entries[trace->nr_entries++] = ip; ia64_do_save_stack()
H A Dpatch.c103 u64 ip; ia64_patch_vtop() local
106 ip = (u64) offp + *offp; ia64_patch_vtop()
109 ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip))); ia64_patch_vtop()
110 ia64_fc((void *) ip); ia64_patch_vtop()
126 u64 ip, *b; ia64_patch_rse() local
129 ip = (u64) offp + *offp; ia64_patch_rse()
131 b = (u64 *)(ip & -16); ia64_patch_rse()
133 ia64_fc((void *) ip); ia64_patch_rse()
176 u64 ip; patch_fsyscall_table() local
179 ip = (u64) ia64_imva((char *) offp + *offp); patch_fsyscall_table()
180 ia64_patch_imm64(ip, (u64) fsyscall_table); patch_fsyscall_table()
181 ia64_fc((void *) ip); patch_fsyscall_table()
193 u64 ip; patch_brl_fsys_bubble_down() local
196 ip = (u64) offp + *offp; patch_brl_fsys_bubble_down()
197 ia64_patch_imm60((u64) ia64_imva((void *) ip), patch_brl_fsys_bubble_down()
198 (u64) (fsys_bubble_down - (ip & -16)) / 16); patch_brl_fsys_bubble_down()
199 ia64_fc((void *) ip); patch_brl_fsys_bubble_down()
222 u64 ip, mask, imm; ia64_patch_phys_stack_reg() local
229 ip = (u64) offp + *offp; ia64_patch_phys_stack_reg()
230 ia64_patch(ip, mask, imm); ia64_patch_phys_stack_reg()
231 ia64_fc((void *)ip); ia64_patch_phys_stack_reg()
H A Dftrace.c43 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
62 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */
81 static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) ftrace_call_replace() argument
84 unsigned long offset = addr - (ip + 0x10); ftrace_call_replace()
94 ftrace_modify_code(unsigned long ip, unsigned char *old_code, ftrace_modify_code() argument
113 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_modify_code()
122 if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE)) ftrace_modify_code()
124 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); ftrace_modify_code()
132 unsigned long ip = rec->ip; ftrace_make_nop_check() local
134 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_make_nop_check()
171 return ftrace_modify_code(rec->ip, NULL, new, 0); ftrace_make_nop()
176 unsigned long ip = rec->ip; ftrace_make_call() local
180 new = ftrace_call_replace(ip, addr); ftrace_make_call()
181 return ftrace_modify_code(ip, old, new, 1); ftrace_make_call()
187 unsigned long ip; ftrace_update_ftrace_func() local
188 unsigned long addr = ((struct fnptr *)ftrace_call)->ip; ftrace_update_ftrace_func()
192 ip = ((struct fnptr *)func)->ip; ftrace_update_ftrace_func()
194 ia64_patch_imm64(addr + 2, ip); ftrace_update_ftrace_func()
/linux-4.4.14/net/netfilter/ipset/
H A Dip_set_hash_ip.c8 /* Kernel module implementing an IP set type: the hash:ip type */
12 #include <linux/ip.h>
16 #include <net/ip.h>
34 IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
35 MODULE_ALIAS("ip_set_hash:ip");
46 __be32 ip; member in struct:hash_ip4_elem
56 return e1->ip == e2->ip; hash_ip4_data_equal()
62 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip)) hash_ip4_data_list()
73 next->ip = e->ip; hash_ip4_data_next()
89 __be32 ip; hash_ip4_kadt() local
91 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip); hash_ip4_kadt()
92 ip &= ip_set_netmask(h->netmask); hash_ip4_kadt()
93 if (ip == 0) hash_ip4_kadt()
96 e.ip = ip; hash_ip4_kadt()
108 u32 ip = 0, ip_to = 0, hosts; hash_ip4_uadt() local
117 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_ip4_uadt()
125 ip &= ip_set_hostmask(h->netmask); hash_ip4_uadt()
128 e.ip = htonl(ip); hash_ip4_uadt()
129 if (e.ip == 0) hash_ip4_uadt()
134 ip_to = ip; hash_ip4_uadt()
139 if (ip > ip_to) hash_ip4_uadt()
140 swap(ip, ip_to); hash_ip4_uadt()
146 ip_set_mask_from_to(ip, ip_to, cidr); hash_ip4_uadt()
152 ip = ntohl(h->next.ip); hash_ip4_uadt()
153 for (; !before(ip_to, ip); ip += hosts) { hash_ip4_uadt()
154 e.ip = htonl(ip); hash_ip4_uadt()
155 if (e.ip == 0) hash_ip4_uadt()
171 union nf_inet_addr ip; member in struct:hash_ip6_elem
181 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6); hash_ip6_data_equal()
185 hash_ip6_netmask(union nf_inet_addr *ip, u8 prefix) hash_ip6_netmask() argument
187 ip6_netmask(ip, prefix); hash_ip6_netmask()
193 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6)) hash_ip6_data_list()
225 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_ip6_kadt()
226 hash_ip6_netmask(&e.ip, h->netmask); hash_ip6_kadt()
227 if (ipv6_addr_any(&e.ip.in6)) hash_ip6_kadt()
257 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_ip6_uadt()
265 hash_ip6_netmask(&e.ip, h->netmask); hash_ip6_uadt()
266 if (ipv6_addr_any(&e.ip.in6)) hash_ip6_uadt()
275 .name = "hash:ip",
H A Dip_set_hash_netnet.c13 #include <linux/ip.h>
17 #include <net/ip.h>
45 __be32 ip[2]; member in union:hash_netnet4_elem::__anon15003
89 elem->ip[1] = orig->ip[1]; hash_netnet4_data_reset_elem()
96 elem->ip[1] &= ip_set_netmask(cidr); hash_netnet4_data_netmask()
99 elem->ip[0] &= ip_set_netmask(cidr); hash_netnet4_data_netmask()
110 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) || hash_netnet4_data_list()
111 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) || hash_netnet4_data_list()
156 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]); hash_netnet4_kadt()
157 ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]); hash_netnet4_kadt()
158 e.ip[0] &= ip_set_netmask(e.cidr[0]); hash_netnet4_kadt()
159 e.ip[1] &= ip_set_netmask(e.cidr[1]); hash_netnet4_kadt()
172 u32 ip = 0, ip_to = 0, last; hash_netnet4_uadt() local
184 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_netnet4_uadt()
217 e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0])); hash_netnet4_uadt()
218 e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1])); hash_netnet4_uadt()
224 ip_to = ip; hash_netnet4_uadt()
229 if (ip_to < ip) hash_netnet4_uadt()
230 swap(ip, ip_to); hash_netnet4_uadt()
231 if (unlikely(ip + UINT_MAX == ip_to)) hash_netnet4_uadt()
234 ip_set_mask_from_to(ip, ip_to, e.cidr[0]); hash_netnet4_uadt()
251 ip = ntohl(h->next.ip[0]); hash_netnet4_uadt()
253 while (!after(ip, ip_to)) { hash_netnet4_uadt()
254 e.ip[0] = htonl(ip); hash_netnet4_uadt()
255 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); hash_netnet4_uadt()
257 ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) hash_netnet4_uadt()
260 e.ip[1] = htonl(ip2); hash_netnet4_uadt()
269 ip = last + 1; hash_netnet4_uadt()
277 union nf_inet_addr ip[2]; member in struct:hash_netnet6_elem
293 return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) && hash_netnet6_data_equal()
294 ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) && hash_netnet6_data_equal()
320 elem->ip[1] = orig->ip[1]; hash_netnet6_data_reset_elem()
327 ip6_netmask(&elem->ip[1], cidr); hash_netnet6_data_netmask()
330 ip6_netmask(&elem->ip[0], cidr); hash_netnet6_data_netmask()
341 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) || hash_netnet6_data_list()
342 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) || hash_netnet6_data_list()
390 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6); hash_netnet6_kadt()
391 ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6); hash_netnet6_kadt()
392 ip6_netmask(&e.ip[0], e.cidr[0]); hash_netnet6_kadt()
393 ip6_netmask(&e.ip[1], e.cidr[1]); hash_netnet6_kadt()
417 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]); hash_netnet6_uadt()
421 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]); hash_netnet6_uadt()
441 ip6_netmask(&e.ip[0], e.cidr[0]); hash_netnet6_uadt()
442 ip6_netmask(&e.ip[1], e.cidr[1]); hash_netnet6_uadt()
H A Dip_set_hash_ipport.c8 /* Kernel module implementing an IP set type: the hash:ip,port type */
12 #include <linux/ip.h>
16 #include <net/ip.h>
36 IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
37 MODULE_ALIAS("ip_set_hash:ip,port");
46 __be32 ip; member in struct:hash_ipport4_elem
59 return ip1->ip == ip2->ip && hash_ipport4_data_equal()
68 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_ipport4_data_list()
82 next->ip = d->ip; hash_ipport4_data_next()
96 struct hash_ipport4_elem e = { .ip = 0 }; hash_ipport4_kadt()
103 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_ipport4_kadt()
113 struct hash_ipport4_elem e = { .ip = 0 }; hash_ipport4_uadt()
115 u32 ip, ip_to = 0, p = 0, port, port_to; hash_ipport4_uadt() local
127 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); hash_ipport4_uadt()
157 ip_to = ip = ntohl(e.ip); hash_ipport4_uadt()
162 if (ip > ip_to) hash_ipport4_uadt()
163 swap(ip, ip_to); hash_ipport4_uadt()
169 ip_set_mask_from_to(ip, ip_to, cidr); hash_ipport4_uadt()
180 ip = ntohl(h->next.ip); hash_ipport4_uadt()
181 for (; !before(ip_to, ip); ip++) { hash_ipport4_uadt()
182 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) hash_ipport4_uadt()
185 e.ip = htonl(ip); hash_ipport4_uadt()
201 union nf_inet_addr ip; member in struct:hash_ipport6_elem
214 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_ipport6_data_equal()
223 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_ipport6_data_list()
254 struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; hash_ipport6_kadt()
261 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_ipport6_kadt()
271 struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; hash_ipport6_uadt()
293 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_ipport6_uadt()
341 .name = "hash:ip,port",
H A Dip_set_hash_net.c12 #include <linux/ip.h>
16 #include <net/ip.h>
46 __be32 ip; member in struct:hash_net4_elem
59 return ip1->ip == ip2->ip && hash_net4_data_equal()
84 elem->ip &= ip_set_netmask(cidr); hash_net4_data_netmask()
93 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_net4_data_list()
108 next->ip = d->ip; hash_net4_data_next()
132 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_net4_kadt()
133 e.ip &= ip_set_netmask(e.cidr); hash_net4_kadt()
146 u32 ip = 0, ip_to = 0, last; hash_net4_uadt() local
156 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_net4_uadt()
178 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); hash_net4_uadt()
184 ip_to = ip; hash_net4_uadt()
189 if (ip_to < ip) hash_net4_uadt()
190 swap(ip, ip_to); hash_net4_uadt()
191 if (ip + UINT_MAX == ip_to) hash_net4_uadt()
195 ip = ntohl(h->next.ip); hash_net4_uadt()
196 while (!after(ip, ip_to)) { hash_net4_uadt()
197 e.ip = htonl(ip); hash_net4_uadt()
198 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); hash_net4_uadt()
204 ip = last + 1; hash_net4_uadt()
212 union nf_inet_addr ip; member in struct:hash_net6_elem
225 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_net6_data_equal()
250 ip6_netmask(&elem->ip, cidr); hash_net6_data_netmask()
259 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_net6_data_list()
301 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_net6_kadt()
302 ip6_netmask(&e.ip, e.cidr); hash_net6_kadt()
325 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_net6_uadt()
339 ip6_netmask(&e.ip, e.cidr); hash_net6_uadt()
H A Dip_set_hash_ipportip.c8 /* Kernel module implementing an IP set type: the hash:ip,port,ip type */
12 #include <linux/ip.h>
16 #include <net/ip.h>
36 IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
37 MODULE_ALIAS("ip_set_hash:ip,port,ip");
46 __be32 ip; member in struct:hash_ipportip4_elem
58 return ip1->ip == ip2->ip && hash_ipportip4_data_equal()
68 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_ipportip4_data_list()
83 next->ip = d->ip; hash_ipportip4_data_next()
98 struct hash_ipportip4_elem e = { .ip = 0 }; hash_ipportip4_kadt()
105 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_ipportip4_kadt()
116 struct hash_ipportip4_elem e = { .ip = 0 }; hash_ipportip4_uadt()
118 u32 ip, ip_to = 0, p = 0, port, port_to; hash_ipportip4_uadt() local
130 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); hash_ipportip4_uadt()
164 ip_to = ip = ntohl(e.ip); hash_ipportip4_uadt()
169 if (ip > ip_to) hash_ipportip4_uadt()
170 swap(ip, ip_to); hash_ipportip4_uadt()
176 ip_set_mask_from_to(ip, ip_to, cidr); hash_ipportip4_uadt()
187 ip = ntohl(h->next.ip); hash_ipportip4_uadt()
188 for (; !before(ip_to, ip); ip++) { hash_ipportip4_uadt()
189 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) hash_ipportip4_uadt()
192 e.ip = htonl(ip); hash_ipportip4_uadt()
208 union nf_inet_addr ip; member in struct:hash_ipportip6_elem
222 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_ipportip6_data_equal()
232 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_ipportip6_data_list()
264 struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; hash_ipportip6_kadt()
271 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_ipportip6_kadt()
282 struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; hash_ipportip6_uadt()
304 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_ipportip6_uadt()
356 .name = "hash:ip,port,ip",
H A Dip_set_hash_ipmark.c9 /* Kernel module implementing an IP set type: the hash:ip,mark type */
13 #include <linux/ip.h>
17 #include <net/ip.h>
33 IP_SET_MODULE_DESC("hash:ip,mark", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
34 MODULE_ALIAS("ip_set_hash:ip,mark");
44 __be32 ip; member in struct:hash_ipmark4_elem
55 return ip1->ip == ip2->ip && hash_ipmark4_data_equal()
63 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_ipmark4_data_list()
76 next->ip = d->ip; hash_ipmark4_data_next()
96 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_ipmark4_kadt()
108 u32 ip, ip_to = 0; hash_ipmark4_uadt() local
118 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); hash_ipmark4_uadt()
135 ip_to = ip = ntohl(e.ip); hash_ipmark4_uadt()
140 if (ip > ip_to) hash_ipmark4_uadt()
141 swap(ip, ip_to); hash_ipmark4_uadt()
147 ip_set_mask_from_to(ip, ip_to, cidr); hash_ipmark4_uadt()
151 ip = ntohl(h->next.ip); hash_ipmark4_uadt()
152 for (; !before(ip_to, ip); ip++) { hash_ipmark4_uadt()
153 e.ip = htonl(ip); hash_ipmark4_uadt()
167 union nf_inet_addr ip; member in struct:hash_ipmark6_elem
178 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_ipmark6_data_equal()
186 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_ipmark6_data_list()
222 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_ipmark6_kadt()
251 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_ipmark6_uadt()
275 .name = "hash:ip,mark",
H A Dip_set_hash_netportnet.c8 /* Kernel module implementing an IP set type: the hash:ip,port,net type */
12 #include <linux/ip.h>
16 #include <net/ip.h>
48 __be32 ip[2]; member in union:hash_netportnet4_elem::__anon15006
96 elem->ip[1] = orig->ip[1]; hash_netportnet4_data_reset_elem()
104 elem->ip[1] &= ip_set_netmask(cidr); hash_netportnet4_data_netmask()
107 elem->ip[0] &= ip_set_netmask(cidr); hash_netportnet4_data_netmask()
118 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) || hash_netportnet4_data_list()
119 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) || hash_netportnet4_data_list()
171 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]); hash_netportnet4_kadt()
172 ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]); hash_netportnet4_kadt()
173 e.ip[0] &= ip_set_netmask(e.cidr[0]); hash_netportnet4_kadt()
174 e.ip[1] &= ip_set_netmask(e.cidr[1]); hash_netportnet4_kadt()
187 u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to; hash_netportnet4_uadt() local
202 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_netportnet4_uadt()
251 e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0])); hash_netportnet4_uadt()
252 e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1])); hash_netportnet4_uadt()
258 ip_to = ip; hash_netportnet4_uadt()
263 if (ip > ip_to) hash_netportnet4_uadt()
264 swap(ip, ip_to); hash_netportnet4_uadt()
265 if (unlikely(ip + UINT_MAX == ip_to)) hash_netportnet4_uadt()
268 ip_set_mask_from_to(ip, ip_to, e.cidr[0]); hash_netportnet4_uadt()
292 ip = ntohl(h->next.ip[0]); hash_netportnet4_uadt()
294 while (!after(ip, ip_to)) { hash_netportnet4_uadt()
295 e.ip[0] = htonl(ip); hash_netportnet4_uadt()
296 ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); hash_netportnet4_uadt()
297 p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) hash_netportnet4_uadt()
301 ip2 = (retried && ip == ntohl(h->next.ip[0]) && hash_netportnet4_uadt()
302 p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) hash_netportnet4_uadt()
305 e.ip[1] = htonl(ip2); hash_netportnet4_uadt()
316 ip = ip_last + 1; hash_netportnet4_uadt()
324 union nf_inet_addr ip[2]; member in struct:hash_netportnet6_elem
342 return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) && hash_netportnet6_data_equal()
343 ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) && hash_netportnet6_data_equal()
371 elem->ip[1] = orig->ip[1]; hash_netportnet6_data_reset_elem()
379 ip6_netmask(&elem->ip[1], cidr); hash_netportnet6_data_netmask()
382 ip6_netmask(&elem->ip[0], cidr); hash_netportnet6_data_netmask()
393 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) || hash_netportnet6_data_list()
394 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) || hash_netportnet6_data_list()
449 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6); hash_netportnet6_kadt()
450 ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6); hash_netportnet6_kadt()
451 ip6_netmask(&e.ip[0], e.cidr[0]); hash_netportnet6_kadt()
452 ip6_netmask(&e.ip[1], e.cidr[1]); hash_netportnet6_kadt()
481 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]); hash_netportnet6_uadt()
485 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]); hash_netportnet6_uadt()
505 ip6_netmask(&e.ip[0], e.cidr[0]); hash_netportnet6_uadt()
506 ip6_netmask(&e.ip[1], e.cidr[1]); hash_netportnet6_uadt()
H A Dip_set_hash_netiface.c12 #include <linux/ip.h>
16 #include <net/ip.h>
50 __be32 ip; member in struct:hash_netiface4_elem_hashed
59 __be32 ip; member in struct:hash_netiface4_elem
74 return ip1->ip == ip2->ip && hash_netiface4_data_equal()
102 elem->ip &= ip_set_netmask(cidr); hash_netiface4_data_netmask()
114 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_netiface4_data_list()
130 next->ip = d->ip; hash_netiface4_data_next()
172 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_netiface4_kadt()
173 e.ip &= ip_set_netmask(e.cidr); hash_netiface4_kadt()
205 u32 ip = 0, ip_to = 0, last; hash_netiface4_uadt() local
216 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_netiface4_uadt()
240 e.ip = htonl(ip & ip_set_hostmask(e.cidr)); hash_netiface4_uadt()
250 if (ip_to < ip) hash_netiface4_uadt()
251 swap(ip, ip_to); hash_netiface4_uadt()
252 if (ip + UINT_MAX == ip_to) hash_netiface4_uadt()
255 ip_set_mask_from_to(ip, ip_to, e.cidr); hash_netiface4_uadt()
259 ip = ntohl(h->next.ip); hash_netiface4_uadt()
260 while (!after(ip, ip_to)) { hash_netiface4_uadt()
261 e.ip = htonl(ip); hash_netiface4_uadt()
262 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); hash_netiface4_uadt()
269 ip = last + 1; hash_netiface4_uadt()
277 union nf_inet_addr ip; member in struct:hash_netiface6_elem_hashed
285 union nf_inet_addr ip; member in struct:hash_netiface6_elem
300 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_netiface6_data_equal()
328 ip6_netmask(&elem->ip, cidr); hash_netiface6_data_netmask()
340 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_netiface6_data_list()
385 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_netiface6_kadt()
386 ip6_netmask(&e.ip, e.cidr); hash_netiface6_kadt()
427 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_netiface6_uadt()
441 ip6_netmask(&e.ip, e.cidr); hash_netiface6_uadt()
H A Dip_set_hash_netport.c12 #include <linux/ip.h>
16 #include <net/ip.h>
55 __be32 ip; member in struct:hash_netport4_elem
69 return ip1->ip == ip2->ip && hash_netport4_data_equal()
96 elem->ip &= ip_set_netmask(cidr); hash_netport4_data_netmask()
106 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_netport4_data_list()
123 next->ip = d->ip; hash_netport4_data_next()
150 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_netport4_kadt()
151 e.ip &= ip_set_netmask(e.cidr + 1); hash_netport4_kadt()
164 u32 port, port_to, p = 0, ip = 0, ip_to = 0, last; hash_netport4_uadt() local
178 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_netport4_uadt()
218 e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1)); hash_netport4_uadt()
234 if (ip_to < ip) hash_netport4_uadt()
235 swap(ip, ip_to); hash_netport4_uadt()
236 if (ip + UINT_MAX == ip_to) hash_netport4_uadt()
239 ip_set_mask_from_to(ip, ip_to, e.cidr + 1); hash_netport4_uadt()
243 ip = ntohl(h->next.ip); hash_netport4_uadt()
244 while (!after(ip, ip_to)) { hash_netport4_uadt()
245 e.ip = htonl(ip); hash_netport4_uadt()
246 last = ip_set_range_to_cidr(ip, ip_to, &cidr); hash_netport4_uadt()
248 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) hash_netport4_uadt()
259 ip = last + 1; hash_netport4_uadt()
267 union nf_inet_addr ip; member in struct:hash_netport6_elem
281 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_netport6_data_equal()
308 ip6_netmask(&elem->ip, cidr); hash_netport6_data_netmask()
318 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_netport6_data_list()
365 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_netport6_kadt()
366 ip6_netmask(&e.ip, e.cidr + 1); hash_netport6_kadt()
395 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_netport6_uadt()
409 ip6_netmask(&e.ip, e.cidr + 1); hash_netport6_uadt()
H A Dip_set_bitmap_ip.c10 /* Kernel module implementing an IP set type: the bitmap:ip type */
13 #include <linux/ip.h>
35 IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
36 MODULE_ALIAS("ip_set_bitmap:ip");
61 ip_to_id(const struct bitmap_ip *m, u32 ip) ip_to_id() argument
63 return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip) / m->hosts; ip_to_id()
120 u32 ip; bitmap_ip_kadt() local
122 ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); bitmap_ip_kadt()
123 if (ip < map->first_ip || ip > map->last_ip) bitmap_ip_kadt()
126 e.id = ip_to_id(map, ip); bitmap_ip_kadt()
137 u32 ip = 0, ip_to = 0; bitmap_ip_uadt() local
148 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); bitmap_ip_uadt()
156 if (ip < map->first_ip || ip > map->last_ip) bitmap_ip_uadt()
160 e.id = ip_to_id(map, ip); bitmap_ip_uadt()
168 if (ip > ip_to) { bitmap_ip_uadt()
169 swap(ip, ip_to); bitmap_ip_uadt()
170 if (ip < map->first_ip) bitmap_ip_uadt()
178 ip_set_mask_from_to(ip, ip_to, cidr); bitmap_ip_uadt()
180 ip_to = ip; bitmap_ip_uadt()
186 for (; !before(ip_to, ip); ip += map->hosts) { bitmap_ip_uadt()
187 e.id = ip_to_id(map, ip); bitmap_ip_uadt()
218 /* Create bitmap:ip type of sets */
333 .name = "bitmap:ip",
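
ip_to_id() above is the whole addressing scheme of bitmap:ip: one bit per address block, indexed by ((ip & hostmask(netmask)) - first_ip) / hosts. A small userspace sketch of that computation follows, with made-up set parameters; as far as the defaults go, a set created roughly like "ipset create foo bitmap:ip range 10.0.0.0/24" corresponds to first_ip 10.0.0.0, netmask 32 and hosts 1.

#include <stdint.h>
#include <stdio.h>

struct bitmap_ip_params {           /* made-up mirror of the fields ip_to_id() reads */
        uint32_t first_ip;          /* host-order first address covered by the set   */
        uint32_t hosts;             /* addresses represented by a single bit         */
        uint8_t  netmask;           /* 32 means one bit per individual address       */
};

static uint32_t hostmask(uint8_t cidr)
{
        return cidr ? ~((1u << (32 - cidr)) - 1) : 0;
}

static uint32_t ip_to_id(const struct bitmap_ip_params *m, uint32_t ip)
{
        /* same formula as the kernel line above */
        return ((ip & hostmask(m->netmask)) - m->first_ip) / m->hosts;
}

int main(void)
{
        struct bitmap_ip_params m = { .first_ip = 0x0A000000, .hosts = 1, .netmask = 32 };

        printf("bit index for 10.0.0.42: %u\n", ip_to_id(&m, 0x0A00002A));   /* prints 42 */
        return 0;
}

The bitmap:ip,mac variant further down degenerates to plain ip - m->first_ip.
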
H A Dip_set_hash_ipportnet.c8 /* Kernel module implementing an IP set type: the hash:ip,port,net type */
12 #include <linux/ip.h>
16 #include <net/ip.h>
38 IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
39 MODULE_ALIAS("ip_set_hash:ip,port,net");
56 __be32 ip; member in struct:hash_ipportnet4_elem
71 return ip1->ip == ip2->ip && hash_ipportnet4_data_equal()
109 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || hash_ipportnet4_data_list()
127 next->ip = d->ip; hash_ipportnet4_data_next()
155 ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); hash_ipportnet4_kadt()
170 u32 ip = 0, ip_to = 0, p = 0, port, port_to; hash_ipportnet4_uadt() local
185 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); hash_ipportnet4_uadt()
230 e.ip = htonl(ip); hash_ipportnet4_uadt()
237 ip_to = ip; hash_ipportnet4_uadt()
242 if (ip > ip_to) hash_ipportnet4_uadt()
243 swap(ip, ip_to); hash_ipportnet4_uadt()
249 ip_set_mask_from_to(ip, ip_to, cidr); hash_ipportnet4_uadt()
273 ip = ntohl(h->next.ip); hash_ipportnet4_uadt()
274 for (; !before(ip_to, ip); ip++) { hash_ipportnet4_uadt()
275 e.ip = htonl(ip); hash_ipportnet4_uadt()
276 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) hash_ipportnet4_uadt()
281 ip == ntohl(h->next.ip) && hash_ipportnet4_uadt()
305 union nf_inet_addr ip; member in struct:hash_ipportnet6_elem
320 return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && hash_ipportnet6_data_equal()
358 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || hash_ipportnet6_data_list()
406 ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); hash_ipportnet6_kadt()
443 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); hash_ipportnet6_uadt()
512 .name = "hash:ip,port,net",
H A Dip_set_bitmap_ipmac.c11 /* Kernel module implementing an IP set type: the bitmap:ip,mac type */
14 #include <linux/ip.h>
35 IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
36 MODULE_ALIAS("ip_set_bitmap:ip,mac");
72 ip_to_id(const struct bitmap_ipmac *m, u32 ip) ip_to_id() argument
74 return ip - m->first_ip; ip_to_id()
219 u32 ip; bitmap_ipmac_kadt() local
225 ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC)); bitmap_ipmac_kadt()
226 if (ip < map->first_ip || ip > map->last_ip) bitmap_ipmac_kadt()
234 e.id = ip_to_id(map, ip); bitmap_ipmac_kadt()
248 u32 ip = 0; bitmap_ipmac_uadt() local
257 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); bitmap_ipmac_uadt()
265 if (ip < map->first_ip || ip > map->last_ip) bitmap_ipmac_uadt()
268 e.id = ip_to_id(map, ip); bitmap_ipmac_uadt()
294 /* Create bitmap:ip,mac type of sets */
378 .name = "bitmap:ip,mac",
/linux-4.4.14/arch/arm/mm/
H A Dcopypage-v4wt.c29 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
30 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
31 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ v4wt_copy_user_page()
32 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
33 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
34 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
35 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
37 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
38 ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ v4wt_copy_user_page()
70 mov ip, #0 @ 1\n\ v4wt_clear_user_highpage()
72 1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wt_clear_user_highpage()
73 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wt_clear_user_highpage()
74 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wt_clear_user_highpage()
75 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wt_clear_user_highpage()
81 : "r1", "r2", "r3", "ip", "lr"); v4wt_clear_user_highpage()
H A Dcopypage-v4mc.c49 ldmia %0!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
51 stmia %1!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
52 ldmia %0!, {r2, r3, ip, lr} @ 4+1\n\ mc_copy_user_page()
53 stmia %1!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
54 ldmia %0!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
56 stmia %1!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
57 ldmia %0!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
59 stmia %1!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
60 ldmneia %0!, {r2, r3, ip, lr} @ 4\n\ mc_copy_user_page()
96 mov ip, #0 @ 1\n\ v4_mc_clear_user_highpage()
99 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4_mc_clear_user_highpage()
100 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4_mc_clear_user_highpage()
102 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4_mc_clear_user_highpage()
103 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4_mc_clear_user_highpage()
108 : "r1", "r2", "r3", "ip", "lr"); v4_mc_clear_user_highpage()
H A Dcopypage-v4wb.c31 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
33 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
34 ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ v4wb_copy_user_page()
35 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
36 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
38 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
39 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
41 stmia r0!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
42 ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ v4wb_copy_user_page()
75 mov ip, #0 @ 1\n\ v4wb_clear_user_highpage()
78 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wb_clear_user_highpage()
79 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wb_clear_user_highpage()
81 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wb_clear_user_highpage()
82 stmia %0!, {r2, r3, ip, lr} @ 4\n\ v4wb_clear_user_highpage()
88 : "r1", "r2", "r3", "ip", "lr"); v4wb_clear_user_highpage()
H A Dproc-arm1020.S98 mov ip, #0
99 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
100 mcr p15, 0, ip, c7, c10, 4 @ drain WB
102 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
104 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
105 bic ip, ip, #0x000f @ ............wcam
106 bic ip, ip, #0x1100 @ ...i...s........
107 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
152 mov ip, #0
155 mcr p15, 0, ip, c7, c10, 4 @ drain WB
159 mcr p15, 0, ip, c7, c10, 4 @ drain WB
167 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
169 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
183 mov ip, #0
189 mcr p15, 0, ip, c7, c10, 4
191 mcr p15, 0, ip, c7, c10, 4 @ drain WB
198 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
200 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
227 mov ip, #0
229 mcr p15, 0, ip, c7, c10, 4
233 mcr p15, 0, ip, c7, c10, 4 @ drain WB
241 mcr p15, 0, ip, c7, c10, 4 @ drain WB
255 mov ip, #0
259 mcr p15, 0, ip, c7, c10, 4 @ drain WB
264 mcr p15, 0, ip, c7, c10, 4 @ drain WB
281 mov ip, #0
285 mcrne p15, 0, ip, c7, c10, 4
287 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
289 mcrne p15, 0, ip, c7, c10, 4
291 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
297 mcr p15, 0, ip, c7, c10, 4 @ drain WB
311 mov ip, #0
315 mcr p15, 0, ip, c7, c10, 4 @ drain WB
320 mcr p15, 0, ip, c7, c10, 4 @ drain WB
332 mov ip, #0
335 mcr p15, 0, ip, c7, c10, 4
337 mcr p15, 0, ip, c7, c10, 4 @ drain WB
342 mcr p15, 0, ip, c7, c10, 4 @ drain WB
378 mov ip, #0
380 mcr p15, 0, ip, c7, c10, 4 @ drain WB
403 2: mov ip, r3, LSL #26 @ shift up entry
404 orr ip, ip, r1, LSL #5 @ shift in/up index
405 mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
406 mov ip, #0
407 mcr p15, 0, ip, c7, c10, 4
H A Dcopypage-fa.c26 1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\ fa_copy_user_page()
27 stmia r0, {r3, r4, ip, lr} @ 4\n\ fa_copy_user_page()
30 ldmia r1!, {r3, r4, ip, lr} @ 4\n\ fa_copy_user_page()
31 stmia r0, {r3, r4, ip, lr} @ 4\n\ fa_copy_user_page()
66 mov ip, #0 @ 1\n\ fa_clear_user_highpage()
68 1: stmia %0, {r2, r3, ip, lr} @ 4\n\ fa_clear_user_highpage()
71 stmia %0, {r2, r3, ip, lr} @ 4\n\ fa_clear_user_highpage()
79 : "r1", "r2", "r3", "ip", "lr"); fa_clear_user_highpage()
H A Dcopypage-xscale.c59 mov ip, r1 \n\ mc_copy_user_page()
66 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ mc_copy_user_page()
68 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ mc_copy_user_page()
70 mov ip, r1 \n\ mc_copy_user_page()
77 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ mc_copy_user_page()
79 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ mc_copy_user_page()
117 1: mov ip, %0 \n\ xscale_mc_clear_user_highpage()
122 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ xscale_mc_clear_user_highpage()
124 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ xscale_mc_clear_user_highpage()
128 : "r1", "r2", "r3", "ip"); xscale_mc_clear_user_highpage()
H A Dproc-arm1022.S89 mov ip, #0
90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
91 mcr p15, 0, ip, c7, c10, 4 @ drain WB
93 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
95 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
143 mov ip, #0
156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
172 mov ip, #0
185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
214 mov ip, #0
226 mcr p15, 0, ip, c7, c10, 4 @ drain WB
240 mov ip, #0
248 mcr p15, 0, ip, c7, c10, 4 @ drain WB
265 mov ip, #0
277 mcr p15, 0, ip, c7, c10, 4 @ drain WB
291 mov ip, #0
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
311 mov ip, #0
319 mcr p15, 0, ip, c7, c10, 4 @ drain WB
355 mov ip, #0
H A Dproc-arm1026.S89 mov ip, #0
90 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
91 mcr p15, 0, ip, c7, c10, 4 @ drain WB
93 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
95 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
143 mov ip, #0
151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
167 mov ip, #0
180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
208 mov ip, #0
220 mcr p15, 0, ip, c7, c10, 4 @ drain WB
234 mov ip, #0
242 mcr p15, 0, ip, c7, c10, 4 @ drain WB
259 mov ip, #0
271 mcr p15, 0, ip, c7, c10, 4 @ drain WB
285 mov ip, #0
293 mcr p15, 0, ip, c7, c10, 4 @ drain WB
305 mov ip, #0
313 mcr p15, 0, ip, c7, c10, 4 @ drain WB
349 mov ip, #0
H A Dproc-fa526.S62 mov ip, #0
63 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
64 mcr p15, 0, ip, c7, c10, 4 @ drain WB
66 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
68 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
69 bic ip, ip, #0x000f @ ............wcam
70 bic ip, ip, #0x1100 @ ...i...s........
71 bic ip, ip, #0x0800 @ BTB off
72 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
107 mov ip, #0
109 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
111 mcr p15, 0, ip, c7, c14, 0 @ clean and invalidate whole D cache
113 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
114 mcr p15, 0, ip, c7, c5, 6 @ invalidate BTB since mm changed
115 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
116 mcr p15, 0, ip, c7, c5, 4 @ prefetch flush
118 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
H A Dproc-sa110.S67 mov ip, #0
68 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
69 mcr p15, 0, ip, c7, c10, 4 @ drain WB
71 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
73 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
74 bic ip, ip, #0x000f @ ............wcam
75 bic ip, ip, #0x1100 @ ...i...s........
76 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
95 mcr p15, 0, ip, c15, c2, 2 @ disable clock switching
141 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
H A Dproc-arm1020e.S98 mov ip, #0
99 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
100 mcr p15, 0, ip, c7, c10, 4 @ drain WB
102 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
104 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
105 bic ip, ip, #0x000f @ ............wcam
106 bic ip, ip, #0x1100 @ ...i...s........
107 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
152 mov ip, #0
155 mcr p15, 0, ip, c7, c10, 4 @ drain WB
166 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
168 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
182 mov ip, #0
195 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
197 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
223 mov ip, #0
235 mcr p15, 0, ip, c7, c10, 4 @ drain WB
249 mov ip, #0
257 mcr p15, 0, ip, c7, c10, 4 @ drain WB
274 mov ip, #0
286 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov ip, #0
308 mcr p15, 0, ip, c7, c10, 4 @ drain WB
320 mov ip, #0
328 mcr p15, 0, ip, c7, c10, 4 @ drain WB
364 mov ip, #0
388 2: mov ip, r3, LSL #26 @ shift up entry
389 orr ip, ip, r1, LSL #5 @ shift in/up index
390 mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
391 mov ip, #0
H A Dproc-sa1100.S56 mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching
75 mov ip, #0
76 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
77 mcr p15, 0, ip, c7, c10, 4 @ drain WB
79 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
81 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
82 bic ip, ip, #0x000f @ ............wcam
83 bic ip, ip, #0x1100 @ ...i...s........
84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
150 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
187 mov ip, #0
188 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
189 mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache
190 mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
191 mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
H A Dcopypage-feroceon.c21 mov ip, %2 \n\ feroceon_copy_user_page()
60 subs ip, ip, #(32 * 8) \n\ feroceon_copy_user_page()
64 mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ feroceon_copy_user_page()
94 mov ip, #0 \n\ feroceon_clear_user_highpage()
96 1: stmia %0, {r2-r7, ip, lr} \n\ feroceon_clear_user_highpage()
104 : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); feroceon_clear_user_highpage()
H A Dproc-arm940.S53 mov ip, #0
54 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
55 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
56 mcr p15, 0, ip, c7, c10, 4 @ drain WB
57 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
58 bic ip, ip, #0x00000005 @ .............c.p
59 bic ip, ip, #0x00001000 @ i-cache
60 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov ip, #0
112 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
124 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
185 mov ip, #0
193 mcr p15, 0, ip, c7, c10, 4 @ drain WB
207 mov ip, #0
217 mcr p15, 0, ip, c7, c10, 4 @ drain WB
230 mov ip, #0
243 mcr p15, 0, ip, c7, c10, 4 @ drain WB
H A Dpv-fixup-asm.S26 bic ip, r8, #CR_M @ disable caches and MMU
27 mcr p15, 0, ip, c1, c0, 0
77 mov ip, #0
78 mcr p15, 0, ip, c7, c5, 0 @ I+BTB cache invalidate
79 mcr p15, 0, ip, c8, c7, 0 @ local_flush_tlb_all()
H A Dproc-arm925.S114 mov ip, #0xff000000
115 orr ip, ip, #0x00fe0000
116 orr ip, ip, #0x0000ce00
118 strh r4, [ip, #0x10]
122 mov ip, #0
123 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
124 mcr p15, 0, ip, c7, c10, 4 @ drain WB
126 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
128 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
129 bic ip, ip, #0x000f @ ............wcam
130 bic ip, ip, #0x1100 @ ...i...s........
131 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
177 mov ip, #0
180 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
189 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
204 mov ip, #0
227 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
407 mov ip, #0
409 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
417 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
418 mcr p15, 0, ip, c7, c10, 4 @ drain WB
420 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
H A Dproc-arm926.S82 mov ip, #0
83 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
84 mcr p15, 0, ip, c7, c10, 4 @ drain WB
86 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
88 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
89 bic ip, ip, #0x000f @ ............wcam
90 bic ip, ip, #0x1100 @ ...i...s........
91 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
108 orr ip, r3, #PSR_F_BIT @ is disabled
109 msr cpsr_c, ip
143 mov ip, #0
146 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
167 mov ip, #0
190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
370 mov ip, #0
372 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
378 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
379 mcr p15, 0, ip, c7, c10, 4 @ drain WB
381 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
416 mov ip, #0
417 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
418 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
H A Dproc-mohawk.S74 mov ip, #0
75 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
76 mcr p15, 0, ip, c7, c10, 4 @ drain WB
77 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
78 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
79 bic ip, ip, #0x0007 @ .............cam
80 bic ip, ip, #0x1100 @ ...i...s........
81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
125 mov ip, #0
127 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache
129 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
130 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
146 mov ip, #0
160 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
329 mov ip, #0
330 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache
331 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
332 mcr p15, 0, ip, c7, c10, 4 @ drain WB
335 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
369 mov ip, #0
370 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
371 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
372 mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer
373 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
H A Dproc-arm920.S90 mov ip, #0
91 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
92 mcr p15, 0, ip, c7, c10, 4 @ drain WB
94 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
96 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
97 bic ip, ip, #0x000f @ ............wcam
98 bic ip, ip, #0x1100 @ ...i...s........
99 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
142 mov ip, #0
152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
167 mov ip, #0
179 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
349 mov ip, #0
351 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
355 @ && Uses registers r1, r3 and ip
365 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
366 mcr p15, 0, ip, c7, c10, 4 @ drain WB
368 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
401 mov ip, #0
402 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
403 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
/linux-4.4.14/fs/xfs/
H A Dxfs_icache.c41 struct xfs_perag *pag, struct xfs_inode *ip);
51 struct xfs_inode *ip; xfs_inode_alloc() local
58 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); xfs_inode_alloc()
59 if (!ip) xfs_inode_alloc()
61 if (inode_init_always(mp->m_super, VFS_I(ip))) { xfs_inode_alloc()
62 kmem_zone_free(xfs_inode_zone, ip); xfs_inode_alloc()
67 ASSERT(atomic_read(&ip->i_pincount) == 0); xfs_inode_alloc()
68 ASSERT(!spin_is_locked(&ip->i_flags_lock)); xfs_inode_alloc()
69 ASSERT(!xfs_isiflocked(ip)); xfs_inode_alloc()
70 ASSERT(ip->i_ino == 0); xfs_inode_alloc()
72 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); xfs_inode_alloc()
75 ip->i_ino = ino; xfs_inode_alloc()
76 ip->i_mount = mp; xfs_inode_alloc()
77 memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); xfs_inode_alloc()
78 ip->i_afp = NULL; xfs_inode_alloc()
79 memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); xfs_inode_alloc()
80 ip->i_flags = 0; xfs_inode_alloc()
81 ip->i_delayed_blks = 0; xfs_inode_alloc()
82 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); xfs_inode_alloc()
84 return ip; xfs_inode_alloc()
92 struct xfs_inode *ip = XFS_I(inode); xfs_inode_free_callback() local
94 kmem_zone_free(xfs_inode_zone, ip); xfs_inode_free_callback()
99 struct xfs_inode *ip) xfs_inode_free()
101 switch (ip->i_d.di_mode & S_IFMT) { xfs_inode_free()
105 xfs_idestroy_fork(ip, XFS_DATA_FORK); xfs_inode_free()
109 if (ip->i_afp) xfs_inode_free()
110 xfs_idestroy_fork(ip, XFS_ATTR_FORK); xfs_inode_free()
112 if (ip->i_itemp) { xfs_inode_free()
113 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL)); xfs_inode_free()
114 xfs_inode_item_destroy(ip); xfs_inode_free()
115 ip->i_itemp = NULL; xfs_inode_free()
121 * free state. The ip->i_flags_lock provides the barrier against lookup xfs_inode_free()
124 spin_lock(&ip->i_flags_lock); xfs_inode_free()
125 ip->i_flags = XFS_IRECLAIM; xfs_inode_free()
126 ip->i_ino = 0; xfs_inode_free()
127 spin_unlock(&ip->i_flags_lock); xfs_inode_free()
130 ASSERT(atomic_read(&ip->i_pincount) == 0); xfs_inode_free()
131 ASSERT(!xfs_isiflocked(ip)); xfs_inode_free()
132 XFS_STATS_DEC(ip->i_mount, vn_active); xfs_inode_free()
134 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); xfs_inode_free()
143 struct xfs_inode *ip,
148 struct inode *inode = VFS_I(ip); __releases()
149 struct xfs_mount *mp = ip->i_mount; __releases()
159 spin_lock(&ip->i_flags_lock); __releases()
160 if (ip->i_ino != ino) { __releases()
161 trace_xfs_iget_skip(ip); __releases()
178 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { __releases()
179 trace_xfs_iget_skip(ip); __releases()
188 if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { __releases()
197 if (ip->i_flags & XFS_IRECLAIMABLE) { __releases()
198 trace_xfs_iget_reclaim(ip); __releases()
206 ip->i_flags |= XFS_IRECLAIM; __releases()
208 spin_unlock(&ip->i_flags_lock); __releases()
218 spin_lock(&ip->i_flags_lock); __releases()
220 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); __releases()
221 ASSERT(ip->i_flags & XFS_IRECLAIMABLE); __releases()
222 trace_xfs_iget_reclaim_fail(ip); __releases()
227 spin_lock(&ip->i_flags_lock); __releases()
234 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; __releases()
235 ip->i_flags |= XFS_INEW; __releases()
236 __xfs_inode_clear_reclaim_tag(mp, pag, ip); __releases()
239 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); __releases()
240 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); __releases()
242 spin_unlock(&ip->i_flags_lock); __releases()
247 trace_xfs_iget_skip(ip); __releases()
253 spin_unlock(&ip->i_flags_lock); __releases()
255 trace_xfs_iget_hit(ip); __releases()
259 xfs_ilock(ip, lock_flags); __releases()
261 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); __releases()
267 spin_unlock(&ip->i_flags_lock); __releases()
283 struct xfs_inode *ip; xfs_iget_cache_miss() local
288 ip = xfs_inode_alloc(mp, ino); xfs_iget_cache_miss()
289 if (!ip) xfs_iget_cache_miss()
292 error = xfs_iread(mp, tp, ip, flags); xfs_iget_cache_miss()
296 trace_xfs_iget_miss(ip); xfs_iget_cache_miss()
298 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { xfs_iget_cache_miss()
319 if (!xfs_ilock_nowait(ip, lock_flags)) xfs_iget_cache_miss()
328 * The ip->i_flags_lock that protects the XFS_INEW flag forms the xfs_iget_cache_miss()
335 ip->i_udquot = NULL; xfs_iget_cache_miss()
336 ip->i_gdquot = NULL; xfs_iget_cache_miss()
337 ip->i_pdquot = NULL; xfs_iget_cache_miss()
338 xfs_iflags_set(ip, iflags); xfs_iget_cache_miss()
342 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); xfs_iget_cache_miss()
352 *ipp = ip; xfs_iget_cache_miss()
359 xfs_iunlock(ip, lock_flags); xfs_iget_cache_miss()
361 __destroy_inode(VFS_I(ip)); xfs_iget_cache_miss()
362 xfs_inode_free(ip); xfs_iget_cache_miss()
397 xfs_inode_t *ip; xfs_iget() local
424 ip = radix_tree_lookup(&pag->pag_ici_root, agino); xfs_iget()
426 if (ip) { xfs_iget()
427 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); xfs_iget()
434 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, xfs_iget()
441 *ipp = ip; xfs_iget()
447 if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0) xfs_iget()
448 xfs_setup_existing_inode(ip); xfs_iget()
470 struct xfs_inode *ip) xfs_inode_ag_walk_grab()
472 struct inode *inode = VFS_I(ip); xfs_inode_ag_walk_grab()
485 spin_lock(&ip->i_flags_lock); xfs_inode_ag_walk_grab()
486 if (!ip->i_ino) xfs_inode_ag_walk_grab()
490 if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) xfs_inode_ag_walk_grab()
492 spin_unlock(&ip->i_flags_lock); xfs_inode_ag_walk_grab()
495 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) xfs_inode_ag_walk_grab()
506 spin_unlock(&ip->i_flags_lock); xfs_inode_ag_walk_grab()
514 int (*execute)(struct xfs_inode *ip, int flags, xfs_inode_ag_walk()
558 struct xfs_inode *ip = batch[i]; xfs_inode_ag_walk() local
560 if (done || xfs_inode_ag_walk_grab(ip)) xfs_inode_ag_walk()
575 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) xfs_inode_ag_walk()
577 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); xfs_inode_ag_walk()
578 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) xfs_inode_ag_walk()
642 int (*execute)(struct xfs_inode *ip, int flags, xfs_inode_ag_iterator()
669 int (*execute)(struct xfs_inode *ip, int flags, xfs_inode_ag_iterator_tag()
735 struct xfs_inode *ip) __xfs_inode_set_reclaim_tag()
738 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), __xfs_inode_set_reclaim_tag()
743 spin_lock(&ip->i_mount->m_perag_lock); __xfs_inode_set_reclaim_tag()
744 radix_tree_tag_set(&ip->i_mount->m_perag_tree, __xfs_inode_set_reclaim_tag()
745 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), __xfs_inode_set_reclaim_tag()
747 spin_unlock(&ip->i_mount->m_perag_lock); __xfs_inode_set_reclaim_tag()
750 xfs_reclaim_work_queue(ip->i_mount); __xfs_inode_set_reclaim_tag()
752 trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, __xfs_inode_set_reclaim_tag()
765 xfs_inode_t *ip) xfs_inode_set_reclaim_tag()
767 struct xfs_mount *mp = ip->i_mount; xfs_inode_set_reclaim_tag()
770 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); xfs_inode_set_reclaim_tag()
772 spin_lock(&ip->i_flags_lock); xfs_inode_set_reclaim_tag()
773 __xfs_inode_set_reclaim_tag(pag, ip); xfs_inode_set_reclaim_tag()
774 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); xfs_inode_set_reclaim_tag()
775 spin_unlock(&ip->i_flags_lock); xfs_inode_set_reclaim_tag()
783 xfs_inode_t *ip) __xfs_inode_clear_reclaim()
788 spin_lock(&ip->i_mount->m_perag_lock); __xfs_inode_clear_reclaim()
789 radix_tree_tag_clear(&ip->i_mount->m_perag_tree, __xfs_inode_clear_reclaim()
790 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), __xfs_inode_clear_reclaim()
792 spin_unlock(&ip->i_mount->m_perag_lock); __xfs_inode_clear_reclaim()
793 trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno, __xfs_inode_clear_reclaim()
802 xfs_inode_t *ip) __xfs_inode_clear_reclaim_tag()
805 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); __xfs_inode_clear_reclaim_tag()
806 __xfs_inode_clear_reclaim(pag, ip); __xfs_inode_clear_reclaim_tag()
815 struct xfs_inode *ip, xfs_reclaim_inode_grab()
821 if (!ip->i_ino) xfs_reclaim_inode_grab()
830 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) xfs_reclaim_inode_grab()
843 spin_lock(&ip->i_flags_lock); xfs_reclaim_inode_grab()
844 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || xfs_reclaim_inode_grab()
845 __xfs_iflags_test(ip, XFS_IRECLAIM)) { xfs_reclaim_inode_grab()
847 spin_unlock(&ip->i_flags_lock); xfs_reclaim_inode_grab()
850 __xfs_iflags_set(ip, XFS_IRECLAIM); xfs_reclaim_inode_grab()
851 spin_unlock(&ip->i_flags_lock); xfs_reclaim_inode_grab()
896 struct xfs_inode *ip, xfs_reclaim_inode()
905 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
906 if (!xfs_iflock_nowait(ip)) { xfs_reclaim_inode()
909 xfs_iflock(ip); xfs_reclaim_inode()
912 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_reclaim_inode()
913 xfs_iunpin_wait(ip); xfs_reclaim_inode()
914 xfs_iflush_abort(ip, false); xfs_reclaim_inode()
917 if (xfs_ipincount(ip)) { xfs_reclaim_inode()
920 xfs_iunpin_wait(ip); xfs_reclaim_inode()
922 if (xfs_iflags_test(ip, XFS_ISTALE)) xfs_reclaim_inode()
924 if (xfs_inode_clean(ip)) xfs_reclaim_inode()
939 * ip->i_lock, and we are doing the exact opposite here. As a result, xfs_reclaim_inode()
950 error = xfs_iflush(ip, &bp); xfs_reclaim_inode()
952 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
963 xfs_iflock(ip); xfs_reclaim_inode()
965 xfs_ifunlock(ip); xfs_reclaim_inode()
966 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
968 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); xfs_reclaim_inode()
978 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) xfs_reclaim_inode()
980 __xfs_inode_clear_reclaim(pag, ip); xfs_reclaim_inode()
991 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
992 xfs_qm_dqdetach(ip); xfs_reclaim_inode()
993 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
995 xfs_inode_free(ip); xfs_reclaim_inode()
999 xfs_ifunlock(ip); xfs_reclaim_inode()
1001 xfs_iflags_clear(ip, XFS_IRECLAIM); xfs_reclaim_inode()
1002 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_reclaim_inode()
1073 struct xfs_inode *ip = batch[i]; xfs_reclaim_inodes_ag() local
1075 if (done || xfs_reclaim_inode_grab(ip, flags)) xfs_reclaim_inodes_ag()
1092 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != xfs_reclaim_inodes_ag()
1095 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); xfs_reclaim_inodes_ag()
1096 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) xfs_reclaim_inodes_ag()
1192 struct xfs_inode *ip, xfs_inode_match_id()
1196 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) xfs_inode_match_id()
1200 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) xfs_inode_match_id()
1204 xfs_get_projid(ip) != eofb->eof_prid) xfs_inode_match_id()
1216 struct xfs_inode *ip, xfs_inode_match_id_union()
1220 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) xfs_inode_match_id_union()
1224 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) xfs_inode_match_id_union()
1228 xfs_get_projid(ip) == eofb->eof_prid) xfs_inode_match_id_union()
1236 struct xfs_inode *ip, xfs_inode_free_eofblocks()
1247 if (!xfs_can_free_eofblocks(ip, false)) { xfs_inode_free_eofblocks()
1249 trace_xfs_inode_free_eofblocks_invalid(ip); xfs_inode_free_eofblocks()
1250 xfs_inode_clear_eofblocks_tag(ip); xfs_inode_free_eofblocks()
1259 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) xfs_inode_free_eofblocks()
1264 match = xfs_inode_match_id_union(ip, eofb); xfs_inode_free_eofblocks()
1266 match = xfs_inode_match_id(ip, eofb); xfs_inode_free_eofblocks()
1272 XFS_ISIZE(ip) < eofb->eof_min_file_size) xfs_inode_free_eofblocks()
1280 if (eofb->eof_scan_owner == ip->i_ino) xfs_inode_free_eofblocks()
1284 ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock); xfs_inode_free_eofblocks()
1315 struct xfs_inode *ip) xfs_inode_free_quota_eofblocks()
1321 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_inode_free_quota_eofblocks()
1329 eofb.eof_scan_owner = ip->i_ino; xfs_inode_free_quota_eofblocks()
1332 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { xfs_inode_free_quota_eofblocks()
1333 dq = xfs_inode_dquot(ip, XFS_DQ_USER); xfs_inode_free_quota_eofblocks()
1335 eofb.eof_uid = VFS_I(ip)->i_uid; xfs_inode_free_quota_eofblocks()
1341 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { xfs_inode_free_quota_eofblocks()
1342 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); xfs_inode_free_quota_eofblocks()
1344 eofb.eof_gid = VFS_I(ip)->i_gid; xfs_inode_free_quota_eofblocks()
1351 xfs_icache_free_eofblocks(ip->i_mount, &eofb); xfs_inode_free_quota_eofblocks()
1358 xfs_inode_t *ip) xfs_inode_set_eofblocks_tag()
1360 struct xfs_mount *mp = ip->i_mount; xfs_inode_set_eofblocks_tag()
1364 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); xfs_inode_set_eofblocks_tag()
1366 trace_xfs_inode_set_eofblocks_tag(ip); xfs_inode_set_eofblocks_tag()
1371 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), xfs_inode_set_eofblocks_tag()
1375 spin_lock(&ip->i_mount->m_perag_lock); xfs_inode_set_eofblocks_tag()
1376 radix_tree_tag_set(&ip->i_mount->m_perag_tree, xfs_inode_set_eofblocks_tag()
1377 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), xfs_inode_set_eofblocks_tag()
1379 spin_unlock(&ip->i_mount->m_perag_lock); xfs_inode_set_eofblocks_tag()
1382 xfs_queue_eofblocks(ip->i_mount); xfs_inode_set_eofblocks_tag()
1384 trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno, xfs_inode_set_eofblocks_tag()
1394 xfs_inode_t *ip) xfs_inode_clear_eofblocks_tag()
1396 struct xfs_mount *mp = ip->i_mount; xfs_inode_clear_eofblocks_tag()
1399 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); xfs_inode_clear_eofblocks_tag()
1401 trace_xfs_inode_clear_eofblocks_tag(ip); xfs_inode_clear_eofblocks_tag()
1404 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), xfs_inode_clear_eofblocks_tag()
1408 spin_lock(&ip->i_mount->m_perag_lock); xfs_inode_clear_eofblocks_tag()
1409 radix_tree_tag_clear(&ip->i_mount->m_perag_tree, xfs_inode_clear_eofblocks_tag()
1410 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), xfs_inode_clear_eofblocks_tag()
1412 spin_unlock(&ip->i_mount->m_perag_lock); xfs_inode_clear_eofblocks_tag()
1413 trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno, xfs_inode_clear_eofblocks_tag()
98 xfs_inode_free( struct xfs_inode *ip) xfs_inode_free() argument
469 xfs_inode_ag_walk_grab( struct xfs_inode *ip) xfs_inode_ag_walk_grab() argument
511 xfs_inode_ag_walk( struct xfs_mount *mp, struct xfs_perag *pag, int (*execute)(struct xfs_inode *ip, int flags, void *args), int flags, void *args, int tag) xfs_inode_ag_walk() argument
640 xfs_inode_ag_iterator( struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, int flags, void *args), int flags, void *args) xfs_inode_ag_iterator() argument
667 xfs_inode_ag_iterator_tag( struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, int flags, void *args), int flags, void *args, int tag) xfs_inode_ag_iterator_tag() argument
733 __xfs_inode_set_reclaim_tag( struct xfs_perag *pag, struct xfs_inode *ip) __xfs_inode_set_reclaim_tag() argument
764 xfs_inode_set_reclaim_tag( xfs_inode_t *ip) xfs_inode_set_reclaim_tag() argument
781 __xfs_inode_clear_reclaim( xfs_perag_t *pag, xfs_inode_t *ip) __xfs_inode_clear_reclaim() argument
799 __xfs_inode_clear_reclaim_tag( xfs_mount_t *mp, xfs_perag_t *pag, xfs_inode_t *ip) __xfs_inode_clear_reclaim_tag() argument
814 xfs_reclaim_inode_grab( struct xfs_inode *ip, int flags) xfs_reclaim_inode_grab() argument
895 xfs_reclaim_inode( struct xfs_inode *ip, struct xfs_perag *pag, int sync_mode) xfs_reclaim_inode() argument
1191 xfs_inode_match_id( struct xfs_inode *ip, struct xfs_eofblocks *eofb) xfs_inode_match_id() argument
1215 xfs_inode_match_id_union( struct xfs_inode *ip, struct xfs_eofblocks *eofb) xfs_inode_match_id_union() argument
1235 xfs_inode_free_eofblocks( struct xfs_inode *ip, int flags, void *args) xfs_inode_free_eofblocks() argument
1314 xfs_inode_free_quota_eofblocks( struct xfs_inode *ip) xfs_inode_free_quota_eofblocks() argument
1357 xfs_inode_set_eofblocks_tag( xfs_inode_t *ip) xfs_inode_set_eofblocks_tag() argument
1393 xfs_inode_clear_eofblocks_tag( xfs_inode_t *ip) xfs_inode_clear_eofblocks_tag() argument
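
Almost every function in the xfs_icache.c results above converts between an absolute inode number and a (pag, agino) pair with XFS_INO_TO_AGNO() and XFS_INO_TO_AGINO(). The sketch below assumes the conventional split (allocation-group number in the high bits, AG-relative inode number in the low bits) and uses a made-up agino_bits value in place of the geometry the real macros read from the mount; it also mimics the cursor restart seen in xfs_inode_ag_walk(), where the next lookup index is the AG inode number of ip->i_ino + 1, guarded by a wrap check.

#include <stdint.h>
#include <stdio.h>

/* assumed layout: high bits select the allocation group, low bits the inode
 * inside it; agino_bits is a placeholder for the per-mount geometry value */
static uint32_t ino_to_agno(uint64_t ino, unsigned agino_bits)
{
        return (uint32_t)(ino >> agino_bits);
}

static uint32_t ino_to_agino(uint64_t ino, unsigned agino_bits)
{
        return (uint32_t)(ino & ((1ull << agino_bits) - 1));
}

int main(void)
{
        const unsigned agino_bits = 27;              /* illustrative only */
        uint64_t ino = 0x215a3f00dULL;

        printf("ino 0x%llx -> agno %u, agino %u\n", (unsigned long long)ino,
               ino_to_agno(ino, agino_bits), ino_to_agino(ino, agino_bits));

        /* like: first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
         *       if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) done = 1;  */
        uint32_t first_index = ino_to_agino(ino + 1, agino_bits);
        if (first_index < ino_to_agino(ino, agino_bits))
                printf("agino wrapped: stop walking this AG\n");
        else
                printf("resume the radix tree lookup at agino %u\n", first_index);
        return 0;
}
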
H A Dxfs_inode.h79 static inline struct inode *VFS_I(struct xfs_inode *ip) VFS_I() argument
81 return &ip->i_vnode; VFS_I()
89 static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip) XFS_ISIZE() argument
91 if (S_ISREG(ip->i_d.di_mode)) XFS_ISIZE()
92 return i_size_read(VFS_I(ip)); XFS_ISIZE()
93 return ip->i_d.di_size; XFS_ISIZE()
101 xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size) xfs_new_eof() argument
103 xfs_fsize_t i_size = i_size_read(VFS_I(ip)); xfs_new_eof()
107 return new_size > ip->i_d.di_size ? new_size : 0; xfs_new_eof()
114 __xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) __xfs_iflags_set() argument
116 ip->i_flags |= flags; __xfs_iflags_set()
120 xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) xfs_iflags_set() argument
122 spin_lock(&ip->i_flags_lock); xfs_iflags_set()
123 __xfs_iflags_set(ip, flags); xfs_iflags_set()
124 spin_unlock(&ip->i_flags_lock); xfs_iflags_set()
128 xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags) xfs_iflags_clear() argument
130 spin_lock(&ip->i_flags_lock); xfs_iflags_clear()
131 ip->i_flags &= ~flags; xfs_iflags_clear()
132 spin_unlock(&ip->i_flags_lock); xfs_iflags_clear()
136 __xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) __xfs_iflags_test() argument
138 return (ip->i_flags & flags); __xfs_iflags_test()
142 xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) xfs_iflags_test() argument
145 spin_lock(&ip->i_flags_lock); xfs_iflags_test()
146 ret = __xfs_iflags_test(ip, flags); xfs_iflags_test()
147 spin_unlock(&ip->i_flags_lock); xfs_iflags_test()
152 xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) xfs_iflags_test_and_clear() argument
156 spin_lock(&ip->i_flags_lock); xfs_iflags_test_and_clear()
157 ret = ip->i_flags & flags; xfs_iflags_test_and_clear()
159 ip->i_flags &= ~flags; xfs_iflags_test_and_clear()
160 spin_unlock(&ip->i_flags_lock); xfs_iflags_test_and_clear()
165 xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags) xfs_iflags_test_and_set() argument
169 spin_lock(&ip->i_flags_lock); xfs_iflags_test_and_set()
170 ret = ip->i_flags & flags; xfs_iflags_test_and_set()
172 ip->i_flags |= flags; xfs_iflags_test_and_set()
173 spin_unlock(&ip->i_flags_lock); xfs_iflags_test_and_set()
183 xfs_get_projid(struct xfs_inode *ip) xfs_get_projid() argument
185 return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo; xfs_get_projid()
189 xfs_set_projid(struct xfs_inode *ip, xfs_set_projid() argument
192 ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16); xfs_set_projid()
193 ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff); xfs_set_projid()
233 extern void __xfs_iflock(struct xfs_inode *ip);
235 static inline int xfs_iflock_nowait(struct xfs_inode *ip) xfs_iflock_nowait() argument
237 return !xfs_iflags_test_and_set(ip, XFS_IFLOCK); xfs_iflock_nowait()
240 static inline void xfs_iflock(struct xfs_inode *ip) xfs_iflock() argument
242 if (!xfs_iflock_nowait(ip)) xfs_iflock()
243 __xfs_iflock(ip); xfs_iflock()
246 static inline void xfs_ifunlock(struct xfs_inode *ip) xfs_ifunlock() argument
248 xfs_iflags_clear(ip, XFS_IFLOCK); xfs_ifunlock()
250 wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT); xfs_ifunlock()
253 static inline int xfs_isiflocked(struct xfs_inode *ip) xfs_isiflocked() argument
255 return xfs_iflags_test(ip, XFS_IFLOCK); xfs_isiflocked()
374 int xfs_release(struct xfs_inode *ip);
375 void xfs_inactive(struct xfs_inode *ip);
383 struct xfs_inode *ip);
413 #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
419 xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
435 int xfs_update_prealloc_flags(struct xfs_inode *ip,
437 int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
439 int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
450 extern void xfs_setup_inode(struct xfs_inode *ip); xfs_finish_inode_setup()
451 static inline void xfs_finish_inode_setup(struct xfs_inode *ip) xfs_finish_inode_setup() argument
453 xfs_iflags_clear(ip, XFS_INEW); xfs_finish_inode_setup()
455 unlock_new_inode(VFS_I(ip)); xfs_finish_inode_setup()
458 static inline void xfs_setup_existing_inode(struct xfs_inode *ip) xfs_setup_existing_inode() argument
460 xfs_setup_inode(ip); xfs_setup_existing_inode()
461 xfs_finish_inode_setup(ip); xfs_setup_existing_inode()
464 #define IHOLD(ip) \
466 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
467 ihold(VFS_I(ip)); \
468 trace_xfs_ihold(ip, _THIS_IP_); \
471 #define IRELE(ip) \
473 trace_xfs_irele(ip, _THIS_IP_); \
474 iput(VFS_I(ip)); \
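
The xfs_iflags_* helpers above are a compact pattern worth noting: a dedicated spinlock (ip->i_flags_lock) guards a plain unsigned short of flags, and xfs_iflock_nowait() is simply !xfs_iflags_test_and_set(ip, XFS_IFLOCK), so the inode flush "lock" is really a bit taken and released under that spinlock. Below is a generic userspace rendering of the same pattern, using pthread_spinlock_t in place of the kernel spinlock; it is a sketch of the idiom, not the kernel code.

#include <pthread.h>
#include <stdbool.h>

struct obj {
        pthread_spinlock_t flags_lock;       /* plays the role of ip->i_flags_lock */
        unsigned short     flags;
};

static void obj_flags_set(struct obj *o, unsigned short flags)
{
        pthread_spin_lock(&o->flags_lock);
        o->flags |= flags;
        pthread_spin_unlock(&o->flags_lock);
}

static void obj_flags_clear(struct obj *o, unsigned short flags)
{
        pthread_spin_lock(&o->flags_lock);
        o->flags &= ~flags;
        pthread_spin_unlock(&o->flags_lock);
}

/* like xfs_iflags_test_and_set(): report the old state and set the bits only
 * if they were not already set, all inside one critical section */
static bool obj_flags_test_and_set(struct obj *o, unsigned short flags)
{
        bool was_set;

        pthread_spin_lock(&o->flags_lock);
        was_set = (o->flags & flags) != 0;
        if (!was_set)
                o->flags |= flags;
        pthread_spin_unlock(&o->flags_lock);
        return was_set;
}

/* like xfs_iflock_nowait(): a "trylock" built on the flag helpers */
static bool obj_trylock_flag(struct obj *o, unsigned short lock_bit)
{
        return !obj_flags_test_and_set(o, lock_bit);
}

int main(void)
{
        struct obj o = { .flags = 0 };

        pthread_spin_init(&o.flags_lock, PTHREAD_PROCESS_PRIVATE);
        obj_flags_set(&o, 0x0004);
        if (obj_trylock_flag(&o, 0x0100))    /* first attempt wins the bit        */
                obj_flags_clear(&o, 0x0100); /* like xfs_ifunlock(), minus wakeup */
        pthread_spin_destroy(&o.flags_lock);
        return 0;
}
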
H A Dxfs_symlink.c47 struct xfs_inode *ip, xfs_readlink_bmap()
50 struct xfs_mount *mp = ip->i_mount; xfs_readlink_bmap()
55 int pathlen = ip->i_d.di_size; xfs_readlink_bmap()
64 error = xfs_bmapi_read(ip, 0, fsblocks, mval, &nmaps, 0); xfs_readlink_bmap()
93 if (!xfs_symlink_hdr_ok(ip->i_ino, offset, xfs_readlink_bmap()
98 offset, byte_cnt, ip->i_ino); xfs_readlink_bmap()
116 link[ip->i_d.di_size] = '\0'; xfs_readlink_bmap()
125 struct xfs_inode *ip, xfs_readlink()
128 struct xfs_mount *mp = ip->i_mount; xfs_readlink()
132 trace_xfs_readlink(ip); xfs_readlink()
137 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_readlink()
139 pathlen = ip->i_d.di_size; xfs_readlink()
145 __func__, (unsigned long long) ip->i_ino, xfs_readlink()
153 if (ip->i_df.if_flags & XFS_IFINLINE) { xfs_readlink()
154 memcpy(link, ip->i_df.if_u1.if_data, pathlen); xfs_readlink()
157 error = xfs_readlink_bmap(ip, link); xfs_readlink()
161 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_readlink()
175 struct xfs_inode *ip = NULL; xfs_symlink() local
281 prid, resblks > 0, &ip, NULL); xfs_symlink()
298 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); xfs_symlink()
305 if (pathlen <= XFS_IFORK_DSIZE(ip)) { xfs_symlink()
306 xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK); xfs_symlink()
307 memcpy(ip->i_df.if_u1.if_data, target_path, pathlen); xfs_symlink()
308 ip->i_d.di_size = pathlen; xfs_symlink()
313 ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT); xfs_symlink()
314 ip->i_df.if_flags |= XFS_IFINLINE; xfs_symlink()
316 ip->i_d.di_format = XFS_DINODE_FMT_LOCAL; xfs_symlink()
317 xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE); xfs_symlink()
325 error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks, xfs_symlink()
333 ip->i_d.di_size = pathlen; xfs_symlink()
334 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_symlink()
355 buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset, xfs_symlink()
374 error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, xfs_symlink()
402 *ipp = ip; xfs_symlink()
415 if (ip) { xfs_symlink()
416 xfs_finish_inode_setup(ip); xfs_symlink()
417 IRELE(ip); xfs_symlink()
434 struct xfs_inode *ip) xfs_inactive_symlink_rmt()
449 mp = ip->i_mount; xfs_inactive_symlink_rmt()
450 ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS); xfs_inactive_symlink_rmt()
458 ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2); xfs_inactive_symlink_rmt()
467 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink_rmt()
468 xfs_trans_ijoin(tp, ip, 0); xfs_inactive_symlink_rmt()
476 size = (int)ip->i_d.di_size; xfs_inactive_symlink_rmt()
477 ip->i_d.di_size = 0; xfs_inactive_symlink_rmt()
478 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_inactive_symlink_rmt()
485 error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size), xfs_inactive_symlink_rmt()
505 error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps, xfs_inactive_symlink_rmt()
527 xfs_trans_ijoin(tp, ip, 0); xfs_inactive_symlink_rmt()
528 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_inactive_symlink_rmt()
541 if (ip->i_df.if_bytes) xfs_inactive_symlink_rmt()
542 xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK); xfs_inactive_symlink_rmt()
543 ASSERT(ip->i_df.if_bytes == 0); xfs_inactive_symlink_rmt()
545 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink_rmt()
553 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink_rmt()
562 struct xfs_inode *ip) xfs_inactive_symlink()
564 struct xfs_mount *mp = ip->i_mount; xfs_inactive_symlink()
567 trace_xfs_inactive_symlink(ip); xfs_inactive_symlink()
572 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink()
577 pathlen = (int)ip->i_d.di_size; xfs_inactive_symlink()
579 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink()
585 __func__, (unsigned long long)ip->i_ino, pathlen); xfs_inactive_symlink()
586 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink()
591 if (ip->i_df.if_flags & XFS_IFINLINE) { xfs_inactive_symlink()
592 if (ip->i_df.if_bytes > 0) xfs_inactive_symlink()
593 xfs_idata_realloc(ip, -(ip->i_df.if_bytes), xfs_inactive_symlink()
595 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink()
596 ASSERT(ip->i_df.if_bytes == 0); xfs_inactive_symlink()
600 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_symlink()
603 return xfs_inactive_symlink_rmt(ip); xfs_inactive_symlink()
46 xfs_readlink_bmap( struct xfs_inode *ip, char *link) xfs_readlink_bmap() argument
124 xfs_readlink( struct xfs_inode *ip, char *link) xfs_readlink() argument
433 xfs_inactive_symlink_rmt( struct xfs_inode *ip) xfs_inactive_symlink_rmt() argument
561 xfs_inactive_symlink( struct xfs_inode *ip) xfs_inactive_symlink() argument
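
xfs_symlink() above makes a simple size decision: if the target path fits in the inode's literal area (pathlen <= XFS_IFORK_DSIZE(ip)) it is copied straight into the data fork and the inode is marked local format, otherwise real blocks are allocated and the target is written out through buffers. Here is a toy sketch of just that decision; fork_space is an invented constant standing in for XFS_IFORK_DSIZE(), which in reality depends on the inode size and whether an attribute fork is present.

#include <stdio.h>
#include <string.h>

enum link_format { LINK_LOCAL, LINK_EXTENTS };

/* like: if (pathlen <= XFS_IFORK_DSIZE(ip)) ... inline copy ... else ... xfs_bmapi_write ... */
static enum link_format choose_symlink_format(const char *target, size_t fork_space)
{
        return strlen(target) <= fork_space ? LINK_LOCAL : LINK_EXTENTS;
}

int main(void)
{
        const size_t fork_space = 156;                /* invented value */
        const char *targets[] = {
                "../etc/hostname",
                "/a/deliberately/long/path/that/does/not/fit/in/the/inode/"
                "literal/area/and/so/needs/remote/symlink/blocks/to/hold/it/"
                "padding/padding/padding/padding/padding/padding/padding/x",
        };

        for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
                printf("%3zu bytes -> %s\n", strlen(targets[i]),
                       choose_symlink_format(targets[i], fork_space) == LINK_LOCAL ?
                       "local (inline) data fork" : "extent format + symlink blocks");
        return 0;
}
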
H A Dxfs_inode_item.c46 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_data_fork_size() local
48 switch (ip->i_d.di_format) { xfs_inode_item_data_fork_size()
51 ip->i_d.di_nextents > 0 && xfs_inode_item_data_fork_size()
52 ip->i_df.if_bytes > 0) { xfs_inode_item_data_fork_size()
54 *nbytes += XFS_IFORK_DSIZE(ip); xfs_inode_item_data_fork_size()
60 ip->i_df.if_broot_bytes > 0) { xfs_inode_item_data_fork_size()
61 *nbytes += ip->i_df.if_broot_bytes; xfs_inode_item_data_fork_size()
67 ip->i_df.if_bytes > 0) { xfs_inode_item_data_fork_size()
68 *nbytes += roundup(ip->i_df.if_bytes, 4); xfs_inode_item_data_fork_size()
88 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_attr_fork_size() local
90 switch (ip->i_d.di_aformat) { xfs_inode_item_attr_fork_size()
93 ip->i_d.di_anextents > 0 && xfs_inode_item_attr_fork_size()
94 ip->i_afp->if_bytes > 0) { xfs_inode_item_attr_fork_size()
96 *nbytes += XFS_IFORK_ASIZE(ip); xfs_inode_item_attr_fork_size()
102 ip->i_afp->if_broot_bytes > 0) { xfs_inode_item_attr_fork_size()
103 *nbytes += ip->i_afp->if_broot_bytes; xfs_inode_item_attr_fork_size()
109 ip->i_afp->if_bytes > 0) { xfs_inode_item_attr_fork_size()
110 *nbytes += roundup(ip->i_afp->if_bytes, 4); xfs_inode_item_attr_fork_size()
134 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_size() local
138 xfs_icdinode_size(ip->i_d.di_version); xfs_inode_item_size()
141 if (XFS_IFORK_Q(ip)) xfs_inode_item_size()
152 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_format_data_fork() local
155 switch (ip->i_d.di_format) { xfs_inode_item_format_data_fork()
162 ip->i_d.di_nextents > 0 && xfs_inode_item_format_data_fork()
163 ip->i_df.if_bytes > 0) { xfs_inode_item_format_data_fork()
166 ASSERT(ip->i_df.if_u1.if_extents != NULL); xfs_inode_item_format_data_fork()
167 ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0); xfs_inode_item_format_data_fork()
170 data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK); xfs_inode_item_format_data_fork()
173 ASSERT(data_bytes <= ip->i_df.if_bytes); xfs_inode_item_format_data_fork()
187 ip->i_df.if_broot_bytes > 0) { xfs_inode_item_format_data_fork()
188 ASSERT(ip->i_df.if_broot != NULL); xfs_inode_item_format_data_fork()
190 ip->i_df.if_broot, xfs_inode_item_format_data_fork()
191 ip->i_df.if_broot_bytes); xfs_inode_item_format_data_fork()
192 ilf->ilf_dsize = ip->i_df.if_broot_bytes; xfs_inode_item_format_data_fork()
205 ip->i_df.if_bytes > 0) { xfs_inode_item_format_data_fork()
211 data_bytes = roundup(ip->i_df.if_bytes, 4); xfs_inode_item_format_data_fork()
212 ASSERT(ip->i_df.if_real_bytes == 0 || xfs_inode_item_format_data_fork()
213 ip->i_df.if_real_bytes == data_bytes); xfs_inode_item_format_data_fork()
214 ASSERT(ip->i_df.if_u1.if_data != NULL); xfs_inode_item_format_data_fork()
215 ASSERT(ip->i_d.di_size > 0); xfs_inode_item_format_data_fork()
217 ip->i_df.if_u1.if_data, data_bytes); xfs_inode_item_format_data_fork()
229 ilf->ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev; xfs_inode_item_format_data_fork()
236 ilf->ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid; xfs_inode_item_format_data_fork()
251 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_format_attr_fork() local
254 switch (ip->i_d.di_aformat) { xfs_inode_item_format_attr_fork()
260 ip->i_d.di_anextents > 0 && xfs_inode_item_format_attr_fork()
261 ip->i_afp->if_bytes > 0) { xfs_inode_item_format_attr_fork()
264 ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) == xfs_inode_item_format_attr_fork()
265 ip->i_d.di_anextents); xfs_inode_item_format_attr_fork()
266 ASSERT(ip->i_afp->if_u1.if_extents != NULL); xfs_inode_item_format_attr_fork()
269 data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK); xfs_inode_item_format_attr_fork()
283 ip->i_afp->if_broot_bytes > 0) { xfs_inode_item_format_attr_fork()
284 ASSERT(ip->i_afp->if_broot != NULL); xfs_inode_item_format_attr_fork()
287 ip->i_afp->if_broot, xfs_inode_item_format_attr_fork()
288 ip->i_afp->if_broot_bytes); xfs_inode_item_format_attr_fork()
289 ilf->ilf_asize = ip->i_afp->if_broot_bytes; xfs_inode_item_format_attr_fork()
300 ip->i_afp->if_bytes > 0) { xfs_inode_item_format_attr_fork()
306 data_bytes = roundup(ip->i_afp->if_bytes, 4); xfs_inode_item_format_attr_fork()
307 ASSERT(ip->i_afp->if_real_bytes == 0 || xfs_inode_item_format_attr_fork()
308 ip->i_afp->if_real_bytes == data_bytes); xfs_inode_item_format_attr_fork()
309 ASSERT(ip->i_afp->if_u1.if_data != NULL); xfs_inode_item_format_attr_fork()
311 ip->i_afp->if_u1.if_data, xfs_inode_item_format_attr_fork()
338 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_format() local
342 ASSERT(ip->i_d.di_version > 1); xfs_inode_item_format()
346 ilf->ilf_ino = ip->i_ino; xfs_inode_item_format()
347 ilf->ilf_blkno = ip->i_imap.im_blkno; xfs_inode_item_format()
348 ilf->ilf_len = ip->i_imap.im_len; xfs_inode_item_format()
349 ilf->ilf_boffset = ip->i_imap.im_boffset; xfs_inode_item_format()
355 &ip->i_d, xfs_inode_item_format()
356 xfs_icdinode_size(ip->i_d.di_version)); xfs_inode_item_format()
359 if (XFS_IFORK_Q(ip)) { xfs_inode_item_format()
378 struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; xfs_inode_item_pin() local
380 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_inode_item_pin()
382 trace_xfs_inode_pin(ip, _RET_IP_); xfs_inode_item_pin()
383 atomic_inc(&ip->i_pincount); xfs_inode_item_pin()
398 struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; xfs_inode_item_unpin() local
400 trace_xfs_inode_unpin(ip, _RET_IP_); xfs_inode_item_unpin()
401 ASSERT(atomic_read(&ip->i_pincount) > 0); xfs_inode_item_unpin()
402 if (atomic_dec_and_test(&ip->i_pincount)) xfs_inode_item_unpin()
403 wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT); xfs_inode_item_unpin()
412 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_push() local
417 if (xfs_ipincount(ip) > 0) xfs_inode_item_push()
420 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) xfs_inode_item_push()
427 if (xfs_ipincount(ip) > 0) { xfs_inode_item_push()
435 if (ip->i_flags & XFS_ISTALE) { xfs_inode_item_push()
445 if (!xfs_iflock_nowait(ip)) { xfs_inode_item_push()
450 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); xfs_inode_item_push()
451 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); xfs_inode_item_push()
455 error = xfs_iflush(ip, &bp); xfs_inode_item_push()
464 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_inode_item_push()
479 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_unlock() local
482 ASSERT(ip->i_itemp != NULL); xfs_inode_item_unlock()
483 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_inode_item_unlock()
488 xfs_iunlock(ip, lock_flags); xfs_inode_item_unlock()
519 struct xfs_inode *ip = iip->ili_inode; xfs_inode_item_committed() local
521 if (xfs_iflags_test(ip, XFS_ISTALE)) { xfs_inode_item_committed()
560 struct xfs_inode *ip, xfs_inode_item_init()
565 ASSERT(ip->i_itemp == NULL); xfs_inode_item_init()
566 iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); xfs_inode_item_init()
568 iip->ili_inode = ip; xfs_inode_item_init()
578 xfs_inode_t *ip) xfs_inode_item_destroy()
580 kmem_zone_free(xfs_ili_zone, ip->i_itemp); xfs_inode_item_destroy()
700 xfs_inode_t *ip, xfs_iflush_abort()
703 xfs_inode_log_item_t *iip = ip->i_itemp; xfs_iflush_abort()
727 xfs_ifunlock(ip); xfs_iflush_abort()
559 xfs_inode_item_init( struct xfs_inode *ip, struct xfs_mount *mp) xfs_inode_item_init() argument
577 xfs_inode_item_destroy( xfs_inode_t *ip) xfs_inode_item_destroy() argument
699 xfs_iflush_abort( xfs_inode_t *ip, bool stale) xfs_iflush_abort() argument
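
The size and format helpers above log an inline data or attribute fork rounded up to a 4-byte boundary (roundup(ip->i_df.if_bytes, 4) and the matching if_real_bytes asserts). A small self-contained check of that arithmetic; roundup4() here is a local stand-in for the kernel's roundup() macro:

    #include <stdio.h>

    static unsigned int roundup4(unsigned int bytes)
    {
            return (bytes + 3) & ~3u;    /* same result as roundup(bytes, 4) */
    }

    int main(void)
    {
            unsigned int sizes[] = { 1, 4, 13, 100 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("if_bytes=%3u -> logged %3u bytes\n",
                           sizes[i], roundup4(sizes[i]));
            return 0;
    }
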
H A Dxfs_trans_inode.c40 struct xfs_inode *ip, xfs_trans_ijoin()
45 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_trans_ijoin()
46 if (ip->i_itemp == NULL) xfs_trans_ijoin()
47 xfs_inode_item_init(ip, ip->i_mount); xfs_trans_ijoin()
48 iip = ip->i_itemp; xfs_trans_ijoin()
67 struct xfs_inode *ip, xfs_trans_ichgtime()
70 struct inode *inode = VFS_I(ip); xfs_trans_ichgtime()
74 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_trans_ichgtime()
81 ip->i_d.di_mtime.t_sec = tv.tv_sec; xfs_trans_ichgtime()
82 ip->i_d.di_mtime.t_nsec = tv.tv_nsec; xfs_trans_ichgtime()
87 ip->i_d.di_ctime.t_sec = tv.tv_sec; xfs_trans_ichgtime()
88 ip->i_d.di_ctime.t_nsec = tv.tv_nsec; xfs_trans_ichgtime()
104 xfs_inode_t *ip, xfs_trans_log_inode()
107 ASSERT(ip->i_itemp != NULL); xfs_trans_log_inode()
108 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_trans_log_inode()
117 ip->i_itemp->ili_fsync_fields |= flags; xfs_trans_log_inode()
126 if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) && xfs_trans_log_inode()
127 IS_I_VERSION(VFS_I(ip))) { xfs_trans_log_inode()
128 ip->i_d.di_changecount = ++VFS_I(ip)->i_version; xfs_trans_log_inode()
133 ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY; xfs_trans_log_inode()
142 flags |= ip->i_itemp->ili_last_fields; xfs_trans_log_inode()
143 ip->i_itemp->ili_fields |= flags; xfs_trans_log_inode()
38 xfs_trans_ijoin( struct xfs_trans *tp, struct xfs_inode *ip, uint lock_flags) xfs_trans_ijoin() argument
65 xfs_trans_ichgtime( struct xfs_trans *tp, struct xfs_inode *ip, int flags) xfs_trans_ichgtime() argument
102 xfs_trans_log_inode( xfs_trans_t *tp, xfs_inode_t *ip, uint flags) xfs_trans_log_inode() argument
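
xfs_trans_ichgtime() above samples the current time once and copies it into di_mtime and/or di_ctime depending on the flag bits passed in. A user-space sketch of that selection; the CHGTIME_* values and the struct are illustrative, not the real XFS_ICHGTIME_* definitions:

    #include <stdio.h>
    #include <time.h>

    #define CHGTIME_MOD 0x1              /* update mtime */
    #define CHGTIME_CHG 0x2              /* update ctime */

    struct toy_icdinode {
            struct timespec di_mtime;
            struct timespec di_ctime;
    };

    static void toy_ichgtime(struct toy_icdinode *d, int flags)
    {
            struct timespec tv;

            clock_gettime(CLOCK_REALTIME, &tv);   /* one sample for both stamps */
            if (flags & CHGTIME_MOD)
                    d->di_mtime = tv;
            if (flags & CHGTIME_CHG)
                    d->di_ctime = tv;
    }

    int main(void)
    {
            struct toy_icdinode d = { { 0, 0 }, { 0, 0 } };

            toy_ichgtime(&d, CHGTIME_MOD | CHGTIME_CHG);
            printf("mtime=%lld ctime=%lld\n",
                   (long long)d.di_mtime.tv_sec, (long long)d.di_ctime.tv_sec);
            return 0;
    }
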
H A Dxfs_pnfs.c37 struct xfs_inode *ip = XFS_I(inode); xfs_break_layouts() local
40 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)); xfs_break_layouts()
43 xfs_iunlock(ip, *iolock); xfs_break_layouts()
50 xfs_ilock(ip, *iolock); xfs_break_layouts()
84 struct xfs_inode *ip, xfs_bmbt_to_iomap()
88 struct xfs_mount *mp = ip->i_mount; xfs_bmbt_to_iomap()
98 XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock); xfs_bmbt_to_iomap()
120 struct xfs_inode *ip = XFS_I(inode); xfs_fs_map_blocks() local
121 struct xfs_mount *mp = ip->i_mount; xfs_fs_map_blocks()
138 if (XFS_IS_REALTIME_INODE(ip)) xfs_fs_map_blocks()
148 xfs_ilock(ip, XFS_IOLOCK_EXCL); xfs_fs_map_blocks()
170 lock_flags = xfs_ilock_data_map_shared(ip); xfs_fs_map_blocks()
171 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, xfs_fs_map_blocks()
173 xfs_iunlock(ip, lock_flags); xfs_fs_map_blocks()
188 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_fs_map_blocks()
189 error = xfs_iomap_write_direct(ip, offset, length, xfs_fs_map_blocks()
203 error = xfs_update_prealloc_flags(ip, flags); xfs_fs_map_blocks()
207 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_fs_map_blocks()
209 xfs_bmbt_to_iomap(ip, iomap, &imap); xfs_fs_map_blocks()
213 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_fs_map_blocks()
222 struct xfs_inode *ip, xfs_pnfs_validate_isize()
229 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_pnfs_validate_isize()
230 error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1, xfs_pnfs_validate_isize()
232 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_pnfs_validate_isize()
260 struct xfs_inode *ip = XFS_I(inode); xfs_fs_commit_blocks() local
261 struct xfs_mount *mp = ip->i_mount; xfs_fs_commit_blocks()
269 xfs_ilock(ip, XFS_IOLOCK_EXCL); xfs_fs_commit_blocks()
300 error = xfs_iomap_write_unwritten(ip, start, length); xfs_fs_commit_blocks()
306 error = xfs_pnfs_validate_isize(ip, size); xfs_fs_commit_blocks()
318 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_fs_commit_blocks()
319 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_fs_commit_blocks()
320 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_fs_commit_blocks()
322 xfs_setattr_time(ip, iattr); xfs_fs_commit_blocks()
325 ip->i_d.di_size = iattr->ia_size; xfs_fs_commit_blocks()
332 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_fs_commit_blocks()
83 xfs_bmbt_to_iomap( struct xfs_inode *ip, struct iomap *iomap, struct xfs_bmbt_irec *imap) xfs_bmbt_to_iomap() argument
221 xfs_pnfs_validate_isize( struct xfs_inode *ip, xfs_off_t isize) xfs_pnfs_validate_isize() argument
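
xfs_break_layouts() above drops the iolock while waiting for outstanding pNFS layouts to be recalled, then retakes the lock and re-checks. A pthread sketch of that drop-wait-retake loop; the "recall" condition is simulated, so treat the whole thing as a shape illustration rather than the real locking protocol:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;
    static int layouts_outstanding = 2;   /* pretend two layouts must be recalled */

    static void toy_break_layouts(void)
    {
            while (layouts_outstanding > 0) {
                    pthread_mutex_unlock(&iolock);   /* drop the lock while waiting */
                    layouts_outstanding--;           /* stand-in for waiting on a recall */
                    pthread_mutex_lock(&iolock);     /* retake it and re-check */
            }
    }

    int main(void)
    {
            pthread_mutex_lock(&iolock);
            toy_break_layouts();
            printf("layouts outstanding: %d\n", layouts_outstanding);
            pthread_mutex_unlock(&iolock);
            return 0;
    }
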
H A Dxfs_iops.c68 struct xfs_inode *ip = XFS_I(inode); xfs_initxattrs() local
72 error = xfs_attr_set(ip, xattr->name, xattr->value, xfs_initxattrs()
135 struct xfs_inode *ip = NULL; xfs_generic_create() local
158 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); xfs_generic_create()
160 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); xfs_generic_create()
165 inode = VFS_I(ip); xfs_generic_create()
189 xfs_finish_inode_setup(ip); xfs_generic_create()
199 xfs_finish_inode_setup(ip); xfs_generic_create()
266 struct xfs_inode *ip; xfs_vn_ci_lookup() local
276 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); xfs_vn_ci_lookup()
290 return d_splice_alias(VFS_I(ip), dentry); xfs_vn_ci_lookup()
295 dentry = d_add_ci(dentry, VFS_I(ip), &dname); xfs_vn_ci_lookup()
447 struct xfs_inode *ip = XFS_I(inode); xfs_vn_getattr() local
448 struct xfs_mount *mp = ip->i_mount; xfs_vn_getattr()
450 trace_xfs_getattr(ip); xfs_vn_getattr()
455 stat->size = XFS_ISIZE(ip); xfs_vn_getattr()
457 stat->mode = ip->i_d.di_mode; xfs_vn_getattr()
458 stat->nlink = ip->i_d.di_nlink; xfs_vn_getattr()
461 stat->ino = ip->i_ino; xfs_vn_getattr()
466 XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); xfs_vn_getattr()
473 stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, xfs_vn_getattr()
474 sysv_minor(ip->i_df.if_u2.if_rdev)); xfs_vn_getattr()
477 if (XFS_IS_REALTIME_INODE(ip)) { xfs_vn_getattr()
484 xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; xfs_vn_getattr()
496 struct xfs_inode *ip, xfs_setattr_mode()
499 struct inode *inode = VFS_I(ip); xfs_setattr_mode()
502 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_setattr_mode()
504 ip->i_d.di_mode &= S_IFMT; xfs_setattr_mode()
505 ip->i_d.di_mode |= mode & ~S_IFMT; xfs_setattr_mode()
513 struct xfs_inode *ip, xfs_setattr_time()
516 struct inode *inode = VFS_I(ip); xfs_setattr_time()
518 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_setattr_time()
522 ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; xfs_setattr_time()
523 ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; xfs_setattr_time()
527 ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; xfs_setattr_time()
528 ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; xfs_setattr_time()
532 ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; xfs_setattr_time()
533 ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; xfs_setattr_time()
539 struct xfs_inode *ip, xfs_setattr_nonsize()
543 xfs_mount_t *mp = ip->i_mount; xfs_setattr_nonsize()
544 struct inode *inode = VFS_I(ip); xfs_setattr_nonsize()
553 trace_xfs_setattr(ip); xfs_setattr_nonsize()
601 error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid), xfs_setattr_nonsize()
603 xfs_get_projid(ip), xfs_setattr_nonsize()
614 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_setattr_nonsize()
639 error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, xfs_setattr_nonsize()
647 xfs_trans_ijoin(tp, ip, 0); xfs_setattr_nonsize()
659 if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && xfs_setattr_nonsize()
661 ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); xfs_setattr_nonsize()
671 olddquot1 = xfs_qm_vop_chown(tp, ip, xfs_setattr_nonsize()
672 &ip->i_udquot, udqp); xfs_setattr_nonsize()
674 ip->i_d.di_uid = xfs_kuid_to_uid(uid); xfs_setattr_nonsize()
683 olddquot2 = xfs_qm_vop_chown(tp, ip, xfs_setattr_nonsize()
684 &ip->i_gdquot, gdqp); xfs_setattr_nonsize()
686 ip->i_d.di_gid = xfs_kgid_to_gid(gid); xfs_setattr_nonsize()
692 xfs_setattr_mode(ip, iattr); xfs_setattr_nonsize()
694 xfs_setattr_time(ip, iattr); xfs_setattr_nonsize()
696 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_setattr_nonsize()
704 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_setattr_nonsize()
733 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_setattr_nonsize()
746 struct xfs_inode *ip, xfs_setattr_size()
749 struct xfs_mount *mp = ip->i_mount; xfs_setattr_size()
750 struct inode *inode = VFS_I(ip); xfs_setattr_size()
757 trace_xfs_setattr(ip); xfs_setattr_size()
769 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_setattr_size()
770 ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); xfs_setattr_size()
771 ASSERT(S_ISREG(ip->i_d.di_mode)); xfs_setattr_size()
781 if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { xfs_setattr_size()
789 return xfs_setattr_nonsize(ip, iattr, 0); xfs_setattr_size()
795 error = xfs_qm_dqattach(ip, 0); xfs_setattr_size()
809 error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); xfs_setattr_size()
822 if (newsize > ip->i_d.di_size && xfs_setattr_size()
823 (oldsize != ip->i_d.di_size || did_zeroing)) { xfs_setattr_size()
824 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, xfs_setattr_size()
825 ip->i_d.di_size, newsize); xfs_setattr_size()
865 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_setattr_size()
866 xfs_trans_ijoin(tp, ip, 0); xfs_setattr_size()
897 ip->i_d.di_size = newsize; xfs_setattr_size()
898 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_setattr_size()
901 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize); xfs_setattr_size()
912 xfs_iflags_set(ip, XFS_ITRUNCATED); xfs_setattr_size()
915 xfs_inode_clear_eofblocks_tag(ip); xfs_setattr_size()
919 xfs_setattr_mode(ip, iattr); xfs_setattr_size()
921 xfs_setattr_time(ip, iattr); xfs_setattr_size()
923 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_setattr_size()
933 xfs_iunlock(ip, lock_flags); xfs_setattr_size()
946 struct xfs_inode *ip = XFS_I(d_inode(dentry)); xfs_vn_setattr() local
952 xfs_ilock(ip, iolock); xfs_vn_setattr()
955 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); xfs_vn_setattr()
958 error = xfs_setattr_size(ip, iattr); xfs_vn_setattr()
960 xfs_iunlock(ip, iolock); xfs_vn_setattr()
962 error = xfs_setattr_nonsize(ip, iattr, 0); xfs_vn_setattr()
974 struct xfs_inode *ip = XFS_I(inode); xfs_vn_update_time() local
975 struct xfs_mount *mp = ip->i_mount; xfs_vn_update_time()
979 trace_xfs_update_time(ip); xfs_vn_update_time()
988 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_vn_update_time()
991 ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec; xfs_vn_update_time()
992 ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec; xfs_vn_update_time()
996 ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec; xfs_vn_update_time()
997 ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec; xfs_vn_update_time()
1001 ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec; xfs_vn_update_time()
1002 ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec; xfs_vn_update_time()
1004 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_vn_update_time()
1005 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); xfs_vn_update_time()
1061 xfs_inode_t *ip = XFS_I(inode); xfs_vn_fiemap() local
1088 error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); xfs_vn_fiemap()
1189 struct xfs_inode *ip) xfs_diflags_to_iflags()
1191 uint16_t flags = ip->i_d.di_flags; xfs_diflags_to_iflags()
1205 if (ip->i_mount->m_flags & XFS_MOUNT_DAX) xfs_diflags_to_iflags()
1219 struct xfs_inode *ip) xfs_setup_inode()
1221 struct inode *inode = &ip->i_vnode; xfs_setup_inode()
1224 inode->i_ino = ip->i_ino; xfs_setup_inode()
1231 inode->i_mode = ip->i_d.di_mode; xfs_setup_inode()
1232 set_nlink(inode, ip->i_d.di_nlink); xfs_setup_inode()
1233 inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid); xfs_setup_inode()
1234 inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid); xfs_setup_inode()
1240 MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, xfs_setup_inode()
1241 sysv_minor(ip->i_df.if_u2.if_rdev)); xfs_setup_inode()
1248 inode->i_generation = ip->i_d.di_gen; xfs_setup_inode()
1249 i_size_write(inode, ip->i_d.di_size); xfs_setup_inode()
1250 inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; xfs_setup_inode()
1251 inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; xfs_setup_inode()
1252 inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; xfs_setup_inode()
1253 inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; xfs_setup_inode()
1254 inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; xfs_setup_inode()
1255 inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_setup_inode()
1256 xfs_diflags_to_iflags(inode, ip); xfs_setup_inode()
1258 ip->d_ops = ip->i_mount->m_nondir_inode_ops; xfs_setup_inode()
1259 lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class); xfs_setup_inode()
1267 lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class); xfs_setup_inode()
1273 ip->d_ops = ip->i_mount->m_dir_inode_ops; xfs_setup_inode()
1277 if (!(ip->i_df.if_flags & XFS_IFINLINE)) xfs_setup_inode()
1298 if (!XFS_IFORK_Q(ip)) { xfs_setup_inode()
495 xfs_setattr_mode( struct xfs_inode *ip, struct iattr *iattr) xfs_setattr_mode() argument
512 xfs_setattr_time( struct xfs_inode *ip, struct iattr *iattr) xfs_setattr_time() argument
538 xfs_setattr_nonsize( struct xfs_inode *ip, struct iattr *iattr, int flags) xfs_setattr_nonsize() argument
745 xfs_setattr_size( struct xfs_inode *ip, struct iattr *iattr) xfs_setattr_size() argument
1187 xfs_diflags_to_iflags( struct inode *inode, struct xfs_inode *ip) xfs_diflags_to_iflags() argument
1218 xfs_setup_inode( struct xfs_inode *ip) xfs_setup_inode() argument
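
xfs_diflags_to_iflags() above walks the on-disk di_flags bits and sets the matching VFS inode flags one by one. A self-contained sketch of that kind of bit-for-bit translation; the TOY_* flag values are made up for the demo and do not match the real XFS_DIFLAG_* or S_* constants:

    #include <stdio.h>

    #define TOY_DIFLAG_IMMUTABLE 0x1
    #define TOY_DIFLAG_APPEND    0x2
    #define TOY_DIFLAG_NOATIME   0x4

    #define TOY_S_IMMUTABLE      0x10
    #define TOY_S_APPEND         0x20
    #define TOY_S_NOATIME        0x40

    static unsigned int toy_diflags_to_iflags(unsigned short di_flags)
    {
            unsigned int iflags = 0;

            if (di_flags & TOY_DIFLAG_IMMUTABLE)
                    iflags |= TOY_S_IMMUTABLE;
            if (di_flags & TOY_DIFLAG_APPEND)
                    iflags |= TOY_S_APPEND;
            if (di_flags & TOY_DIFLAG_NOATIME)
                    iflags |= TOY_S_NOATIME;
            return iflags;
    }

    int main(void)
    {
            printf("iflags = %#x\n",
                   toy_diflags_to_iflags(TOY_DIFLAG_IMMUTABLE | TOY_DIFLAG_NOATIME));
            return 0;
    }
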
H A Dxfs_bmap_util.c52 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) xfs_fsb_to_db() argument
54 return (XFS_IS_REALTIME_INODE(ip) ? \ xfs_fsb_to_db()
55 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ xfs_fsb_to_db()
56 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))); xfs_fsb_to_db()
68 struct xfs_inode *ip, xfs_zero_extent()
72 struct xfs_mount *mp = ip->i_mount; xfs_zero_extent()
73 xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb); xfs_zero_extent()
77 if (IS_DAX(VFS_I(ip))) xfs_zero_extent()
78 return dax_clear_blocks(VFS_I(ip), block, size); xfs_zero_extent()
176 mp = ap->ip->i_mount; xfs_bmap_rtalloc()
177 align = xfs_get_extsz_hint(ap->ip); xfs_bmap_rtalloc()
250 ap->ip->i_d.di_nblocks += ralen; xfs_bmap_rtalloc()
251 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); xfs_bmap_rtalloc()
253 ap->ip->i_delayed_blks -= ralen; xfs_bmap_rtalloc()
258 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, xfs_bmap_rtalloc()
264 error = xfs_zero_extent(ap->ip, ap->blkno, ap->length); xfs_bmap_rtalloc()
281 struct xfs_inode *ip, xfs_bmap_eof()
289 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof); xfs_bmap_eof()
422 xfs_inode_t *ip, /* incore inode */ xfs_bmap_count_blocks()
434 mp = ip->i_mount; xfs_bmap_count_blocks()
435 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_count_blocks()
436 if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { xfs_bmap_count_blocks()
469 xfs_inode_t *ip, /* xfs incore inode pointer */ xfs_getbmapx_fix_eof_hole()
483 mp = ip->i_mount; xfs_getbmapx_fix_eof_hole()
485 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip))); xfs_getbmapx_fix_eof_hole()
497 out->bmv_block = xfs_fsb_to_db(ip, startblock); xfs_getbmapx_fix_eof_hole()
498 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); xfs_getbmapx_fix_eof_hole()
499 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_getbmapx_fix_eof_hole()
517 xfs_inode_t *ip, xfs_getbmap()
541 mp = ip->i_mount; xfs_getbmap()
546 if (XFS_IFORK_Q(ip)) { xfs_getbmap()
547 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && xfs_getbmap()
548 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && xfs_getbmap()
549 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) xfs_getbmap()
552 ip->i_d.di_aformat != 0 && xfs_getbmap()
553 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { xfs_getbmap()
555 ip->i_mount); xfs_getbmap()
562 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && xfs_getbmap()
563 ip->i_d.di_format != XFS_DINODE_FMT_BTREE && xfs_getbmap()
564 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) xfs_getbmap()
567 if (xfs_get_extsz_hint(ip) || xfs_getbmap()
568 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ xfs_getbmap()
573 fixlen = XFS_ISIZE(ip); xfs_getbmap()
600 xfs_ilock(ip, XFS_IOLOCK_SHARED); xfs_getbmap()
603 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { xfs_getbmap()
604 error = filemap_write_and_wait(VFS_I(ip)->i_mapping); xfs_getbmap()
614 * ip->i_delayed_blks == 0. xfs_getbmap()
618 lock = xfs_ilock_data_map_shared(ip); xfs_getbmap()
620 lock = xfs_ilock_attr_map_shared(ip); xfs_getbmap()
627 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1) xfs_getbmap()
628 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; xfs_getbmap()
645 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 && xfs_getbmap()
655 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), xfs_getbmap()
685 map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip))) xfs_getbmap()
695 if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext], xfs_getbmap()
726 xfs_iunlock(ip, lock); xfs_getbmap()
728 xfs_iunlock(ip, XFS_IOLOCK_SHARED); xfs_getbmap()
753 struct xfs_inode *ip, xfs_bmap_punch_delalloc_range()
760 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bmap_punch_delalloc_range()
775 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps, xfs_bmap_punch_delalloc_range()
780 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_bmap_punch_delalloc_range()
781 xfs_alert(ip->i_mount, xfs_bmap_punch_delalloc_range()
783 ip->i_ino, start_fsb); xfs_bmap_punch_delalloc_range()
804 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock, xfs_bmap_punch_delalloc_range()
824 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force) xfs_can_free_eofblocks() argument
827 if (!S_ISREG(ip->i_d.di_mode)) xfs_can_free_eofblocks()
834 if (VFS_I(ip)->i_size == 0 && xfs_can_free_eofblocks()
835 VFS_I(ip)->i_mapping->nrpages == 0 && xfs_can_free_eofblocks()
836 ip->i_delayed_blks == 0) xfs_can_free_eofblocks()
840 if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) xfs_can_free_eofblocks()
847 if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) xfs_can_free_eofblocks()
848 if (!force || ip->i_delayed_blks == 0) xfs_can_free_eofblocks()
862 xfs_inode_t *ip, xfs_free_eofblocks()
877 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip)); xfs_free_eofblocks()
884 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_free_eofblocks()
885 error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0); xfs_free_eofblocks()
886 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_free_eofblocks()
890 ip->i_delayed_blks)) { xfs_free_eofblocks()
894 error = xfs_qm_dqattach(ip, 0); xfs_free_eofblocks()
906 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { xfs_free_eofblocks()
917 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_free_eofblocks()
921 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_free_eofblocks()
922 xfs_trans_ijoin(tp, ip, 0); xfs_free_eofblocks()
930 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, xfs_free_eofblocks()
931 XFS_ISIZE(ip)); xfs_free_eofblocks()
941 xfs_inode_clear_eofblocks_tag(ip); xfs_free_eofblocks()
944 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_free_eofblocks()
946 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_free_eofblocks()
953 struct xfs_inode *ip, xfs_alloc_file_space()
958 xfs_mount_t *mp = ip->i_mount; xfs_alloc_file_space()
975 trace_xfs_alloc_file_space(ip); xfs_alloc_file_space()
980 error = xfs_qm_dqattach(ip, 0); xfs_alloc_file_space()
987 rt = XFS_IS_REALTIME_INODE(ip); xfs_alloc_file_space()
988 extsz = xfs_get_extsz_hint(ip); xfs_alloc_file_space()
1055 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_alloc_file_space()
1056 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, xfs_alloc_file_space()
1061 xfs_trans_ijoin(tp, ip, 0); xfs_alloc_file_space()
1064 error = xfs_bmapi_write(tp, ip, startoffset_fsb, xfs_alloc_file_space()
1080 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_alloc_file_space()
1100 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag); xfs_alloc_file_space()
1104 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_alloc_file_space()
1121 xfs_inode_t *ip, xfs_zero_remaining_bytes()
1130 xfs_mount_t *mp = ip->i_mount; xfs_zero_remaining_bytes()
1139 if (startoff >= XFS_ISIZE(ip)) xfs_zero_remaining_bytes()
1142 if (endoff > XFS_ISIZE(ip)) xfs_zero_remaining_bytes()
1143 endoff = XFS_ISIZE(ip); xfs_zero_remaining_bytes()
1151 lock_mode = xfs_ilock_data_map_shared(ip); xfs_zero_remaining_bytes()
1152 error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0); xfs_zero_remaining_bytes()
1153 xfs_iunlock(ip, lock_mode); xfs_zero_remaining_bytes()
1174 if (IS_DAX(VFS_I(ip))) { xfs_zero_remaining_bytes()
1175 error = dax_zero_page_range(VFS_I(ip), offset, xfs_zero_remaining_bytes()
1183 error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ? xfs_zero_remaining_bytes()
1185 xfs_fsb_to_db(ip, imap.br_startblock), xfs_zero_remaining_bytes()
1205 struct xfs_inode *ip, xfs_free_file_space()
1227 mp = ip->i_mount; xfs_free_file_space()
1229 trace_xfs_free_file_space(ip); xfs_free_file_space()
1231 error = xfs_qm_dqattach(ip, 0); xfs_free_file_space()
1238 rt = XFS_IS_REALTIME_INODE(ip); xfs_free_file_space()
1243 inode_dio_wait(VFS_I(ip)); xfs_free_file_space()
1248 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset, xfs_free_file_space()
1252 truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset); xfs_free_file_space()
1262 error = xfs_bmapi_read(ip, startoffset_fsb, 1, xfs_free_file_space()
1277 error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1, xfs_free_file_space()
1293 error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1); xfs_free_file_space()
1299 error = xfs_zero_remaining_bytes(ip, offset, xfs_free_file_space()
1303 error = xfs_zero_remaining_bytes(ip, xfs_free_file_space()
1333 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_free_file_space()
1335 ip->i_udquot, ip->i_gdquot, ip->i_pdquot, xfs_free_file_space()
1340 xfs_trans_ijoin(tp, ip, 0); xfs_free_file_space()
1346 error = xfs_bunmapi(tp, ip, startoffset_fsb, xfs_free_file_space()
1362 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_free_file_space()
1372 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_free_file_space()
1382 struct xfs_inode *ip, xfs_zero_file_space()
1386 struct xfs_mount *mp = ip->i_mount; xfs_zero_file_space()
1390 trace_xfs_zero_file_space(ip); xfs_zero_file_space()
1403 error = xfs_free_file_space(ip, offset, len); xfs_zero_file_space()
1407 error = xfs_alloc_file_space(ip, round_down(offset, blksize), xfs_zero_file_space()
1426 struct xfs_inode *ip, xfs_shift_file_space()
1432 struct xfs_mount *mp = ip->i_mount; xfs_shift_file_space()
1446 stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size); xfs_shift_file_space()
1462 if (xfs_can_free_eofblocks(ip, true)) { xfs_shift_file_space()
1463 error = xfs_free_eofblocks(mp, ip, false); xfs_shift_file_space()
1472 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, xfs_shift_file_space()
1476 error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, xfs_shift_file_space()
1487 error = xfs_bmap_split_extent(ip, stop_fsb); xfs_shift_file_space()
1507 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_shift_file_space()
1508 error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, xfs_shift_file_space()
1509 ip->i_gdquot, ip->i_pdquot, xfs_shift_file_space()
1515 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_shift_file_space()
1523 error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb, xfs_shift_file_space()
1559 struct xfs_inode *ip, xfs_collapse_file_space()
1565 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_collapse_file_space()
1566 trace_xfs_collapse_file_space(ip); xfs_collapse_file_space()
1568 error = xfs_free_file_space(ip, offset, len); xfs_collapse_file_space()
1572 return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT); xfs_collapse_file_space()
1589 struct xfs_inode *ip, xfs_insert_file_space()
1593 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_insert_file_space()
1594 trace_xfs_insert_file_space(ip); xfs_insert_file_space()
1596 return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT); xfs_insert_file_space()
1621 xfs_inode_t *ip, /* target inode */ xfs_swap_extents_check_format()
1626 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || xfs_swap_extents_check_format()
1634 if (ip->i_d.di_nextents < tip->i_d.di_nextents) xfs_swap_extents_check_format()
1642 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && xfs_swap_extents_check_format()
1649 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) xfs_swap_extents_check_format()
1653 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && xfs_swap_extents_check_format()
1654 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > xfs_swap_extents_check_format()
1668 if (XFS_IFORK_BOFF(ip) && xfs_swap_extents_check_format()
1669 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) xfs_swap_extents_check_format()
1672 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) xfs_swap_extents_check_format()
1677 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { xfs_swap_extents_check_format()
1679 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) xfs_swap_extents_check_format()
1681 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= xfs_swap_extents_check_format()
1691 struct xfs_inode *ip) xfs_swap_extent_flush()
1695 error = filemap_write_and_wait(VFS_I(ip)->i_mapping); xfs_swap_extent_flush()
1698 truncate_pagecache_range(VFS_I(ip), 0, -1); xfs_swap_extent_flush()
1701 if (VFS_I(ip)->i_mapping->nrpages) xfs_swap_extent_flush()
1708 xfs_inode_t *ip, /* target inode */ xfs_swap_extents()
1712 xfs_mount_t *mp = ip->i_mount; xfs_swap_extents()
1736 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); xfs_swap_extents()
1737 xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL); xfs_swap_extents()
1740 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { xfs_swap_extents()
1746 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { xfs_swap_extents()
1751 error = xfs_swap_extent_flush(ip); xfs_swap_extents()
1769 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); xfs_swap_extents()
1771 xfs_trans_ijoin(tp, ip, lock_flags); xfs_swap_extents()
1777 sxp->sx_length != ip->i_d.di_size || xfs_swap_extents()
1783 trace_xfs_swap_extent_before(ip, 0); xfs_swap_extents()
1787 error = xfs_swap_extents_check_format(ip, tip); xfs_swap_extents()
1791 __func__, ip->i_ino); xfs_swap_extents()
1802 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) || xfs_swap_extents()
1803 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) || xfs_swap_extents()
1804 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) || xfs_swap_extents()
1805 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) { xfs_swap_extents()
1812 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) && xfs_swap_extents()
1813 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) { xfs_swap_extents()
1814 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks); xfs_swap_extents()
1841 if (ip->i_d.di_version == 3 && xfs_swap_extents()
1842 ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { xfs_swap_extents()
1844 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, xfs_swap_extents()
1854 ip->i_ino, NULL); xfs_swap_extents()
1862 ifp = &ip->i_df; xfs_swap_extents()
1871 tmp = (__uint64_t)ip->i_d.di_nblocks; xfs_swap_extents()
1872 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks; xfs_swap_extents()
1875 tmp = (__uint64_t) ip->i_d.di_nextents; xfs_swap_extents()
1876 ip->i_d.di_nextents = tip->i_d.di_nextents; xfs_swap_extents()
1879 tmp = (__uint64_t) ip->i_d.di_format; xfs_swap_extents()
1880 ip->i_d.di_format = tip->i_d.di_format; xfs_swap_extents()
1893 tip->i_delayed_blks = ip->i_delayed_blks; xfs_swap_extents()
1894 ip->i_delayed_blks = 0; xfs_swap_extents()
1896 switch (ip->i_d.di_format) { xfs_swap_extents()
1902 if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) { xfs_swap_extents()
1909 ASSERT(ip->i_d.di_version < 3 || xfs_swap_extents()
1934 xfs_trans_log_inode(tp, ip, src_log_flags); xfs_swap_extents()
1946 trace_xfs_swap_extent_after(ip, 0); xfs_swap_extents()
1953 xfs_iunlock(ip, lock_flags); xfs_swap_extents()
67 xfs_zero_extent( struct xfs_inode *ip, xfs_fsblock_t start_fsb, xfs_off_t count_fsb) xfs_zero_extent() argument
280 xfs_bmap_eof( struct xfs_inode *ip, xfs_fileoff_t endoff, int whichfork, int *eof) xfs_bmap_eof() argument
420 xfs_bmap_count_blocks( xfs_trans_t *tp, xfs_inode_t *ip, int whichfork, int *count) xfs_bmap_count_blocks() argument
468 xfs_getbmapx_fix_eof_hole( xfs_inode_t *ip, struct getbmapx *out, int prealloced, __int64_t end, xfs_fsblock_t startblock) xfs_getbmapx_fix_eof_hole() argument
516 xfs_getbmap( xfs_inode_t *ip, struct getbmapx *bmv, xfs_bmap_format_t formatter, void *arg) xfs_getbmap() argument
752 xfs_bmap_punch_delalloc_range( struct xfs_inode *ip, xfs_fileoff_t start_fsb, xfs_fileoff_t length) xfs_bmap_punch_delalloc_range() argument
860 xfs_free_eofblocks( xfs_mount_t *mp, xfs_inode_t *ip, bool need_iolock) xfs_free_eofblocks() argument
952 xfs_alloc_file_space( struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len, int alloc_type) xfs_alloc_file_space() argument
1120 xfs_zero_remaining_bytes( xfs_inode_t *ip, xfs_off_t startoff, xfs_off_t endoff) xfs_zero_remaining_bytes() argument
1204 xfs_free_file_space( struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len) xfs_free_file_space() argument
1381 xfs_zero_file_space( struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len) xfs_zero_file_space() argument
1425 xfs_shift_file_space( struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len, enum shift_direction direction) xfs_shift_file_space() argument
1558 xfs_collapse_file_space( struct xfs_inode *ip, xfs_off_t offset, xfs_off_t len) xfs_collapse_file_space() argument
1588 xfs_insert_file_space( struct xfs_inode *ip, loff_t offset, loff_t len) xfs_insert_file_space() argument
1620 xfs_swap_extents_check_format( xfs_inode_t *ip, xfs_inode_t *tip) xfs_swap_extents_check_format() argument
1690 xfs_swap_extent_flush( struct xfs_inode *ip) xfs_swap_extent_flush() argument
1707 xfs_swap_extents( xfs_inode_t *ip, xfs_inode_t *tip, xfs_swapext_t *sxp) xfs_swap_extents() argument
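
xfs_fsb_to_db() above maps a filesystem block number to a 512-byte disk address, and the surrounding callers convert byte offsets to filesystem blocks (XFS_B_TO_FSB) via the superblock's block-size log2. A user-space sketch of that shift-based arithmetic for an assumed 4096-byte block size (blocklog = 12); the helper names are local, not the real macros:

    #include <stdio.h>
    #include <stdint.h>

    #define BBSHIFT 9                    /* 512-byte "basic blocks" on disk */

    static uint64_t b_to_fsb(uint64_t bytes, int blocklog)
    {
            return (bytes + (1ULL << blocklog) - 1) >> blocklog;  /* round up */
    }

    static uint64_t fsb_to_bb(uint64_t fsb, int blocklog)
    {
            return fsb << (blocklog - BBSHIFT);
    }

    int main(void)
    {
            int blocklog = 12;           /* 4 KiB filesystem blocks */
            uint64_t off = 10000;        /* byte offset into the file */
            uint64_t fsb = b_to_fsb(off, blocklog);

            printf("offset %llu B -> %llu fsblocks -> %llu basic blocks\n",
                   (unsigned long long)off,
                   (unsigned long long)fsb,
                   (unsigned long long)fsb_to_bb(fsb, blocklog));
            return 0;
    }
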
H A Dxfs_qm.c261 xfs_inode_t *ip, xfs_qm_dqattach_one()
270 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_qm_dqattach_one()
290 error = xfs_qm_dqget(ip->i_mount, ip, id, type, xfs_qm_dqattach_one()
308 struct xfs_inode *ip) xfs_qm_need_dqattach()
310 struct xfs_mount *mp = ip->i_mount; xfs_qm_need_dqattach()
316 if (!XFS_NOT_DQATTACHED(mp, ip)) xfs_qm_need_dqattach()
318 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) xfs_qm_need_dqattach()
332 xfs_inode_t *ip, xfs_qm_dqattach_locked()
335 xfs_mount_t *mp = ip->i_mount; xfs_qm_dqattach_locked()
338 if (!xfs_qm_need_dqattach(ip)) xfs_qm_dqattach_locked()
341 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_qm_dqattach_locked()
343 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) { xfs_qm_dqattach_locked()
344 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, xfs_qm_dqattach_locked()
346 &ip->i_udquot); xfs_qm_dqattach_locked()
349 ASSERT(ip->i_udquot); xfs_qm_dqattach_locked()
352 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) { xfs_qm_dqattach_locked()
353 error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, xfs_qm_dqattach_locked()
355 &ip->i_gdquot); xfs_qm_dqattach_locked()
358 ASSERT(ip->i_gdquot); xfs_qm_dqattach_locked()
361 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) { xfs_qm_dqattach_locked()
362 error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, xfs_qm_dqattach_locked()
364 &ip->i_pdquot); xfs_qm_dqattach_locked()
367 ASSERT(ip->i_pdquot); xfs_qm_dqattach_locked()
375 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_qm_dqattach_locked()
381 struct xfs_inode *ip, xfs_qm_dqattach()
386 if (!xfs_qm_need_dqattach(ip)) xfs_qm_dqattach()
389 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqattach()
390 error = xfs_qm_dqattach_locked(ip, flags); xfs_qm_dqattach()
391 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_qm_dqattach()
403 xfs_inode_t *ip) xfs_qm_dqdetach()
405 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot)) xfs_qm_dqdetach()
408 trace_xfs_dquot_dqdetach(ip); xfs_qm_dqdetach()
410 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino)); xfs_qm_dqdetach()
411 if (ip->i_udquot) { xfs_qm_dqdetach()
412 xfs_qm_dqrele(ip->i_udquot); xfs_qm_dqdetach()
413 ip->i_udquot = NULL; xfs_qm_dqdetach()
415 if (ip->i_gdquot) { xfs_qm_dqdetach()
416 xfs_qm_dqrele(ip->i_gdquot); xfs_qm_dqdetach()
417 ip->i_gdquot = NULL; xfs_qm_dqdetach()
419 if (ip->i_pdquot) { xfs_qm_dqdetach()
420 xfs_qm_dqrele(ip->i_pdquot); xfs_qm_dqdetach()
421 ip->i_pdquot = NULL; xfs_qm_dqdetach()
716 xfs_inode_t **ip, xfs_qm_qino_alloc()
724 *ip = NULL; xfs_qm_qino_alloc()
746 error = xfs_iget(mp, NULL, ino, 0, 0, ip); xfs_qm_qino_alloc()
764 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, xfs_qm_qino_alloc()
790 mp->m_sb.sb_uquotino = (*ip)->i_ino; xfs_qm_qino_alloc()
792 mp->m_sb.sb_gquotino = (*ip)->i_ino; xfs_qm_qino_alloc()
794 mp->m_sb.sb_pquotino = (*ip)->i_ino; xfs_qm_qino_alloc()
804 xfs_finish_inode_setup(*ip); xfs_qm_qino_alloc()
1039 struct xfs_inode *ip, xfs_qm_quotacheck_dqadjust()
1045 struct xfs_mount *mp = ip->i_mount; xfs_qm_quotacheck_dqadjust()
1049 error = xfs_qm_dqget(mp, ip, id, type, xfs_qm_quotacheck_dqadjust()
1094 xfs_inode_t *ip, xfs_qm_get_rtblks()
1103 ASSERT(XFS_IS_REALTIME_INODE(ip)); xfs_qm_get_rtblks()
1104 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_qm_get_rtblks()
1106 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) xfs_qm_get_rtblks()
1131 xfs_inode_t *ip; xfs_qm_dqusage_adjust() local
1152 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); xfs_qm_dqusage_adjust()
1158 ASSERT(ip->i_delayed_blks == 0); xfs_qm_dqusage_adjust()
1160 if (XFS_IS_REALTIME_INODE(ip)) { xfs_qm_dqusage_adjust()
1164 error = xfs_qm_get_rtblks(ip, &rtblks); xfs_qm_dqusage_adjust()
1169 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; xfs_qm_dqusage_adjust()
1184 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, xfs_qm_dqusage_adjust()
1191 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, xfs_qm_dqusage_adjust()
1198 error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), xfs_qm_dqusage_adjust()
1204 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_qm_dqusage_adjust()
1205 IRELE(ip); xfs_qm_dqusage_adjust()
1210 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_qm_dqusage_adjust()
1211 IRELE(ip); xfs_qm_dqusage_adjust()
1601 struct xfs_inode *ip, xfs_qm_vop_dqalloc()
1610 struct xfs_mount *mp = ip->i_mount; xfs_qm_vop_dqalloc()
1621 xfs_ilock(ip, lockflags); xfs_qm_vop_dqalloc()
1623 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) xfs_qm_vop_dqalloc()
1624 gid = ip->i_d.di_gid; xfs_qm_vop_dqalloc()
1630 if (XFS_NOT_DQATTACHED(mp, ip)) { xfs_qm_vop_dqalloc()
1631 error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); xfs_qm_vop_dqalloc()
1633 xfs_iunlock(ip, lockflags); xfs_qm_vop_dqalloc()
1639 if (ip->i_d.di_uid != uid) { xfs_qm_vop_dqalloc()
1649 xfs_iunlock(ip, lockflags); xfs_qm_vop_dqalloc()
1664 xfs_ilock(ip, lockflags); xfs_qm_vop_dqalloc()
1670 ASSERT(ip->i_udquot); xfs_qm_vop_dqalloc()
1671 uq = xfs_qm_dqhold(ip->i_udquot); xfs_qm_vop_dqalloc()
1675 if (ip->i_d.di_gid != gid) { xfs_qm_vop_dqalloc()
1676 xfs_iunlock(ip, lockflags); xfs_qm_vop_dqalloc()
1688 xfs_ilock(ip, lockflags); xfs_qm_vop_dqalloc()
1690 ASSERT(ip->i_gdquot); xfs_qm_vop_dqalloc()
1691 gq = xfs_qm_dqhold(ip->i_gdquot); xfs_qm_vop_dqalloc()
1695 if (xfs_get_projid(ip) != prid) { xfs_qm_vop_dqalloc()
1696 xfs_iunlock(ip, lockflags); xfs_qm_vop_dqalloc()
1708 xfs_ilock(ip, lockflags); xfs_qm_vop_dqalloc()
1710 ASSERT(ip->i_pdquot); xfs_qm_vop_dqalloc()
1711 pq = xfs_qm_dqhold(ip->i_pdquot); xfs_qm_vop_dqalloc()
1715 trace_xfs_dquot_dqalloc(ip); xfs_qm_vop_dqalloc()
1717 xfs_iunlock(ip, lockflags); xfs_qm_vop_dqalloc()
1745 xfs_inode_t *ip, xfs_qm_vop_chown()
1750 uint bfield = XFS_IS_REALTIME_INODE(ip) ? xfs_qm_vop_chown()
1754 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_qm_vop_chown()
1755 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); xfs_qm_vop_chown()
1762 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); xfs_qm_vop_chown()
1766 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); xfs_qm_vop_chown()
1784 struct xfs_inode *ip, xfs_qm_vop_chown_reserve()
1790 struct xfs_mount *mp = ip->i_mount; xfs_qm_vop_chown_reserve()
1801 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); xfs_qm_vop_chown_reserve()
1804 delblks = ip->i_delayed_blks; xfs_qm_vop_chown_reserve()
1805 blkflags = XFS_IS_REALTIME_INODE(ip) ? xfs_qm_vop_chown_reserve()
1809 ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) { xfs_qm_vop_chown_reserve()
1817 ASSERT(ip->i_udquot); xfs_qm_vop_chown_reserve()
1818 udq_unres = ip->i_udquot; xfs_qm_vop_chown_reserve()
1821 if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && xfs_qm_vop_chown_reserve()
1822 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) { xfs_qm_vop_chown_reserve()
1825 ASSERT(ip->i_gdquot); xfs_qm_vop_chown_reserve()
1826 gdq_unres = ip->i_gdquot; xfs_qm_vop_chown_reserve()
1830 if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && xfs_qm_vop_chown_reserve()
1831 xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) { xfs_qm_vop_chown_reserve()
1835 ASSERT(ip->i_pdquot); xfs_qm_vop_chown_reserve()
1836 pdq_unres = ip->i_pdquot; xfs_qm_vop_chown_reserve()
1840 error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, xfs_qm_vop_chown_reserve()
1842 ip->i_d.di_nblocks, 1, xfs_qm_vop_chown_reserve()
1859 error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, xfs_qm_vop_chown_reserve()
1865 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, xfs_qm_vop_chown_reserve()
1884 struct xfs_inode *ip = i_tab[i]; xfs_qm_vop_rename_dqattach() local
1890 if (i == 0 || ip != i_tab[i-1]) { xfs_qm_vop_rename_dqattach()
1891 if (XFS_NOT_DQATTACHED(mp, ip)) { xfs_qm_vop_rename_dqattach()
1892 error = xfs_qm_dqattach(ip, 0); xfs_qm_vop_rename_dqattach()
1904 struct xfs_inode *ip, xfs_qm_vop_create_dqattach()
1914 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_qm_vop_create_dqattach()
1918 ASSERT(ip->i_udquot == NULL); xfs_qm_vop_create_dqattach()
1919 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); xfs_qm_vop_create_dqattach()
1921 ip->i_udquot = xfs_qm_dqhold(udqp); xfs_qm_vop_create_dqattach()
1925 ASSERT(ip->i_gdquot == NULL); xfs_qm_vop_create_dqattach()
1926 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id)); xfs_qm_vop_create_dqattach()
1927 ip->i_gdquot = xfs_qm_dqhold(gdqp); xfs_qm_vop_create_dqattach()
1931 ASSERT(ip->i_pdquot == NULL); xfs_qm_vop_create_dqattach()
1932 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id)); xfs_qm_vop_create_dqattach()
1934 ip->i_pdquot = xfs_qm_dqhold(pdqp); xfs_qm_vop_create_dqattach()
260 xfs_qm_dqattach_one( xfs_inode_t *ip, xfs_dqid_t id, uint type, uint doalloc, xfs_dquot_t **IO_idqpp) xfs_qm_dqattach_one() argument
307 xfs_qm_need_dqattach( struct xfs_inode *ip) xfs_qm_need_dqattach() argument
331 xfs_qm_dqattach_locked( xfs_inode_t *ip, uint flags) xfs_qm_dqattach_locked() argument
380 xfs_qm_dqattach( struct xfs_inode *ip, uint flags) xfs_qm_dqattach() argument
402 xfs_qm_dqdetach( xfs_inode_t *ip) xfs_qm_dqdetach() argument
714 xfs_qm_qino_alloc( xfs_mount_t *mp, xfs_inode_t **ip, uint flags) xfs_qm_qino_alloc() argument
1038 xfs_qm_quotacheck_dqadjust( struct xfs_inode *ip, xfs_dqid_t id, uint type, xfs_qcnt_t nblks, xfs_qcnt_t rtblks) xfs_qm_quotacheck_dqadjust() argument
1093 xfs_qm_get_rtblks( xfs_inode_t *ip, xfs_qcnt_t *O_rtblks) xfs_qm_get_rtblks() argument
1600 xfs_qm_vop_dqalloc( struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid, prid_t prid, uint flags, struct xfs_dquot **O_udqpp, struct xfs_dquot **O_gdqpp, struct xfs_dquot **O_pdqpp) xfs_qm_vop_dqalloc() argument
1743 xfs_qm_vop_chown( xfs_trans_t *tp, xfs_inode_t *ip, xfs_dquot_t **IO_olddq, xfs_dquot_t *newdq) xfs_qm_vop_chown() argument
1782 xfs_qm_vop_chown_reserve( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, uint flags) xfs_qm_vop_chown_reserve() argument
1902 xfs_qm_vop_create_dqattach( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp) xfs_qm_vop_create_dqattach() argument
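
xfs_qm_dqdetach() above bails out early if no dquots are attached, then drops each of the three per-inode dquot references (user, group, project) and clears the pointer. A small user-space analogue using a reference-counted stand-in object; all names here are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_dquot {
            int refcount;
    };

    static void toy_dqrele(struct toy_dquot *dq)
    {
            if (dq && --dq->refcount == 0)
                    free(dq);
    }

    struct toy_inode {
            struct toy_dquot *udquot;    /* user quota */
            struct toy_dquot *gdquot;    /* group quota */
            struct toy_dquot *pdquot;    /* project quota */
    };

    static void toy_dqdetach(struct toy_inode *ip)
    {
            if (!(ip->udquot || ip->gdquot || ip->pdquot))
                    return;              /* nothing attached, nothing to do */
            if (ip->udquot) { toy_dqrele(ip->udquot); ip->udquot = NULL; }
            if (ip->gdquot) { toy_dqrele(ip->gdquot); ip->gdquot = NULL; }
            if (ip->pdquot) { toy_dqrele(ip->pdquot); ip->pdquot = NULL; }
    }

    int main(void)
    {
            struct toy_dquot *u = calloc(1, sizeof(*u));
            struct toy_inode ip = { u, NULL, NULL };

            u->refcount = 1;
            toy_dqdetach(&ip);
            printf("udquot detached: %s\n", ip.udquot ? "no" : "yes");
            return 0;
    }
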
H A Dxfs_bmap_util.h32 int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
34 int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
36 int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
41 int xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
53 int xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
58 int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
60 int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
62 int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
70 bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
71 int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
74 int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
77 xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
H A Dxfs_ioctl.c75 struct xfs_inode *ip; xfs_find_handle() local
88 ip = XFS_I(inode); xfs_find_handle()
105 memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t)); xfs_find_handle()
117 handle.ha_fid.fid_gen = ip->i_d.di_gen; xfs_find_handle()
118 handle.ha_fid.fid_ino = ip->i_ino; xfs_find_handle()
323 xfs_inode_t *ip, xfs_set_dmattrs()
327 xfs_mount_t *mp = ip->i_mount; xfs_set_dmattrs()
343 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_set_dmattrs()
344 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_set_dmattrs()
346 ip->i_d.di_dmevmask = evmask; xfs_set_dmattrs()
347 ip->i_d.di_dmstate = state; xfs_set_dmattrs()
349 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_set_dmattrs()
614 struct xfs_inode *ip, xfs_ioc_space()
630 if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && xfs_ioc_space()
652 xfs_ilock(ip, iolock); xfs_ioc_space()
657 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); xfs_ioc_space()
667 bf->l_start += XFS_ISIZE(ip); xfs_ioc_space()
707 error = xfs_zero_file_space(ip, bf->l_start, bf->l_len); xfs_ioc_space()
712 error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len, xfs_ioc_space()
717 error = xfs_free_file_space(ip, bf->l_start, bf->l_len); xfs_ioc_space()
724 if (bf->l_start > XFS_ISIZE(ip)) { xfs_ioc_space()
725 error = xfs_alloc_file_space(ip, XFS_ISIZE(ip), xfs_ioc_space()
726 bf->l_start - XFS_ISIZE(ip), 0); xfs_ioc_space()
734 error = xfs_setattr_size(ip, &iattr); xfs_ioc_space()
744 error = xfs_update_prealloc_flags(ip, flags); xfs_ioc_space()
747 xfs_iunlock(ip, iolock); xfs_ioc_space()
906 xfs_inode_t *ip, xfs_ioc_fsgetxattr()
914 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_ioc_fsgetxattr()
915 fa.fsx_xflags = xfs_ip2xflags(ip); xfs_ioc_fsgetxattr()
916 fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; xfs_ioc_fsgetxattr()
917 fa.fsx_projid = xfs_get_projid(ip); xfs_ioc_fsgetxattr()
920 if (ip->i_afp) { xfs_ioc_fsgetxattr()
921 if (ip->i_afp->if_flags & XFS_IFEXTENTS) xfs_ioc_fsgetxattr()
922 fa.fsx_nextents = ip->i_afp->if_bytes / xfs_ioc_fsgetxattr()
925 fa.fsx_nextents = ip->i_d.di_anextents; xfs_ioc_fsgetxattr()
929 if (ip->i_df.if_flags & XFS_IFEXTENTS) xfs_ioc_fsgetxattr()
930 fa.fsx_nextents = ip->i_df.if_bytes / xfs_ioc_fsgetxattr()
933 fa.fsx_nextents = ip->i_d.di_nextents; xfs_ioc_fsgetxattr()
935 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_ioc_fsgetxattr()
944 struct xfs_inode *ip, xfs_set_diflags()
950 di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); xfs_set_diflags()
965 if (S_ISDIR(ip->i_d.di_mode)) { xfs_set_diflags()
974 } else if (S_ISREG(ip->i_d.di_mode)) { xfs_set_diflags()
981 ip->i_d.di_flags = di_flags; xfs_set_diflags()
986 struct xfs_inode *ip) xfs_diflags_to_linux()
988 struct inode *inode = VFS_I(ip); xfs_diflags_to_linux()
989 unsigned int xflags = xfs_ip2xflags(ip); xfs_diflags_to_linux()
1012 struct xfs_inode *ip, xfs_ioctl_setattr_xflags()
1015 struct xfs_mount *mp = ip->i_mount; xfs_ioctl_setattr_xflags()
1018 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && xfs_ioctl_setattr_xflags()
1019 XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & XFS_XFLAG_REALTIME)) xfs_ioctl_setattr_xflags()
1025 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) xfs_ioctl_setattr_xflags()
1033 if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) || xfs_ioctl_setattr_xflags()
1038 xfs_set_diflags(ip, fa->fsx_xflags); xfs_ioctl_setattr_xflags()
1039 xfs_diflags_to_linux(ip); xfs_ioctl_setattr_xflags()
1040 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_ioctl_setattr_xflags()
1041 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_ioctl_setattr_xflags()
1054 struct xfs_inode *ip) xfs_ioctl_setattr_get_trans()
1056 struct xfs_mount *mp = ip->i_mount; xfs_ioctl_setattr_get_trans()
1070 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_ioctl_setattr_get_trans()
1071 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_ioctl_setattr_get_trans()
1079 if (!inode_owner_or_capable(VFS_I(ip))) { xfs_ioctl_setattr_get_trans()
1110 struct xfs_inode *ip, xfs_ioctl_setattr_check_extsize()
1113 struct xfs_mount *mp = ip->i_mount; xfs_ioctl_setattr_check_extsize()
1115 if ((fa->fsx_xflags & XFS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode)) xfs_ioctl_setattr_check_extsize()
1119 !S_ISDIR(ip->i_d.di_mode)) xfs_ioctl_setattr_check_extsize()
1122 if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents && xfs_ioctl_setattr_check_extsize()
1123 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize)) xfs_ioctl_setattr_check_extsize()
1134 if (XFS_IS_REALTIME_INODE(ip) || xfs_ioctl_setattr_check_extsize()
1153 struct xfs_inode *ip, xfs_ioctl_setattr_check_projid()
1158 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) xfs_ioctl_setattr_check_projid()
1169 if (xfs_get_projid(ip) != fa->fsx_projid) xfs_ioctl_setattr_check_projid()
1172 (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)) xfs_ioctl_setattr_check_projid()
1180 xfs_inode_t *ip, xfs_ioctl_setattr()
1183 struct xfs_mount *mp = ip->i_mount; xfs_ioctl_setattr()
1190 trace_xfs_ioctl_setattr(ip); xfs_ioctl_setattr()
1192 code = xfs_ioctl_setattr_check_projid(ip, fa); xfs_ioctl_setattr()
1205 code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, xfs_ioctl_setattr()
1206 ip->i_d.di_gid, fa->fsx_projid, xfs_ioctl_setattr()
1212 tp = xfs_ioctl_setattr_get_trans(ip); xfs_ioctl_setattr()
1220 xfs_get_projid(ip) != fa->fsx_projid) { xfs_ioctl_setattr()
1221 code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp, xfs_ioctl_setattr()
1227 code = xfs_ioctl_setattr_check_extsize(ip, fa); xfs_ioctl_setattr()
1231 code = xfs_ioctl_setattr_xflags(tp, ip, fa); xfs_ioctl_setattr()
1243 if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && xfs_ioctl_setattr()
1244 !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID)) xfs_ioctl_setattr()
1245 ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); xfs_ioctl_setattr()
1248 if (xfs_get_projid(ip) != fa->fsx_projid) { xfs_ioctl_setattr()
1250 olddquot = xfs_qm_vop_chown(tp, ip, xfs_ioctl_setattr()
1251 &ip->i_pdquot, pdqp); xfs_ioctl_setattr()
1253 ASSERT(ip->i_d.di_version > 1); xfs_ioctl_setattr()
1254 xfs_set_projid(ip, fa->fsx_projid); xfs_ioctl_setattr()
1262 if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT)) xfs_ioctl_setattr()
1263 ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog; xfs_ioctl_setattr()
1265 ip->i_d.di_extsize = 0; xfs_ioctl_setattr()
1288 xfs_inode_t *ip, xfs_ioc_fssetxattr()
1301 error = xfs_ioctl_setattr(ip, &fa); xfs_ioc_fssetxattr()
1308 xfs_inode_t *ip, xfs_ioc_getxflags()
1313 flags = xfs_di2lxflags(ip->i_d.di_flags); xfs_ioc_getxflags()
1321 struct xfs_inode *ip, xfs_ioc_setxflags()
1338 fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); xfs_ioc_setxflags()
1344 tp = xfs_ioctl_setattr_get_trans(ip); xfs_ioc_setxflags()
1350 error = xfs_ioctl_setattr_xflags(tp, ip, &fa); xfs_ioc_setxflags()
1377 struct xfs_inode *ip, xfs_ioc_getbmap()
1395 error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, xfs_ioc_getbmap()
1420 struct xfs_inode *ip, xfs_ioc_getbmapx()
1435 error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, xfs_ioc_getbmapx()
1451 xfs_inode_t *ip, *tip; xfs_ioc_swapext() local
1488 ip = XFS_I(file_inode(f.file)); xfs_ioc_swapext()
1491 if (ip->i_mount != tip->i_mount) { xfs_ioc_swapext()
1496 if (ip->i_ino == tip->i_ino) { xfs_ioc_swapext()
1501 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_ioc_swapext()
1506 error = xfs_swap_extents(ip, tip, sxp); xfs_ioc_swapext()
1529 struct xfs_inode *ip = XFS_I(inode); xfs_file_ioctl() local
1530 struct xfs_mount *mp = ip->i_mount; xfs_file_ioctl()
1538 trace_xfs_file_ioctl(ip); xfs_file_ioctl()
1556 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); xfs_file_ioctl()
1561 XFS_IS_REALTIME_INODE(ip) ? xfs_file_ioctl()
1587 return xfs_ioc_fsgetxattr(ip, 0, arg); xfs_file_ioctl()
1589 return xfs_ioc_fsgetxattr(ip, 1, arg); xfs_file_ioctl()
1591 return xfs_ioc_fssetxattr(ip, filp, arg); xfs_file_ioctl()
1593 return xfs_ioc_getxflags(ip, arg); xfs_file_ioctl()
1595 return xfs_ioc_setxflags(ip, filp, arg); xfs_file_ioctl()
1607 error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, xfs_file_ioctl()
1615 return xfs_ioc_getbmap(ip, ioflags, cmd, arg); xfs_file_ioctl()
1618 return xfs_ioc_getbmapx(ip, arg); xfs_file_ioctl()
322 xfs_set_dmattrs( xfs_inode_t *ip, u_int evmask, u_int16_t state) xfs_set_dmattrs() argument
613 xfs_ioc_space( struct xfs_inode *ip, struct inode *inode, struct file *filp, int ioflags, unsigned int cmd, xfs_flock64_t *bf) xfs_ioc_space() argument
905 xfs_ioc_fsgetxattr( xfs_inode_t *ip, int attr, void __user *arg) xfs_ioc_fsgetxattr() argument
943 xfs_set_diflags( struct xfs_inode *ip, unsigned int xflags) xfs_set_diflags() argument
985 xfs_diflags_to_linux( struct xfs_inode *ip) xfs_diflags_to_linux() argument
1010 xfs_ioctl_setattr_xflags( struct xfs_trans *tp, struct xfs_inode *ip, struct fsxattr *fa) xfs_ioctl_setattr_xflags() argument
1053 xfs_ioctl_setattr_get_trans( struct xfs_inode *ip) xfs_ioctl_setattr_get_trans() argument
1109 xfs_ioctl_setattr_check_extsize( struct xfs_inode *ip, struct fsxattr *fa) xfs_ioctl_setattr_check_extsize() argument
1152 xfs_ioctl_setattr_check_projid( struct xfs_inode *ip, struct fsxattr *fa) xfs_ioctl_setattr_check_projid() argument
1179 xfs_ioctl_setattr( xfs_inode_t *ip, struct fsxattr *fa) xfs_ioctl_setattr() argument
1287 xfs_ioc_fssetxattr( xfs_inode_t *ip, struct file *filp, void __user *arg) xfs_ioc_fssetxattr() argument
1307 xfs_ioc_getxflags( xfs_inode_t *ip, void __user *arg) xfs_ioc_getxflags() argument
1320 xfs_ioc_setxflags( struct xfs_inode *ip, struct file *filp, void __user *arg) xfs_ioc_setxflags() argument
1376 xfs_ioc_getbmap( struct xfs_inode *ip, int ioflags, unsigned int cmd, void __user *arg) xfs_ioc_getbmap() argument
1419 xfs_ioc_getbmapx( struct xfs_inode *ip, void __user *arg) xfs_ioc_getbmapx() argument
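
xfs_ioc_fsgetxattr() above reports the extent count either from the in-core extent list (if_bytes divided by the extent record size) when XFS_IFEXTENTS is set, or from the on-disk counter otherwise. A minimal sketch of that choice; the 16-byte record size is an assumption standing in for sizeof(xfs_bmbt_rec_t):

    #include <stdio.h>

    #define REC_SIZE 16                  /* assumed extent record size */

    static unsigned int extent_count(int extents_in_core,
                                     unsigned int if_bytes,
                                     unsigned int di_nextents)
    {
            /* in-core list wins when present; otherwise trust the inode core */
            return extents_in_core ? if_bytes / REC_SIZE : di_nextents;
    }

    int main(void)
    {
            printf("in-core: %u extents, on-disk: %u extents\n",
                   extent_count(1, 64, 0), extent_count(0, 0, 7));
            return 0;
    }
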
H A Dxfs_inode.c69 struct xfs_inode *ip) xfs_get_extsz_hint()
71 if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize) xfs_get_extsz_hint()
72 return ip->i_d.di_extsize; xfs_get_extsz_hint()
73 if (XFS_IS_REALTIME_INODE(ip)) xfs_get_extsz_hint()
74 return ip->i_mount->m_sb.sb_rextsize; xfs_get_extsz_hint()
82 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards to
95 struct xfs_inode *ip) xfs_ilock_data_map_shared()
99 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE && xfs_ilock_data_map_shared()
100 (ip->i_df.if_flags & XFS_IFEXTENTS) == 0) xfs_ilock_data_map_shared()
102 xfs_ilock(ip, lock_mode); xfs_ilock_data_map_shared()
108 struct xfs_inode *ip) xfs_ilock_attr_map_shared()
112 if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE && xfs_ilock_attr_map_shared()
113 (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0) xfs_ilock_attr_map_shared()
115 xfs_ilock(ip, lock_mode); xfs_ilock_attr_map_shared()
151 xfs_inode_t *ip, xfs_ilock()
154 trace_xfs_ilock(ip, lock_flags, _RET_IP_); xfs_ilock()
170 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); xfs_ilock()
172 mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); xfs_ilock()
175 mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags)); xfs_ilock()
177 mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags)); xfs_ilock()
180 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); xfs_ilock()
182 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); xfs_ilock()
192 * ip -- the inode being locked
199 xfs_inode_t *ip, xfs_ilock_nowait()
202 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); xfs_ilock_nowait()
218 if (!mrtryupdate(&ip->i_iolock)) xfs_ilock_nowait()
221 if (!mrtryaccess(&ip->i_iolock)) xfs_ilock_nowait()
226 if (!mrtryupdate(&ip->i_mmaplock)) xfs_ilock_nowait()
229 if (!mrtryaccess(&ip->i_mmaplock)) xfs_ilock_nowait()
234 if (!mrtryupdate(&ip->i_lock)) xfs_ilock_nowait()
237 if (!mrtryaccess(&ip->i_lock)) xfs_ilock_nowait()
244 mrunlock_excl(&ip->i_mmaplock); xfs_ilock_nowait()
246 mrunlock_shared(&ip->i_mmaplock); xfs_ilock_nowait()
249 mrunlock_excl(&ip->i_iolock); xfs_ilock_nowait()
251 mrunlock_shared(&ip->i_iolock); xfs_ilock_nowait()
262 * ip -- the inode being unlocked
270 xfs_inode_t *ip, xfs_iunlock()
288 mrunlock_excl(&ip->i_iolock); xfs_iunlock()
290 mrunlock_shared(&ip->i_iolock); xfs_iunlock()
293 mrunlock_excl(&ip->i_mmaplock); xfs_iunlock()
295 mrunlock_shared(&ip->i_mmaplock); xfs_iunlock()
298 mrunlock_excl(&ip->i_lock); xfs_iunlock()
300 mrunlock_shared(&ip->i_lock); xfs_iunlock()
302 trace_xfs_iunlock(ip, lock_flags, _RET_IP_); xfs_iunlock()
311 xfs_inode_t *ip, xfs_ilock_demote()
319 mrdemote(&ip->i_lock); xfs_ilock_demote()
321 mrdemote(&ip->i_mmaplock); xfs_ilock_demote()
323 mrdemote(&ip->i_iolock); xfs_ilock_demote()
325 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_); xfs_ilock_demote()
331 xfs_inode_t *ip, xfs_isilocked()
336 return !!ip->i_lock.mr_writer; xfs_isilocked()
337 return rwsem_is_locked(&ip->i_lock.mr_lock); xfs_isilocked()
342 return !!ip->i_mmaplock.mr_writer; xfs_isilocked()
343 return rwsem_is_locked(&ip->i_mmaplock.mr_lock); xfs_isilocked()
348 return !!ip->i_iolock.mr_writer; xfs_isilocked()
349 return rwsem_is_locked(&ip->i_iolock.mr_lock); xfs_isilocked()
597 struct xfs_inode *ip) __xfs_iflock()
599 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT); __xfs_iflock()
600 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT); __xfs_iflock()
604 if (xfs_isiflocked(ip)) __xfs_iflock()
606 } while (!xfs_iflock_nowait(ip)); __xfs_iflock()
653 xfs_inode_t *ip) xfs_ip2xflags()
655 xfs_icdinode_t *dic = &ip->i_d; xfs_ip2xflags()
658 (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); xfs_ip2xflags()
756 xfs_inode_t *ip; xfs_ialloc() local
781 XFS_ILOCK_EXCL, &ip); xfs_ialloc()
784 ASSERT(ip != NULL); xfs_ialloc()
791 if (ip->i_d.di_version == 1) xfs_ialloc()
792 ip->i_d.di_version = 2; xfs_ialloc()
794 ip->i_d.di_mode = mode; xfs_ialloc()
795 ip->i_d.di_onlink = 0; xfs_ialloc()
796 ip->i_d.di_nlink = nlink; xfs_ialloc()
797 ASSERT(ip->i_d.di_nlink == nlink); xfs_ialloc()
798 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid()); xfs_ialloc()
799 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid()); xfs_ialloc()
800 xfs_set_projid(ip, prid); xfs_ialloc()
801 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); xfs_ialloc()
804 ip->i_d.di_gid = pip->i_d.di_gid; xfs_ialloc()
806 ip->i_d.di_mode |= S_ISGID; xfs_ialloc()
816 (ip->i_d.di_mode & S_ISGID) && xfs_ialloc()
817 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) { xfs_ialloc()
818 ip->i_d.di_mode &= ~S_ISGID; xfs_ialloc()
821 ip->i_d.di_size = 0; xfs_ialloc()
822 ip->i_d.di_nextents = 0; xfs_ialloc()
823 ASSERT(ip->i_d.di_nblocks == 0); xfs_ialloc()
826 ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec; xfs_ialloc()
827 ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec; xfs_ialloc()
828 ip->i_d.di_atime = ip->i_d.di_mtime; xfs_ialloc()
829 ip->i_d.di_ctime = ip->i_d.di_mtime; xfs_ialloc()
834 ip->i_d.di_extsize = 0; xfs_ialloc()
835 ip->i_d.di_dmevmask = 0; xfs_ialloc()
836 ip->i_d.di_dmstate = 0; xfs_ialloc()
837 ip->i_d.di_flags = 0; xfs_ialloc()
839 if (ip->i_d.di_version == 3) { xfs_ialloc()
840 ASSERT(ip->i_d.di_ino == ino); xfs_ialloc()
841 ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid)); xfs_ialloc()
842 ip->i_d.di_crc = 0; xfs_ialloc()
843 ip->i_d.di_changecount = 1; xfs_ialloc()
844 ip->i_d.di_lsn = 0; xfs_ialloc()
845 ip->i_d.di_flags2 = 0; xfs_ialloc()
846 memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2)); xfs_ialloc()
847 ip->i_d.di_crtime = ip->i_d.di_mtime; xfs_ialloc()
857 ip->i_d.di_format = XFS_DINODE_FMT_DEV; xfs_ialloc()
858 ip->i_df.if_u2.if_rdev = rdev; xfs_ialloc()
859 ip->i_df.if_flags = 0; xfs_ialloc()
872 ip->i_d.di_extsize = pip->i_d.di_extsize; xfs_ialloc()
881 ip->i_d.di_extsize = pip->i_d.di_extsize; xfs_ialloc()
901 ip->i_d.di_flags |= di_flags; xfs_ialloc()
905 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; xfs_ialloc()
906 ip->i_df.if_flags = XFS_IFEXTENTS; xfs_ialloc()
907 ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0; xfs_ialloc()
908 ip->i_df.if_u1.if_extents = NULL; xfs_ialloc()
916 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; xfs_ialloc()
917 ip->i_d.di_anextents = 0; xfs_ialloc()
922 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_ialloc()
923 xfs_trans_log_inode(tp, ip, flags); xfs_ialloc()
926 xfs_setup_inode(ip); xfs_ialloc()
928 *ipp = ip; xfs_ialloc()
959 xfs_inode_t *ip; xfs_dir_ialloc() local
984 &ialloc_context, &ip); xfs_dir_ialloc()
995 if (!ialloc_context && !ip) { xfs_dir_ialloc()
1056 okalloc, &ialloc_context, &ip); xfs_dir_ialloc()
1067 ASSERT(!ialloc_context && ip); xfs_dir_ialloc()
1074 *ipp = ip; xfs_dir_ialloc()
1088 xfs_inode_t *ip) xfs_droplink()
1092 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_droplink()
1094 ASSERT (ip->i_d.di_nlink > 0); xfs_droplink()
1095 ip->i_d.di_nlink--; xfs_droplink()
1096 drop_nlink(VFS_I(ip)); xfs_droplink()
1097 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_droplink()
1100 if (ip->i_d.di_nlink == 0) { xfs_droplink()
1107 error = xfs_iunlink(tp, ip); xfs_droplink()
1118 xfs_inode_t *ip) xfs_bumplink()
1120 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_bumplink()
1122 ASSERT(ip->i_d.di_version > 1); xfs_bumplink()
1123 ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE)); xfs_bumplink()
1124 ip->i_d.di_nlink++; xfs_bumplink()
1125 inc_nlink(VFS_I(ip)); xfs_bumplink()
1126 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_bumplink()
1140 struct xfs_inode *ip = NULL; xfs_create() local
1229 prid, resblks > 0, &ip, &committed); xfs_create()
1243 error = xfs_dir_createname(tp, dp, name, ip->i_ino, xfs_create()
1254 error = xfs_dir_init(tp, ip, dp); xfs_create()
1276 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); xfs_create()
1290 *ipp = ip; xfs_create()
1303 if (ip) { xfs_create()
1304 xfs_finish_inode_setup(ip); xfs_create()
1305 IRELE(ip); xfs_create()
1325 struct xfs_inode *ip = NULL; xfs_create_tmpfile() local
1369 prid, resblks > 0, &ip, NULL); xfs_create_tmpfile()
1381 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp); xfs_create_tmpfile()
1383 ip->i_d.di_nlink--; xfs_create_tmpfile()
1384 error = xfs_iunlink(tp, ip); xfs_create_tmpfile()
1396 *ipp = ip; xfs_create_tmpfile()
1407 if (ip) { xfs_create_tmpfile()
1408 xfs_finish_inode_setup(ip); xfs_create_tmpfile()
1409 IRELE(ip); xfs_create_tmpfile()
1547 struct xfs_inode *ip, xfs_itruncate_extents()
1551 struct xfs_mount *mp = ip->i_mount; xfs_itruncate_extents()
1562 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_itruncate_extents()
1563 ASSERT(!atomic_read(&VFS_I(ip)->i_count) || xfs_itruncate_extents()
1564 xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_itruncate_extents()
1565 ASSERT(new_size <= XFS_ISIZE(ip)); xfs_itruncate_extents()
1567 ASSERT(ip->i_itemp != NULL); xfs_itruncate_extents()
1568 ASSERT(ip->i_itemp->ili_lock_flags == 0); xfs_itruncate_extents()
1569 ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); xfs_itruncate_extents()
1571 trace_xfs_itruncate_extents_start(ip, new_size); xfs_itruncate_extents()
1591 error = xfs_bunmapi(tp, ip, xfs_itruncate_extents()
1606 xfs_trans_ijoin(tp, ip, 0); xfs_itruncate_extents()
1610 error = xfs_trans_roll(&tp, ip); xfs_itruncate_extents()
1619 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_itruncate_extents()
1621 trace_xfs_itruncate_extents_end(ip, new_size); xfs_itruncate_extents()
1638 xfs_inode_t *ip) xfs_release()
1640 xfs_mount_t *mp = ip->i_mount; xfs_release()
1643 if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0)) xfs_release()
1663 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); xfs_release()
1665 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); xfs_release()
1666 if (ip->i_delayed_blks > 0) { xfs_release()
1667 error = filemap_flush(VFS_I(ip)->i_mapping); xfs_release()
1674 if (ip->i_d.di_nlink == 0) xfs_release()
1677 if (xfs_can_free_eofblocks(ip, false)) { xfs_release()
1700 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) xfs_release()
1703 error = xfs_free_eofblocks(mp, ip, true); xfs_release()
1708 if (ip->i_delayed_blks) xfs_release()
1709 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); xfs_release()
1721 struct xfs_inode *ip) xfs_inactive_truncate()
1723 struct xfs_mount *mp = ip->i_mount; xfs_inactive_truncate()
1735 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_inactive_truncate()
1736 xfs_trans_ijoin(tp, ip, 0); xfs_inactive_truncate()
1743 ip->i_d.di_size = 0; xfs_inactive_truncate()
1744 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_inactive_truncate()
1746 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); xfs_inactive_truncate()
1750 ASSERT(ip->i_d.di_nextents == 0); xfs_inactive_truncate()
1756 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_truncate()
1762 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_truncate()
1773 struct xfs_inode *ip) xfs_inactive_ifree()
1778 struct xfs_mount *mp = ip->i_mount; xfs_inactive_ifree()
1814 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_inactive_ifree()
1815 xfs_trans_ijoin(tp, ip, 0); xfs_inactive_ifree()
1818 error = xfs_ifree(tp, ip, &free_list); xfs_inactive_ifree()
1831 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_ifree()
1838 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); xfs_inactive_ifree()
1855 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_inactive_ifree()
1869 xfs_inode_t *ip) xfs_inactive()
1879 if (ip->i_d.di_mode == 0) { xfs_inactive()
1880 ASSERT(ip->i_df.if_real_bytes == 0); xfs_inactive()
1881 ASSERT(ip->i_df.if_broot_bytes == 0); xfs_inactive()
1885 mp = ip->i_mount; xfs_inactive()
1891 if (ip->i_d.di_nlink != 0) { xfs_inactive()
1897 if (xfs_can_free_eofblocks(ip, true)) xfs_inactive()
1898 xfs_free_eofblocks(mp, ip, false); xfs_inactive()
1903 if (S_ISREG(ip->i_d.di_mode) && xfs_inactive()
1904 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 || xfs_inactive()
1905 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0)) xfs_inactive()
1908 error = xfs_qm_dqattach(ip, 0); xfs_inactive()
1912 if (S_ISLNK(ip->i_d.di_mode)) xfs_inactive()
1913 error = xfs_inactive_symlink(ip); xfs_inactive()
1915 error = xfs_inactive_truncate(ip); xfs_inactive()
1924 if (XFS_IFORK_Q(ip)) { xfs_inactive()
1925 error = xfs_attr_inactive(ip); xfs_inactive()
1930 ASSERT(!ip->i_afp); xfs_inactive()
1931 ASSERT(ip->i_d.di_anextents == 0); xfs_inactive()
1932 ASSERT(ip->i_d.di_forkoff == 0); xfs_inactive()
1937 error = xfs_inactive_ifree(ip); xfs_inactive()
1944 xfs_qm_dqdetach(ip); xfs_inactive()
1955 xfs_inode_t *ip) xfs_iunlink()
1967 ASSERT(ip->i_d.di_nlink == 0); xfs_iunlink()
1968 ASSERT(ip->i_d.di_mode != 0); xfs_iunlink()
1976 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp); xfs_iunlink()
1985 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); xfs_iunlink()
1998 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, xfs_iunlink()
2005 offset = ip->i_imap.im_boffset + xfs_iunlink()
2036 xfs_inode_t *ip) xfs_iunlink_remove()
2054 agno = XFS_INO_TO_AGNO(mp, ip->i_ino); xfs_iunlink_remove()
2070 agino = XFS_INO_TO_AGINO(mp, ip->i_ino); xfs_iunlink_remove()
2084 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, xfs_iunlink_remove()
2095 offset = ip->i_imap.im_boffset + xfs_iunlink_remove()
2161 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, xfs_iunlink_remove()
2173 offset = ip->i_imap.im_boffset + xfs_iunlink_remove()
2223 xfs_inode_t *ip; xfs_ifree_cluster() local
2311 ip = radix_tree_lookup(&pag->pag_ici_root, xfs_ifree_cluster()
2315 if (!ip) { xfs_ifree_cluster()
2327 spin_lock(&ip->i_flags_lock); xfs_ifree_cluster()
2328 if (ip->i_ino != inum + i || xfs_ifree_cluster()
2329 __xfs_iflags_test(ip, XFS_ISTALE)) { xfs_ifree_cluster()
2330 spin_unlock(&ip->i_flags_lock); xfs_ifree_cluster()
2334 spin_unlock(&ip->i_flags_lock); xfs_ifree_cluster()
2343 if (ip != free_ip && xfs_ifree_cluster()
2344 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { xfs_ifree_cluster()
2351 xfs_iflock(ip); xfs_ifree_cluster()
2352 xfs_iflags_set(ip, XFS_ISTALE); xfs_ifree_cluster()
2358 iip = ip->i_itemp; xfs_ifree_cluster()
2359 if (!iip || xfs_inode_clean(ip)) { xfs_ifree_cluster()
2360 ASSERT(ip != free_ip); xfs_ifree_cluster()
2361 xfs_ifunlock(ip); xfs_ifree_cluster()
2362 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_ifree_cluster()
2376 if (ip != free_ip) xfs_ifree_cluster()
2377 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_ifree_cluster()
2401 xfs_inode_t *ip, xfs_ifree()
2407 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_ifree()
2408 ASSERT(ip->i_d.di_nlink == 0); xfs_ifree()
2409 ASSERT(ip->i_d.di_nextents == 0); xfs_ifree()
2410 ASSERT(ip->i_d.di_anextents == 0); xfs_ifree()
2411 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode)); xfs_ifree()
2412 ASSERT(ip->i_d.di_nblocks == 0); xfs_ifree()
2417 error = xfs_iunlink_remove(tp, ip); xfs_ifree()
2421 error = xfs_difree(tp, ip->i_ino, flist, &xic); xfs_ifree()
2425 ip->i_d.di_mode = 0; /* mark incore inode as free */ xfs_ifree()
2426 ip->i_d.di_flags = 0; xfs_ifree()
2427 ip->i_d.di_dmevmask = 0; xfs_ifree()
2428 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ xfs_ifree()
2429 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS; xfs_ifree()
2430 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; xfs_ifree()
2435 ip->i_d.di_gen++; xfs_ifree()
2436 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_ifree()
2439 error = xfs_ifree_cluster(ip, tp, &xic); xfs_ifree()
2451 struct xfs_inode *ip) xfs_iunpin()
2453 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); xfs_iunpin()
2455 trace_xfs_inode_unpin_nowait(ip, _RET_IP_); xfs_iunpin()
2458 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0); xfs_iunpin()
2464 struct xfs_inode *ip) __xfs_iunpin_wait()
2466 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); __xfs_iunpin_wait()
2467 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); __xfs_iunpin_wait()
2469 xfs_iunpin(ip); __xfs_iunpin_wait()
2473 if (xfs_ipincount(ip)) __xfs_iunpin_wait()
2475 } while (xfs_ipincount(ip)); __xfs_iunpin_wait()
2481 struct xfs_inode *ip) xfs_iunpin_wait()
2483 if (xfs_ipincount(ip)) xfs_iunpin_wait()
2484 __xfs_iunpin_wait(ip); xfs_iunpin_wait()
2518 xfs_inode_t *ip) xfs_remove()
2522 int is_dir = S_ISDIR(ip->i_d.di_mode); xfs_remove()
2538 error = xfs_qm_dqattach(ip, 0); xfs_remove()
2568 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); xfs_remove()
2571 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_remove()
2577 ASSERT(ip->i_d.di_nlink >= 2); xfs_remove()
2578 if (ip->i_d.di_nlink != 2) { xfs_remove()
2582 if (!xfs_dir_isempty(ip)) { xfs_remove()
2587 /* Drop the link from ip's "..". */ xfs_remove()
2592 /* Drop the "." link from ip to self. */ xfs_remove()
2593 error = xfs_droplink(tp, ip); xfs_remove()
2606 /* Drop the link from dp to ip. */ xfs_remove()
2607 error = xfs_droplink(tp, ip); xfs_remove()
2612 error = xfs_dir_removename(tp, dp, name, ip->i_ino, xfs_remove()
2635 if (is_dir && xfs_inode_is_filestream(ip)) xfs_remove()
2636 xfs_filestream_deassociate(ip); xfs_remove()
3180 xfs_inode_t *ip, xfs_iflush_cluster()
3183 xfs_mount_t *mp = ip->i_mount; xfs_iflush_cluster()
3195 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); xfs_iflush_cluster()
3204 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; xfs_iflush_cluster()
3214 if (iq == ip) xfs_iflush_cluster()
3342 struct xfs_inode *ip, xfs_iflush()
3345 struct xfs_mount *mp = ip->i_mount; xfs_iflush()
3352 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); xfs_iflush()
3353 ASSERT(xfs_isiflocked(ip)); xfs_iflush()
3354 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || xfs_iflush()
3355 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); xfs_iflush()
3359 xfs_iunpin_wait(ip); xfs_iflush()
3369 if (xfs_iflags_test(ip, XFS_ISTALE)) { xfs_iflush()
3370 xfs_ifunlock(ip); xfs_iflush()
3396 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK, xfs_iflush()
3399 xfs_ifunlock(ip); xfs_iflush()
3408 error = xfs_iflush_int(ip, bp); xfs_iflush()
3423 error = xfs_iflush_cluster(ip, bp); xfs_iflush()
3440 xfs_iflush_abort(ip, false); xfs_iflush()
3446 struct xfs_inode *ip, xfs_iflush_int()
3449 struct xfs_inode_log_item *iip = ip->i_itemp; xfs_iflush_int()
3451 struct xfs_mount *mp = ip->i_mount; xfs_iflush_int()
3453 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); xfs_iflush_int()
3454 ASSERT(xfs_isiflocked(ip)); xfs_iflush_int()
3455 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || xfs_iflush_int()
3456 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); xfs_iflush_int()
3458 ASSERT(ip->i_d.di_version > 1); xfs_iflush_int()
3461 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); xfs_iflush_int()
3467 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); xfs_iflush_int()
3470 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, xfs_iflush_int()
3474 __func__, ip->i_ino, ip, ip->i_d.di_magic); xfs_iflush_int()
3477 if (S_ISREG(ip->i_d.di_mode)) { xfs_iflush_int()
3479 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && xfs_iflush_int()
3480 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), xfs_iflush_int()
3484 __func__, ip->i_ino, ip); xfs_iflush_int()
3487 } else if (S_ISDIR(ip->i_d.di_mode)) { xfs_iflush_int()
3489 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && xfs_iflush_int()
3490 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && xfs_iflush_int()
3491 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), xfs_iflush_int()
3495 __func__, ip->i_ino, ip); xfs_iflush_int()
3499 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > xfs_iflush_int()
3500 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, xfs_iflush_int()
3505 __func__, ip->i_ino, xfs_iflush_int()
3506 ip->i_d.di_nextents + ip->i_d.di_anextents, xfs_iflush_int()
3507 ip->i_d.di_nblocks, ip); xfs_iflush_int()
3510 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, xfs_iflush_int()
3514 __func__, ip->i_ino, ip->i_d.di_forkoff, ip); xfs_iflush_int()
3527 if (ip->i_d.di_version < 3) xfs_iflush_int()
3528 ip->i_d.di_flushiter++; xfs_iflush_int()
3536 xfs_dinode_to_disk(dip, &ip->i_d); xfs_iflush_int()
3539 if (ip->i_d.di_flushiter == DI_MAX_FLUSH) xfs_iflush_int()
3540 ip->i_d.di_flushiter = 0; xfs_iflush_int()
3542 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); xfs_iflush_int()
3543 if (XFS_IFORK_Q(ip)) xfs_iflush_int()
3544 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); xfs_iflush_int()
3589 if (ip->i_d.di_version == 3) xfs_iflush_int()
68 xfs_get_extsz_hint( struct xfs_inode *ip) xfs_get_extsz_hint() argument
94 xfs_ilock_data_map_shared( struct xfs_inode *ip) xfs_ilock_data_map_shared() argument
107 xfs_ilock_attr_map_shared( struct xfs_inode *ip) xfs_ilock_attr_map_shared() argument
150 xfs_ilock( xfs_inode_t *ip, uint lock_flags) xfs_ilock() argument
198 xfs_ilock_nowait( xfs_inode_t *ip, uint lock_flags) xfs_ilock_nowait() argument
269 xfs_iunlock( xfs_inode_t *ip, uint lock_flags) xfs_iunlock() argument
310 xfs_ilock_demote( xfs_inode_t *ip, uint lock_flags) xfs_ilock_demote() argument
330 xfs_isilocked( xfs_inode_t *ip, uint lock_flags) xfs_isilocked() argument
596 __xfs_iflock( struct xfs_inode *ip) __xfs_iflock() argument
652 xfs_ip2xflags( xfs_inode_t *ip) xfs_ip2xflags() argument
1086 xfs_droplink( xfs_trans_t *tp, xfs_inode_t *ip) xfs_droplink() argument
1116 xfs_bumplink( xfs_trans_t *tp, xfs_inode_t *ip) xfs_bumplink() argument
1545 xfs_itruncate_extents( struct xfs_trans **tpp, struct xfs_inode *ip, int whichfork, xfs_fsize_t new_size) xfs_itruncate_extents() argument
1637 xfs_release( xfs_inode_t *ip) xfs_release() argument
1720 xfs_inactive_truncate( struct xfs_inode *ip) xfs_inactive_truncate() argument
1772 xfs_inactive_ifree( struct xfs_inode *ip) xfs_inactive_ifree() argument
1868 xfs_inactive( xfs_inode_t *ip) xfs_inactive() argument
1953 xfs_iunlink( xfs_trans_t *tp, xfs_inode_t *ip) xfs_iunlink() argument
2034 xfs_iunlink_remove( xfs_trans_t *tp, xfs_inode_t *ip) xfs_iunlink_remove() argument
2399 xfs_ifree( xfs_trans_t *tp, xfs_inode_t *ip, xfs_bmap_free_t *flist) xfs_ifree() argument
2450 xfs_iunpin( struct xfs_inode *ip) xfs_iunpin() argument
2463 __xfs_iunpin_wait( struct xfs_inode *ip) __xfs_iunpin_wait() argument
2480 xfs_iunpin_wait( struct xfs_inode *ip) xfs_iunpin_wait() argument
2515 xfs_remove( xfs_inode_t *dp, struct xfs_name *name, xfs_inode_t *ip) xfs_remove() argument
3179 xfs_iflush_cluster( xfs_inode_t *ip, xfs_buf_t *bp) xfs_iflush_cluster() argument
3341 xfs_iflush( struct xfs_inode *ip, struct xfs_buf **bpp) xfs_iflush() argument
3445 xfs_iflush_int( struct xfs_inode *ip, struct xfs_buf *bp) xfs_iflush_int() argument
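xfs_ilock_nowait above takes i_iolock, i_mmaplock and i_lock in a fixed order and, when a later trylock fails, releases whatever it already acquired (the out_undo path); the mrupdate/mraccess calls wrap a read-write semaphore, as the rwsem_is_locked checks in xfs_isilocked show. A rough userspace analog of that trylock-in-order-and-back-out pattern, using POSIX rwlocks rather than the kernel's mrlock API:

#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t ilock  = PTHREAD_RWLOCK_INITIALIZER;

/* Try to take both locks for writing in a fixed order; undo on failure. */
static bool lock_both_nowait(void)
{
	if (pthread_rwlock_trywrlock(&iolock) != 0)
		return false;
	if (pthread_rwlock_trywrlock(&ilock) != 0) {
		pthread_rwlock_unlock(&iolock);	/* back out the partial state */
		return false;
	}
	return true;
}

/* Release in the reverse of the acquisition order. */
static void unlock_both(void)
{
	pthread_rwlock_unlock(&ilock);
	pthread_rwlock_unlock(&iolock);
}

int main(void)
{
	if (lock_both_nowait()) {
		puts("got both locks");
		unlock_both();
	} else {
		puts("contended, nothing left held");
	}
	return 0;
}

Build with cc -pthread; the point is only the ordering and the rollback, not the XFS lock semantics themselves.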
xfs_iomap.c
48 xfs_inode_t *ip, xfs_iomap_eof_align_last_fsb()
55 if (!XFS_IS_REALTIME_INODE(ip)) { xfs_iomap_eof_align_last_fsb()
69 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align)) xfs_iomap_eof_align_last_fsb()
86 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); xfs_iomap_eof_align_last_fsb()
97 xfs_inode_t *ip, xfs_alert_fsblock_zero()
100 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, xfs_alert_fsblock_zero()
104 (unsigned long long)ip->i_ino, xfs_alert_fsblock_zero()
114 xfs_inode_t *ip, xfs_iomap_write_direct()
120 xfs_mount_t *mp = ip->i_mount; xfs_iomap_write_direct()
137 rt = XFS_IS_REALTIME_INODE(ip); xfs_iomap_write_direct()
138 extsz = xfs_get_extsz_hint(ip); xfs_iomap_write_direct()
141 ASSERT(xfs_isilocked(ip, lockmode)); xfs_iomap_write_direct()
145 if ((offset + count) > XFS_ISIZE(ip)) { xfs_iomap_write_direct()
153 ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags & xfs_iomap_write_direct()
155 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); xfs_iomap_write_direct()
190 xfs_iunlock(ip, lockmode); xfs_iomap_write_direct()
191 error = xfs_qm_dqattach(ip, 0); xfs_iomap_write_direct()
212 if (IS_DAX(VFS_I(ip))) { xfs_iomap_write_direct()
227 xfs_ilock(ip, lockmode); xfs_iomap_write_direct()
229 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag); xfs_iomap_write_direct()
233 xfs_trans_ijoin(tp, ip, 0); xfs_iomap_write_direct()
241 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, xfs_iomap_write_direct()
266 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) xfs_iomap_write_direct()
267 error = xfs_alert_fsblock_zero(ip, imap); xfs_iomap_write_direct()
270 xfs_iunlock(ip, lockmode); xfs_iomap_write_direct()
275 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag); xfs_iomap_write_direct()
292 xfs_inode_t *ip, xfs_iomap_eof_want_preallocate()
305 if (offset + count <= XFS_ISIZE(ip)) xfs_iomap_eof_want_preallocate()
314 XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)) xfs_iomap_eof_want_preallocate()
325 error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps, xfs_iomap_eof_want_preallocate()
363 struct xfs_inode *ip, xfs_iomap_eof_prealloc_initial_size()
379 if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign)) xfs_iomap_eof_prealloc_initial_size()
393 error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE); xfs_iomap_eof_prealloc_initial_size()
407 struct xfs_inode *ip, xfs_quota_need_throttle()
411 struct xfs_dquot *dq = xfs_inode_dquot(ip, type); xfs_quota_need_throttle()
413 if (!dq || !xfs_this_quota_on(ip->i_mount, type)) xfs_quota_need_throttle()
429 struct xfs_inode *ip, xfs_quota_calc_throttle()
437 struct xfs_dquot *dq = xfs_inode_dquot(ip, type); xfs_quota_calc_throttle()
474 struct xfs_inode *ip, xfs_iomap_prealloc_size()
485 alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset, xfs_iomap_prealloc_size()
518 if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) xfs_iomap_prealloc_size()
519 xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, xfs_iomap_prealloc_size()
521 if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) xfs_iomap_prealloc_size()
522 xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, xfs_iomap_prealloc_size()
524 if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) xfs_iomap_prealloc_size()
525 xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, xfs_iomap_prealloc_size()
562 trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, xfs_iomap_prealloc_size()
570 xfs_inode_t *ip, xfs_iomap_write_delay()
575 xfs_mount_t *mp = ip->i_mount; xfs_iomap_write_delay()
586 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_iomap_write_delay()
592 error = xfs_qm_dqattach_locked(ip, 0); xfs_iomap_write_delay()
596 extsz = xfs_get_extsz_hint(ip); xfs_iomap_write_delay()
599 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, xfs_iomap_write_delay()
608 alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap, xfs_iomap_write_delay()
619 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); xfs_iomap_write_delay()
634 error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb, xfs_iomap_write_delay()
650 trace_xfs_delalloc_enospc(ip, offset, count); xfs_iomap_write_delay()
659 if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) xfs_iomap_write_delay()
660 return xfs_alert_fsblock_zero(ip, &imap[0]); xfs_iomap_write_delay()
667 xfs_inode_set_eofblocks_tag(ip); xfs_iomap_write_delay()
685 xfs_inode_t *ip, xfs_iomap_write_allocate()
689 xfs_mount_t *mp = ip->i_mount; xfs_iomap_write_allocate()
703 error = xfs_qm_dqattach(ip, 0); xfs_iomap_write_allocate()
734 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_allocate()
735 xfs_trans_ijoin(tp, ip, 0); xfs_iomap_write_allocate()
771 end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)); xfs_iomap_write_allocate()
772 error = xfs_bmap_last_offset(ip, &last_block, xfs_iomap_write_allocate()
790 error = xfs_bmapi_write(tp, ip, map_start_fsb, xfs_iomap_write_allocate()
805 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_allocate()
812 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) xfs_iomap_write_allocate()
813 return xfs_alert_fsblock_zero(ip, imap); xfs_iomap_write_allocate()
834 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_allocate()
840 xfs_inode_t *ip, xfs_iomap_write_unwritten()
844 xfs_mount_t *mp = ip->i_mount; xfs_iomap_write_unwritten()
858 trace_xfs_unwritten_convert(ip, offset, count); xfs_iomap_write_unwritten()
898 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_unwritten()
899 xfs_trans_ijoin(tp, ip, 0); xfs_iomap_write_unwritten()
906 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, xfs_iomap_write_unwritten()
921 i_size = xfs_new_eof(ip, i_size); xfs_iomap_write_unwritten()
923 ip->i_d.di_size = i_size; xfs_iomap_write_unwritten()
924 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_iomap_write_unwritten()
932 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_unwritten()
936 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) xfs_iomap_write_unwritten()
937 return xfs_alert_fsblock_zero(ip, &imap); xfs_iomap_write_unwritten()
956 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iomap_write_unwritten()
46 xfs_iomap_eof_align_last_fsb( xfs_mount_t *mp, xfs_inode_t *ip, xfs_extlen_t extsize, xfs_fileoff_t *last_fsb) xfs_iomap_eof_align_last_fsb() argument
96 xfs_alert_fsblock_zero( xfs_inode_t *ip, xfs_bmbt_irec_t *imap) xfs_alert_fsblock_zero() argument
113 xfs_iomap_write_direct( xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap, int nmaps) xfs_iomap_write_direct() argument
290 xfs_iomap_eof_want_preallocate( xfs_mount_t *mp, xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *imap, int nimaps, int *prealloc) xfs_iomap_eof_want_preallocate() argument
361 xfs_iomap_eof_prealloc_initial_size( struct xfs_mount *mp, struct xfs_inode *ip, xfs_off_t offset, xfs_bmbt_irec_t *imap, int nimaps) xfs_iomap_eof_prealloc_initial_size() argument
406 xfs_quota_need_throttle( struct xfs_inode *ip, int type, xfs_fsblock_t alloc_blocks) xfs_quota_need_throttle() argument
428 xfs_quota_calc_throttle( struct xfs_inode *ip, int type, xfs_fsblock_t *qblocks, int *qshift, int64_t *qfreesp) xfs_quota_calc_throttle() argument
472 xfs_iomap_prealloc_size( struct xfs_mount *mp, struct xfs_inode *ip, xfs_off_t offset, struct xfs_bmbt_irec *imap, int nimaps) xfs_iomap_prealloc_size() argument
569 xfs_iomap_write_delay( xfs_inode_t *ip, xfs_off_t offset, size_t count, xfs_bmbt_irec_t *ret_imap) xfs_iomap_write_delay() argument
684 xfs_iomap_write_allocate( xfs_inode_t *ip, xfs_off_t offset, xfs_bmbt_irec_t *imap) xfs_iomap_write_allocate() argument
839 xfs_iomap_write_unwritten( xfs_inode_t *ip, xfs_off_t offset, xfs_off_t count) xfs_iomap_write_unwritten() argument
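xfs_iomap_eof_align_last_fsb above appears to round the last block of a new allocation up to the inode's extent size hint (or stripe alignment) before speculative preallocation. The arithmetic is plain round-up-to-a-multiple; a tiny stand-alone illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

/* Round n up to the next multiple of align (align > 0). */
static uint64_t roundup_to(uint64_t n, uint64_t align)
{
	return ((n + align - 1) / align) * align;
}

int main(void)
{
	uint64_t last_fsb = 1000;   /* hypothetical last file system block */
	uint64_t extsz    = 16;     /* hypothetical extent size hint, in blocks */

	printf("%llu aligned to %llu -> %llu\n",
	       (unsigned long long)last_fsb, (unsigned long long)extsz,
	       (unsigned long long)roundup_to(last_fsb, extsz));
	return 0;
}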
xfs_file.c
50 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
54 struct xfs_inode *ip, xfs_rw_ilock()
58 mutex_lock(&VFS_I(ip)->i_mutex); xfs_rw_ilock()
59 xfs_ilock(ip, type); xfs_rw_ilock()
64 struct xfs_inode *ip, xfs_rw_iunlock()
67 xfs_iunlock(ip, type); xfs_rw_iunlock()
69 mutex_unlock(&VFS_I(ip)->i_mutex); xfs_rw_iunlock()
74 struct xfs_inode *ip, xfs_rw_ilock_demote()
77 xfs_ilock_demote(ip, type); xfs_rw_ilock_demote()
79 mutex_unlock(&VFS_I(ip)->i_mutex); xfs_rw_ilock_demote()
95 struct xfs_inode *ip, /* inode */ xfs_iozero()
104 mapping = VFS_I(ip)->i_mapping; xfs_iozero()
114 if (IS_DAX(VFS_I(ip))) { xfs_iozero()
115 status = dax_zero_page_range(VFS_I(ip), pos, bytes, xfs_iozero()
142 struct xfs_inode *ip, xfs_update_prealloc_flags()
148 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID); xfs_update_prealloc_flags()
149 error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0); xfs_update_prealloc_flags()
155 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_update_prealloc_flags()
156 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_update_prealloc_flags()
159 ip->i_d.di_mode &= ~S_ISUID; xfs_update_prealloc_flags()
160 if (ip->i_d.di_mode & S_IXGRP) xfs_update_prealloc_flags()
161 ip->i_d.di_mode &= ~S_ISGID; xfs_update_prealloc_flags()
162 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_update_prealloc_flags()
166 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; xfs_update_prealloc_flags()
168 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC; xfs_update_prealloc_flags()
170 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_update_prealloc_flags()
189 struct xfs_inode *ip = XFS_I(file->f_mapping->host); xfs_dir_fsync() local
190 struct xfs_mount *mp = ip->i_mount; xfs_dir_fsync()
193 trace_xfs_dir_fsync(ip); xfs_dir_fsync()
195 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_dir_fsync()
196 if (xfs_ipincount(ip)) xfs_dir_fsync()
197 lsn = ip->i_itemp->ili_last_lsn; xfs_dir_fsync()
198 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_dir_fsync()
213 struct xfs_inode *ip = XFS_I(inode); xfs_file_fsync() local
214 struct xfs_mount *mp = ip->i_mount; xfs_file_fsync()
219 trace_xfs_file_fsync(ip); xfs_file_fsync()
228 xfs_iflags_clear(ip, XFS_ITRUNCATED); xfs_file_fsync()
238 if (XFS_IS_REALTIME_INODE(ip)) xfs_file_fsync()
257 xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_file_fsync()
258 if (xfs_ipincount(ip)) { xfs_file_fsync()
260 (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) xfs_file_fsync()
261 lsn = ip->i_itemp->ili_last_lsn; xfs_file_fsync()
266 ip->i_itemp->ili_fsync_fields = 0; xfs_file_fsync()
268 xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_file_fsync()
279 !XFS_IS_REALTIME_INODE(ip) && xfs_file_fsync()
293 struct xfs_inode *ip = XFS_I(inode); xfs_file_read_iter() local
294 struct xfs_mount *mp = ip->i_mount; xfs_file_read_iter()
310 XFS_IS_REALTIME_INODE(ip) ? xfs_file_read_iter()
340 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); xfs_file_read_iter()
342 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); xfs_file_read_iter()
343 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); xfs_file_read_iter()
357 ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); xfs_file_read_iter()
359 xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); xfs_file_read_iter()
368 ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping); xfs_file_read_iter()
372 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); xfs_file_read_iter()
375 trace_xfs_file_read(ip, size, pos, ioflags); xfs_file_read_iter()
381 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); xfs_file_read_iter()
393 struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); xfs_file_splice_read() local
397 XFS_STATS_INC(ip->i_mount, xs_read_calls); xfs_file_splice_read()
402 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) xfs_file_splice_read()
405 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); xfs_file_splice_read()
407 trace_xfs_file_splice_read(ip, count, *ppos, ioflags); xfs_file_splice_read()
410 if (IS_DAX(VFS_I(ip))) xfs_file_splice_read()
415 XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret); xfs_file_splice_read()
417 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); xfs_file_splice_read()
429 struct xfs_inode *ip, xfs_zero_last_block()
434 struct xfs_mount *mp = ip->i_mount; xfs_zero_last_block()
442 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_zero_last_block()
443 error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); xfs_zero_last_block()
444 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_zero_last_block()
461 return xfs_iozero(ip, isize, zero_len); xfs_zero_last_block()
477 struct xfs_inode *ip, xfs_zero_eof()
482 struct xfs_mount *mp = ip->i_mount; xfs_zero_eof()
493 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_zero_eof()
496 trace_xfs_zero_eof(ip, isize, offset - isize); xfs_zero_eof()
504 error = xfs_zero_last_block(ip, offset, isize, did_zeroing); xfs_zero_eof()
535 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_zero_eof()
536 error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb, xfs_zero_eof()
538 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_zero_eof()
560 error = xfs_iozero(ip, zero_off, zero_len); xfs_zero_eof()
587 struct xfs_inode *ip = XFS_I(inode); xfs_file_aio_write_checks() local
603 xfs_rw_iunlock(ip, *iolock); xfs_file_aio_write_checks()
605 xfs_rw_ilock(ip, *iolock); xfs_file_aio_write_checks()
623 spin_lock(&ip->i_flags_lock); xfs_file_aio_write_checks()
627 spin_unlock(&ip->i_flags_lock); xfs_file_aio_write_checks()
630 xfs_rw_iunlock(ip, *iolock); xfs_file_aio_write_checks()
632 xfs_rw_ilock(ip, *iolock); xfs_file_aio_write_checks()
647 error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero); xfs_file_aio_write_checks()
651 spin_unlock(&ip->i_flags_lock); xfs_file_aio_write_checks()
708 struct xfs_inode *ip = XFS_I(inode); xfs_file_dio_aio_write() local
709 struct xfs_mount *mp = ip->i_mount; xfs_file_dio_aio_write()
717 struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? xfs_file_dio_aio_write()
739 xfs_rw_ilock(ip, iolock); xfs_file_dio_aio_write()
747 xfs_rw_iunlock(ip, iolock); xfs_file_dio_aio_write()
749 xfs_rw_ilock(ip, iolock); xfs_file_dio_aio_write()
763 ret = filemap_write_and_wait(VFS_I(ip)->i_mapping); xfs_file_dio_aio_write()
771 ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping); xfs_file_dio_aio_write()
783 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); xfs_file_dio_aio_write()
787 trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); xfs_file_dio_aio_write()
805 xfs_rw_iunlock(ip, iolock); xfs_file_dio_aio_write()
811 ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip))); xfs_file_dio_aio_write()
823 struct xfs_inode *ip = XFS_I(inode); xfs_file_buffered_aio_write() local
828 xfs_rw_ilock(ip, iolock); xfs_file_buffered_aio_write()
838 trace_xfs_file_buffered_write(ip, iov_iter_count(from), xfs_file_buffered_aio_write()
854 enospc = xfs_inode_free_quota_eofblocks(ip); xfs_file_buffered_aio_write()
861 xfs_flush_inodes(ip->i_mount); xfs_file_buffered_aio_write()
862 eofb.eof_scan_owner = ip->i_ino; /* for locking */ xfs_file_buffered_aio_write()
864 xfs_icache_free_eofblocks(ip->i_mount, &eofb); xfs_file_buffered_aio_write()
870 xfs_rw_iunlock(ip, iolock); xfs_file_buffered_aio_write()
882 struct xfs_inode *ip = XFS_I(inode); xfs_file_write_iter() local
886 XFS_STATS_INC(ip->i_mount, xs_write_calls); xfs_file_write_iter()
891 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) xfs_file_write_iter()
902 XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret); xfs_file_write_iter()
925 struct xfs_inode *ip = XFS_I(inode); xfs_file_fallocate() local
937 xfs_ilock(ip, iolock); xfs_file_fallocate()
942 xfs_ilock(ip, XFS_MMAPLOCK_EXCL); xfs_file_fallocate()
946 error = xfs_free_file_space(ip, offset, len); xfs_file_fallocate()
968 error = xfs_collapse_file_space(ip, offset, len); xfs_file_fallocate()
1004 error = xfs_zero_file_space(ip, offset, len); xfs_file_fallocate()
1006 error = xfs_alloc_file_space(ip, offset, len, xfs_file_fallocate()
1015 error = xfs_update_prealloc_flags(ip, flags); xfs_file_fallocate()
1025 error = xfs_setattr_size(ip, &iattr); xfs_file_fallocate()
1037 error = xfs_insert_file_space(ip, offset, len); xfs_file_fallocate()
1040 xfs_iunlock(ip, iolock); xfs_file_fallocate()
1062 struct xfs_inode *ip = XFS_I(inode); xfs_dir_open() local
1074 mode = xfs_ilock_data_map_shared(ip); xfs_dir_open()
1075 if (ip->i_d.di_nextents > 0) xfs_dir_open()
1076 xfs_dir3_data_readahead(ip, 0, -1); xfs_dir_open()
1077 xfs_iunlock(ip, mode); xfs_dir_open()
1095 xfs_inode_t *ip = XFS_I(inode); xfs_file_readdir() local
1110 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); xfs_file_readdir()
1112 return xfs_readdir(ip, ctx, bufsize); xfs_file_readdir()
1191 struct xfs_inode *ip = XFS_I(inode); xfs_find_get_desired_pgoff() local
1192 struct xfs_mount *mp = ip->i_mount; xfs_find_get_desired_pgoff()
1340 struct xfs_inode *ip = XFS_I(inode); xfs_seek_hole_data() local
1341 struct xfs_mount *mp = ip->i_mount; xfs_seek_hole_data()
1352 lock = xfs_ilock_data_map_shared(ip); xfs_seek_hole_data()
1372 error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap, xfs_seek_hole_data()
1464 xfs_iunlock(ip, lock); xfs_seek_hole_data()
1579 struct xfs_inode *ip = XFS_I(inode); xfs_filemap_pmd_fault() local
1585 trace_xfs_filemap_pmd_fault(ip); xfs_filemap_pmd_fault()
1617 struct xfs_inode *ip = XFS_I(inode); xfs_filemap_pfn_mkwrite() local
1621 trace_xfs_filemap_pfn_mkwrite(ip); xfs_filemap_pfn_mkwrite()
1627 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); xfs_filemap_pfn_mkwrite()
1631 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); xfs_filemap_pfn_mkwrite()
53 xfs_rw_ilock( struct xfs_inode *ip, int type) xfs_rw_ilock() argument
63 xfs_rw_iunlock( struct xfs_inode *ip, int type) xfs_rw_iunlock() argument
73 xfs_rw_ilock_demote( struct xfs_inode *ip, int type) xfs_rw_ilock_demote() argument
94 xfs_iozero( struct xfs_inode *ip, loff_t pos, size_t count) xfs_iozero() argument
141 xfs_update_prealloc_flags( struct xfs_inode *ip, enum xfs_prealloc_flags flags) xfs_update_prealloc_flags() argument
428 xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, xfs_fsize_t isize, bool *did_zeroing) xfs_zero_last_block() argument
476 xfs_zero_eof( struct xfs_inode *ip, xfs_off_t offset, xfs_fsize_t isize, bool *did_zeroing) xfs_zero_eof() argument
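xfs_zero_eof and xfs_zero_last_block above zero the range between the old EOF and a write that starts beyond it, so stale block contents are never exposed; the trace point logs (isize, offset - isize). A minimal userspace analog that zeroes that gap with pwrite before writing at the new offset; the file name and offsets here are arbitrary:

#define _XOPEN_SOURCE 700
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Zero the byte range [isize, offset) in fd before writing at offset. */
static int zero_eof(int fd, off_t isize, off_t offset)
{
	char zeros[4096];
	memset(zeros, 0, sizeof(zeros));

	while (isize < offset) {
		size_t len = sizeof(zeros);
		if ((off_t)len > offset - isize)
			len = (size_t)(offset - isize);
		ssize_t n = pwrite(fd, zeros, len, isize);
		if (n < 0)
			return -1;
		isize += n;
	}
	return 0;
}

int main(void)
{
	int fd = open("demo.dat", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) { perror("open"); return 1; }

	if (pwrite(fd, "old", 3, 0) != 3) { perror("pwrite"); return 1; }

	struct stat st;
	if (fstat(fd, &st) != 0) { perror("fstat"); return 1; }

	/* Zero from the old EOF up to the new write offset, then write. */
	if (zero_eof(fd, st.st_size, 8192) != 0) { perror("zero_eof"); return 1; }
	if (pwrite(fd, "new", 3, 8192) != 3) { perror("pwrite"); return 1; }

	close(fd);
	return 0;
}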
xfs_filestream.h
27 void xfs_filestream_deassociate(struct xfs_inode *ip);
28 xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip);
34 struct xfs_inode *ip) xfs_inode_is_filestream()
36 return (ip->i_mount->m_flags & XFS_MOUNT_FILESTREAMS) || xfs_inode_is_filestream()
37 (ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM); xfs_inode_is_filestream()
33 xfs_inode_is_filestream( struct xfs_inode *ip) xfs_inode_is_filestream() argument
xfs_quota.h
38 #define XFS_NOT_DQATTACHED(mp, ip) \
39 ((XFS_IS_UQUOTA_ON(mp) && (ip)->i_udquot == NULL) || \
40 (XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
41 (XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))
105 xfs_qm_vop_dqalloc(struct xfs_inode *ip, xfs_dqid_t uid, xfs_dqid_t gid, xfs_qm_vop_dqalloc() argument
116 #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
120 struct xfs_inode *ip, long nblks, long ninos, uint flags) xfs_trans_reserve_quota_nblks()
131 #define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
133 #define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
134 #define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0)
135 #define xfs_qm_dqattach(ip, fl) (0)
136 #define xfs_qm_dqattach_locked(ip, fl) (0)
137 #define xfs_qm_dqdetach(ip)
139 #define xfs_qm_statvfs(ip, s)
146 #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
147 xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
119 xfs_trans_reserve_quota_nblks(struct xfs_trans *tp, struct xfs_inode *ip, long nblks, long ninos, uint flags) xfs_trans_reserve_quota_nblks() argument
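The xfs_quota.h hits above show the quota entry points collapsing to empty macros or (0) stubs, presumably the CONFIG_XFS_QUOTA=n branch, so callers such as the iomap and ioctl paths never need #ifdefs of their own. A compact, self-contained illustration of that compile-the-feature-out idiom; FEATURE_QUOTA and quota_reserve are invented names for the example:

#include <stdio.h>

/* Build with -DFEATURE_QUOTA to get the real implementation. */
#ifdef FEATURE_QUOTA
static int quota_reserve(long nblks)
{
	printf("reserving %ld blocks against quota\n", nblks);
	return 0;
}
#else
/* Feature compiled out: the stub always succeeds, callers stay unchanged. */
#define quota_reserve(nblks)	(0)
#endif

int main(void)
{
	if (quota_reserve(128) != 0)
		return 1;
	puts("write proceeds");
	return 0;
}

Built without -DFEATURE_QUOTA the call folds to a constant; with it, the real function runs, and the calling code is identical either way.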
xfs_icache.h
56 void xfs_inode_free(struct xfs_inode *ip);
64 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
66 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
67 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
69 int xfs_inode_free_quota_eofblocks(struct xfs_inode *ip);
73 int (*execute)(struct xfs_inode *ip, int flags, void *args),
76 int (*execute)(struct xfs_inode *ip, int flags, void *args),
xfs_filestream.c
35 struct xfs_inode *ip; member in struct:xfs_fstrm_item
128 xfs_filestream_put_ag(item->ip->i_mount, item->ag); xfs_fstrm_free_func()
130 trace_xfs_filestream_free(item->ip, item->ag); xfs_fstrm_free_func()
141 struct xfs_inode *ip, xfs_filestream_pick_ag()
147 struct xfs_mount *mp = ip->i_mount; xfs_filestream_pick_ag()
154 ASSERT(S_ISDIR(ip->i_d.di_mode)); xfs_filestream_pick_ag()
166 trace_xfs_filestream_scan(ip, ag); xfs_filestream_pick_ag()
249 trace_xfs_filestream_pick(ip, *agp, free, nscan); xfs_filestream_pick_ag()
254 trace_xfs_filestream_pick(ip, *agp, free, nscan); xfs_filestream_pick_ag()
265 item->ip = ip; xfs_filestream_pick_ag()
267 err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru); xfs_filestream_pick_ag()
285 struct xfs_inode *ip) xfs_filestream_get_parent()
287 struct inode *inode = VFS_I(ip), *dir = NULL; xfs_filestream_get_parent()
315 struct xfs_inode *ip) xfs_filestream_lookup_ag()
317 struct xfs_mount *mp = ip->i_mount; xfs_filestream_lookup_ag()
322 ASSERT(S_ISREG(ip->i_d.di_mode)); xfs_filestream_lookup_ag()
324 pip = xfs_filestream_get_parent(ip); xfs_filestream_lookup_ag()
333 trace_xfs_filestream_lookup(ip, ag); xfs_filestream_lookup_ag()
367 struct xfs_inode *ip = ap->ip, *pip; xfs_filestream_new_ag() local
368 struct xfs_mount *mp = ip->i_mount; xfs_filestream_new_ag()
376 pip = xfs_filestream_get_parent(ip); xfs_filestream_new_ag()
407 struct xfs_inode *ip) xfs_filestream_deassociate()
409 xfs_mru_cache_delete(ip->i_mount->m_filestream, ip->i_ino); xfs_filestream_deassociate()
140 xfs_filestream_pick_ag( struct xfs_inode *ip, xfs_agnumber_t startag, xfs_agnumber_t *agp, int flags, xfs_extlen_t minlen) xfs_filestream_pick_ag() argument
284 xfs_filestream_get_parent( struct xfs_inode *ip) xfs_filestream_get_parent() argument
314 xfs_filestream_lookup_ag( struct xfs_inode *ip) xfs_filestream_lookup_ag() argument
406 xfs_filestream_deassociate( struct xfs_inode *ip) xfs_filestream_deassociate() argument
xfs_trace.h
216 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
218 TP_ARGS(ip, idx, r, state, caller_ip),
231 __entry->dev = VFS_I(ip)->i_sb->s_dev;
232 __entry->ino = ip->i_ino;
255 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
257 TP_ARGS(ip, idx, state, caller_ip),
271 ip->i_afp : &ip->i_df;
275 __entry->dev = VFS_I(ip)->i_sb->s_dev;
276 __entry->ino = ip->i_ino;
300 TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
302 TP_ARGS(ip, idx, state, caller_ip))
543 TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno),
544 TP_ARGS(ip, agno),
552 __entry->dev = VFS_I(ip)->i_sb->s_dev;
553 __entry->ino = ip->i_ino;
555 __entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno);
565 TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno), \
566 TP_ARGS(ip, agno))
572 TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno,
574 TP_ARGS(ip, agno, free, nscan),
584 __entry->dev = VFS_I(ip)->i_sb->s_dev;
585 __entry->ino = ip->i_ino;
587 __entry->streams = xfs_filestream_peek_ag(ip->i_mount, agno);
601 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
603 TP_ARGS(ip, lock_flags, caller_ip),
611 __entry->dev = VFS_I(ip)->i_sb->s_dev;
612 __entry->ino = ip->i_ino;
625 TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
627 TP_ARGS(ip, lock_flags, caller_ip))
634 TP_PROTO(struct xfs_inode *ip),
635 TP_ARGS(ip),
641 __entry->dev = VFS_I(ip)->i_sb->s_dev;
642 __entry->ino = ip->i_ino;
651 TP_PROTO(struct xfs_inode *ip), \
652 TP_ARGS(ip))
695 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
696 TP_ARGS(ip, caller_ip),
705 __entry->dev = VFS_I(ip)->i_sb->s_dev;
706 __entry->ino = ip->i_ino;
707 __entry->count = atomic_read(&VFS_I(ip)->i_count);
708 __entry->pincount = atomic_read(&ip->i_pincount);
720 TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift,
722 TP_ARGS(ip, blocks, shift, writeio_blocks),
731 __entry->dev = VFS_I(ip)->i_sb->s_dev;
732 __entry->ino = ip->i_ino;
792 TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
793 TP_ARGS(ip, caller_ip))
1139 TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
1140 TP_ARGS(ip, count, offset, flags),
1150 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1151 __entry->ino = ip->i_ino;
1152 __entry->size = ip->i_d.di_size;
1169 TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
1170 TP_ARGS(ip, count, offset, flags))
1226 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
1228 TP_ARGS(ip, offset, count, type, irec),
1241 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1242 __entry->ino = ip->i_ino;
1243 __entry->size = ip->i_d.di_size;
1266 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
1268 TP_ARGS(ip, offset, count, type, irec))
1280 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
1281 TP_ARGS(ip, offset, count),
1291 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1292 __entry->ino = ip->i_ino;
1293 __entry->isize = VFS_I(ip)->i_size;
1294 __entry->disize = ip->i_d.di_size;
1310 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
1311 TP_ARGS(ip, offset, count))
1319 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
1320 TP_ARGS(ip, new_size),
1328 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1329 __entry->ino = ip->i_ino;
1330 __entry->size = ip->i_d.di_size;
1342 TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
1343 TP_ARGS(ip, new_size))
1348 TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
1349 TP_ARGS(ip, start, finish),
1358 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1359 __entry->ino = ip->i_ino;
1360 __entry->size = ip->i_d.di_size;
1373 TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
1375 TP_ARGS(ip, bno, len, flags, caller_ip),
1386 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1387 __entry->ino = ip->i_ino;
1388 __entry->size = ip->i_d.di_size;
1922 TP_PROTO(struct xfs_inode *ip, int which),
1923 TP_ARGS(ip, which),
1934 __entry->dev = VFS_I(ip)->i_sb->s_dev;
1936 __entry->ino = ip->i_ino;
1937 __entry->format = ip->i_d.di_format;
1938 __entry->nex = ip->i_d.di_nextents;
1939 __entry->broot_size = ip->i_df.if_broot_bytes;
1940 __entry->fork_off = XFS_IFORK_BOFF(ip);
1955 TP_PROTO(struct xfs_inode *ip, int which), \
1956 TP_ARGS(ip, which))
xfs_qm_syscalls.c
226 struct xfs_inode *ip; xfs_qm_scall_trunc_qfile() local
233 error = xfs_iget(mp, NULL, ino, 0, 0, &ip); xfs_qm_scall_trunc_qfile()
237 xfs_ilock(ip, XFS_IOLOCK_EXCL); xfs_qm_scall_trunc_qfile()
243 xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_qm_scall_trunc_qfile()
247 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_scall_trunc_qfile()
248 xfs_trans_ijoin(tp, ip, 0); xfs_qm_scall_trunc_qfile()
250 ip->i_d.di_size = 0; xfs_qm_scall_trunc_qfile()
251 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_qm_scall_trunc_qfile()
253 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); xfs_qm_scall_trunc_qfile()
259 ASSERT(ip->i_d.di_nextents == 0); xfs_qm_scall_trunc_qfile()
261 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_qm_scall_trunc_qfile()
265 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); xfs_qm_scall_trunc_qfile()
267 IRELE(ip); xfs_qm_scall_trunc_qfile()
723 struct xfs_inode *ip, xfs_dqrele_inode()
728 if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || xfs_dqrele_inode()
729 ip == ip->i_mount->m_quotainfo->qi_gquotaip || xfs_dqrele_inode()
730 ip == ip->i_mount->m_quotainfo->qi_pquotaip) { xfs_dqrele_inode()
731 ASSERT(ip->i_udquot == NULL); xfs_dqrele_inode()
732 ASSERT(ip->i_gdquot == NULL); xfs_dqrele_inode()
733 ASSERT(ip->i_pdquot == NULL); xfs_dqrele_inode()
737 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_dqrele_inode()
738 if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_dqrele_inode()
739 xfs_qm_dqrele(ip->i_udquot); xfs_dqrele_inode()
740 ip->i_udquot = NULL; xfs_dqrele_inode()
742 if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) { xfs_dqrele_inode()
743 xfs_qm_dqrele(ip->i_gdquot); xfs_dqrele_inode()
744 ip->i_gdquot = NULL; xfs_dqrele_inode()
746 if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) { xfs_dqrele_inode()
747 xfs_qm_dqrele(ip->i_pdquot); xfs_dqrele_inode()
748 ip->i_pdquot = NULL; xfs_dqrele_inode()
750 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_dqrele_inode()
722 xfs_dqrele_inode( struct xfs_inode *ip, int flags, void *args) xfs_dqrele_inode() argument
xfs_symlink.h
24 int xfs_readlink(struct xfs_inode *ip, char *link);
25 int xfs_inactive_symlink(struct xfs_inode *ip);
/linux-4.4.14/include/linux/netfilter/ipset/
pfxlen.h
15 return ip_set_netmask_map[pfxlen].ip; ip_set_netmask()
27 return (__force u32) ip_set_hostmask_map[pfxlen].ip; ip_set_hostmask()
45 ip6_netmask(union nf_inet_addr *ip, u8 prefix) ip6_netmask() argument
47 ip->ip6[0] &= ip_set_netmask6(prefix)[0]; ip6_netmask()
48 ip->ip6[1] &= ip_set_netmask6(prefix)[1]; ip6_netmask()
49 ip->ip6[2] &= ip_set_netmask6(prefix)[2]; ip6_netmask()
50 ip->ip6[3] &= ip_set_netmask6(prefix)[3]; ip6_netmask()
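ip_set_netmask and ip_set_hostmask above index precomputed tables by prefix length, and ip6_netmask ANDs each 32-bit word of the address with the matching mask word. The masks themselves are simple shifts; a stand-alone userspace version that derives an IPv4 netmask from a prefix length and applies an IPv6 prefix byte by byte:

#include <arpa/inet.h>
#include <stdio.h>
#include <stdint.h>

/* IPv4 netmask for a prefix length 0..32, returned in network byte order. */
static uint32_t netmask4(unsigned pfxlen)
{
	return pfxlen ? htonl(~0u << (32 - pfxlen)) : 0;
}

/* Mask an IPv6 address in place down to its first pfxlen bits (0..128). */
static void netmask6(struct in6_addr *a, unsigned pfxlen)
{
	for (unsigned i = 0; i < 16; i++) {
		if (pfxlen >= 8) {
			pfxlen -= 8;		/* whole byte kept */
		} else {
			a->s6_addr[i] &= (uint8_t)(0xff << (8 - pfxlen));
			pfxlen = 0;		/* remaining bytes zeroed */
		}
	}
}

int main(void)
{
	char buf[INET6_ADDRSTRLEN];

	struct in_addr v4 = { .s_addr = netmask4(20) };
	printf("/20 -> %s\n", inet_ntop(AF_INET, &v4, buf, sizeof(buf)));

	struct in6_addr v6;
	inet_pton(AF_INET6, "2001:db8:abcd:1234::1", &v6);
	netmask6(&v6, 48);
	printf("2001:db8:abcd:1234::1/48 -> %s\n",
	       inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
	return 0;
}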
/linux-4.4.14/fs/freevxfs/
vxfs_inode.c
216 * @ip: VFS inode
221 * fields in @ip from @vip.
224 vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip) vxfs_iinit() argument
227 ip->i_mode = vxfs_transmod(vip); vxfs_iinit()
228 i_uid_write(ip, (uid_t)vip->vii_uid); vxfs_iinit()
229 i_gid_write(ip, (gid_t)vip->vii_gid); vxfs_iinit()
231 set_nlink(ip, vip->vii_nlink); vxfs_iinit()
232 ip->i_size = vip->vii_size; vxfs_iinit()
234 ip->i_atime.tv_sec = vip->vii_atime; vxfs_iinit()
235 ip->i_ctime.tv_sec = vip->vii_ctime; vxfs_iinit()
236 ip->i_mtime.tv_sec = vip->vii_mtime; vxfs_iinit()
237 ip->i_atime.tv_nsec = 0; vxfs_iinit()
238 ip->i_ctime.tv_nsec = 0; vxfs_iinit()
239 ip->i_mtime.tv_nsec = 0; vxfs_iinit()
241 ip->i_blocks = vip->vii_blocks; vxfs_iinit()
242 ip->i_generation = vip->vii_gen; vxfs_iinit()
244 ip->i_private = vip; vxfs_iinit()
261 struct inode *ip = NULL; vxfs_get_fake_inode() local
263 if ((ip = new_inode(sbp))) { vxfs_get_fake_inode()
264 ip->i_ino = get_next_ino(); vxfs_get_fake_inode()
265 vxfs_iinit(ip, vip); vxfs_get_fake_inode()
266 ip->i_mapping->a_ops = &vxfs_aops; vxfs_get_fake_inode()
268 return (ip); vxfs_get_fake_inode()
273 * *ip: VFS inode
276 * vxfs_put_fake_inode frees all data associated with @ip.
279 vxfs_put_fake_inode(struct inode *ip) vxfs_put_fake_inode() argument
281 iput(ip); vxfs_put_fake_inode()
298 struct inode *ip; vxfs_iget() local
300 ip = iget_locked(sbp, ino); vxfs_iget()
301 if (!ip) vxfs_iget()
303 if (!(ip->i_state & I_NEW)) vxfs_iget()
304 return ip; vxfs_iget()
308 iget_failed(ip); vxfs_iget()
312 vxfs_iinit(ip, vip); vxfs_iget()
319 if (S_ISREG(ip->i_mode)) { vxfs_iget()
320 ip->i_fop = &generic_ro_fops; vxfs_iget()
321 ip->i_mapping->a_ops = aops; vxfs_iget()
322 } else if (S_ISDIR(ip->i_mode)) { vxfs_iget()
323 ip->i_op = &vxfs_dir_inode_ops; vxfs_iget()
324 ip->i_fop = &vxfs_dir_operations; vxfs_iget()
325 ip->i_mapping->a_ops = aops; vxfs_iget()
326 } else if (S_ISLNK(ip->i_mode)) { vxfs_iget()
328 ip->i_op = &page_symlink_inode_operations; vxfs_iget()
329 ip->i_mapping->a_ops = &vxfs_aops; vxfs_iget()
331 ip->i_op = &simple_symlink_inode_operations; vxfs_iget()
332 ip->i_link = vip->vii_immed.vi_immed; vxfs_iget()
333 nd_terminate_link(ip->i_link, ip->i_size, vxfs_iget()
337 init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); vxfs_iget()
339 unlock_new_inode(ip); vxfs_iget()
340 return ip; vxfs_iget()
351 * @ip: inode to discard.
358 vxfs_evict_inode(struct inode *ip) vxfs_evict_inode() argument
360 truncate_inode_pages_final(&ip->i_data); vxfs_evict_inode()
361 clear_inode(ip); vxfs_evict_inode()
362 call_rcu(&ip->i_rcu, vxfs_i_callback); vxfs_evict_inode()
vxfs_bmap.c
55 * @ip: pointer to the inode we do bmap for
67 vxfs_bmap_ext4(struct inode *ip, long bn) vxfs_bmap_ext4() argument
69 struct super_block *sb = ip->i_sb; vxfs_bmap_ext4()
70 struct vxfs_inode_info *vip = VXFS_INO(ip); vxfs_bmap_ext4()
112 * @ip: pointer to the inode we do bmap for
128 vxfs_bmap_indir(struct inode *ip, long indir, int size, long block) vxfs_bmap_indir() argument
134 for (i = 0; i < size * VXFS_TYPED_PER_BLOCK(ip->i_sb); i++) { vxfs_bmap_indir()
138 bp = sb_bread(ip->i_sb, vxfs_bmap_indir()
139 indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb))); vxfs_bmap_indir()
144 (i % VXFS_TYPED_PER_BLOCK(ip->i_sb)); vxfs_bmap_indir()
154 pblock = vxfs_bmap_indir(ip, typ->vt_block, vxfs_bmap_indir()
191 * @ip: pointer to the inode we do bmap for
201 vxfs_bmap_typed(struct inode *ip, long iblock) vxfs_bmap_typed() argument
203 struct vxfs_inode_info *vip = VXFS_INO(ip); vxfs_bmap_typed()
218 pblock = vxfs_bmap_indir(ip, typ->vt_block, vxfs_bmap_typed()
249 * @ip: pointer to the inode we do bmap for
260 vxfs_bmap1(struct inode *ip, long iblock) vxfs_bmap1() argument
262 struct vxfs_inode_info *vip = VXFS_INO(ip); vxfs_bmap1()
265 return vxfs_bmap_ext4(ip, iblock); vxfs_bmap1()
267 return vxfs_bmap_typed(ip, iblock); vxfs_bmap1()
274 ip->i_ino, vip->vii_orgtype); vxfs_bmap1()
279 ip->i_ino, vip->vii_orgtype); vxfs_bmap1()
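vxfs_bmap_ext4, vxfs_bmap_indir and vxfs_bmap_typed above translate a logical file block into an on-disk block by walking the inode's extent descriptors. The core lookup is "find the run that covers the block, then add the offset into it"; a small stand-alone sketch over an invented extent list:

#include <stdio.h>
#include <stdint.h>

/* A contiguous run of blocks: file block 'fileoff' maps to disk block 'start'. */
struct extent {
	uint64_t fileoff;	/* first logical block covered */
	uint64_t start;		/* first physical block on disk */
	uint64_t len;		/* number of blocks in the run */
};

/* Invented extent map for illustration only. */
static const struct extent map[] = {
	{ 0,  1000, 8 },
	{ 8,  5000, 4 },
	{ 12, 9000, 16 },
};

/* Return the physical block backing logical block 'bn', or 0 for a hole. */
static uint64_t bmap(uint64_t bn)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (bn >= map[i].fileoff && bn < map[i].fileoff + map[i].len)
			return map[i].start + (bn - map[i].fileoff);
	return 0;
}

int main(void)
{
	for (uint64_t bn = 0; bn < 16; bn += 5)
		printf("logical %llu -> physical %llu\n",
		       (unsigned long long)bn, (unsigned long long)bmap(bn));
	return 0;
}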
vxfs_lookup.c
65 dir_blocks(struct inode *ip) dir_blocks() argument
67 u_long bsize = ip->i_sb->s_blocksize; dir_blocks()
68 return (ip->i_size + bsize - 1) & ~(bsize - 1); dir_blocks()
94 * @ip: directory inode
107 vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp) vxfs_find_entry() argument
110 u_long bsize = ip->i_sb->s_blocksize; vxfs_find_entry()
114 npages = dir_pages(ip); vxfs_find_entry()
115 nblocks = dir_blocks(ip); vxfs_find_entry()
116 pblocks = VXFS_BLOCK_PER_PAGE(ip->i_sb); vxfs_find_entry()
122 pp = vxfs_get_page(ip->i_mapping, page); vxfs_find_entry()
201 struct inode *ip = NULL; vxfs_lookup() local
209 ip = vxfs_iget(dip->i_sb, ino); vxfs_lookup()
210 if (IS_ERR(ip)) vxfs_lookup()
211 return ERR_CAST(ip); vxfs_lookup()
213 d_add(dp, ip); vxfs_lookup()
233 struct inode *ip = file_inode(fp); vxfs_readdir() local
234 struct super_block *sbp = ip->i_sb; vxfs_readdir()
245 if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR)) vxfs_readdir()
251 if (pos > VXFS_DIRROUND(ip->i_size)) vxfs_readdir()
254 npages = dir_pages(ip); vxfs_readdir()
255 nblocks = dir_blocks(ip); vxfs_readdir()
266 pp = vxfs_get_page(ip->i_mapping, page); vxfs_readdir()
vxfs_subr.c
58 * @ip: inode to read from
62 * vxfs_get_page reads the @n th page of @ip into the pagecache.
91 * @ip: inode
96 * @ip into the buffercache.
102 vxfs_bread(struct inode *ip, int block) vxfs_bread() argument
107 pblock = vxfs_bmap1(ip, block); vxfs_bread()
108 bp = sb_bread(ip->i_sb, pblock); vxfs_bread()
115 * @ip: inode
129 vxfs_getblk(struct inode *ip, sector_t iblock, vxfs_getblk() argument
134 pblock = vxfs_bmap1(ip, iblock); vxfs_getblk()
136 map_bh(bp, ip->i_sb, pblock); vxfs_getblk()
vxfs.h
220 #define VXFS_IS_TYPE(ip,type) (((ip)->vii_mode & VXFS_TYPE_MASK) == (type))
244 #define VXFS_IS_ORG(ip,org) ((ip)->vii_orgtype == (org))
245 #define VXFS_ISNONE(ip) VXFS_IS_ORG((ip), VXFS_ORG_NONE)
246 #define VXFS_ISEXT4(ip) VXFS_IS_ORG((ip), VXFS_ORG_EXT4)
247 #define VXFS_ISIMMED(ip) VXFS_IS_ORG((ip), VXFS_ORG_IMMED)
248 #define VXFS_ISTYPED(ip) VXFS_IS_ORG((ip), VXFS_ORG_TYPED)
254 #define VXFS_INO(ip) \
255 ((struct vxfs_inode_info *)(ip)->i_private)
/linux-4.4.14/drivers/net/ethernet/sgi/
ioc3-eth.c
42 #include <linux/ip.h>
58 #include <net/ip.h>
109 static inline void ioc3_stop(struct ioc3_private *ip);
401 static void ioc3_get_eaddr_nic(struct ioc3_private *ip) ioc3_get_eaddr_nic() argument
403 struct ioc3 *ioc3 = ip->regs; ioc3_get_eaddr_nic()
430 priv_netdev(ip)->dev_addr[i - 2] = nic[i]; ioc3_get_eaddr_nic()
438 static void ioc3_get_eaddr(struct ioc3_private *ip) ioc3_get_eaddr() argument
440 ioc3_get_eaddr_nic(ip); ioc3_get_eaddr()
442 printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr); ioc3_get_eaddr()
447 struct ioc3_private *ip = netdev_priv(dev); __ioc3_set_mac_address() local
448 struct ioc3 *ioc3 = ip->regs; __ioc3_set_mac_address()
457 struct ioc3_private *ip = netdev_priv(dev); ioc3_set_mac_address() local
462 spin_lock_irq(&ip->ioc3_lock); ioc3_set_mac_address()
464 spin_unlock_irq(&ip->ioc3_lock); ioc3_set_mac_address()
475 struct ioc3_private *ip = netdev_priv(dev); ioc3_mdio_read() local
476 struct ioc3 *ioc3 = ip->regs; ioc3_mdio_read()
487 struct ioc3_private *ip = netdev_priv(dev); ioc3_mdio_write() local
488 struct ioc3 *ioc3 = ip->regs; ioc3_mdio_write()
496 static int ioc3_mii_init(struct ioc3_private *ip);
500 struct ioc3_private *ip = netdev_priv(dev); ioc3_get_stats() local
501 struct ioc3 *ioc3 = ip->regs; ioc3_get_stats()
525 * malformed packet we'll try to access the packet at ip header + ioc3_tcpudp_checksum()
577 struct ioc3_private *ip = netdev_priv(dev); ioc3_rx() local
579 struct ioc3 *ioc3 = ip->regs; ioc3_rx()
585 rxr = ip->rxr; /* Ring base */ ioc3_rx()
586 rx_entry = ip->rx_ci; /* RX consume index */ ioc3_rx()
587 n_entry = ip->rx_pi; ioc3_rx()
589 skb = ip->rx_skbs[rx_entry]; ioc3_rx()
615 ip->rx_skbs[rx_entry] = NULL; /* Poison */ ioc3_rx()
636 ip->rx_skbs[n_entry] = new_skb; ioc3_rx()
643 skb = ip->rx_skbs[rx_entry]; ioc3_rx()
648 ip->rx_pi = n_entry; ioc3_rx()
649 ip->rx_ci = rx_entry; ioc3_rx()
654 struct ioc3_private *ip = netdev_priv(dev); ioc3_tx() local
656 struct ioc3 *ioc3 = ip->regs; ioc3_tx()
661 spin_lock(&ip->ioc3_lock); ioc3_tx()
665 o_entry = ip->tx_ci; ioc3_tx()
671 skb = ip->tx_skbs[o_entry]; ioc3_tx()
674 ip->tx_skbs[o_entry] = NULL; ioc3_tx()
684 ip->txqlen -= packets; ioc3_tx()
686 if (ip->txqlen < 128) ioc3_tx()
689 ip->tx_ci = o_entry; ioc3_tx()
690 spin_unlock(&ip->ioc3_lock); ioc3_tx()
702 struct ioc3_private *ip = netdev_priv(dev); ioc3_error() local
705 spin_lock(&ip->ioc3_lock); ioc3_error()
720 ioc3_stop(ip); ioc3_error()
722 ioc3_mii_init(ip); ioc3_error()
726 spin_unlock(&ip->ioc3_lock); ioc3_error()
734 struct ioc3_private *ip = netdev_priv(dev); ioc3_interrupt() local
735 struct ioc3 *ioc3 = ip->regs; ioc3_interrupt()
757 static inline void ioc3_setup_duplex(struct ioc3_private *ip) ioc3_setup_duplex() argument
759 struct ioc3 *ioc3 = ip->regs; ioc3_setup_duplex()
761 if (ip->mii.full_duplex) { ioc3_setup_duplex()
763 ip->emcr |= EMCR_DUPLEX; ioc3_setup_duplex()
766 ip->emcr &= ~EMCR_DUPLEX; ioc3_setup_duplex()
768 ioc3_w_emcr(ip->emcr); ioc3_setup_duplex()
773 struct ioc3_private *ip = (struct ioc3_private *) data; ioc3_timer() local
776 mii_check_media(&ip->mii, 1, 0); ioc3_timer()
777 ioc3_setup_duplex(ip); ioc3_timer()
779 ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */ ioc3_timer()
780 add_timer(&ip->ioc3_timer); ioc3_timer()
791 static int ioc3_mii_init(struct ioc3_private *ip) ioc3_mii_init() argument
793 struct net_device *dev = priv_netdev(ip); ioc3_mii_init()
811 ip->mii.phy_id = -1; ioc3_mii_init()
817 ip->mii.phy_id = i; ioc3_mii_init()
823 static void ioc3_mii_start(struct ioc3_private *ip) ioc3_mii_start() argument
825 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ ioc3_mii_start()
826 ip->ioc3_timer.data = (unsigned long) ip; ioc3_mii_start()
827 ip->ioc3_timer.function = ioc3_timer; ioc3_mii_start()
828 add_timer(&ip->ioc3_timer); ioc3_mii_start()
831 static inline void ioc3_clean_rx_ring(struct ioc3_private *ip) ioc3_clean_rx_ring() argument
836 for (i = ip->rx_ci; i & 15; i++) { ioc3_clean_rx_ring()
837 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci]; ioc3_clean_rx_ring()
838 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++]; ioc3_clean_rx_ring()
840 ip->rx_pi &= 511; ioc3_clean_rx_ring()
841 ip->rx_ci &= 511; ioc3_clean_rx_ring()
843 for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) { ioc3_clean_rx_ring()
845 skb = ip->rx_skbs[i]; ioc3_clean_rx_ring()
851 static inline void ioc3_clean_tx_ring(struct ioc3_private *ip) ioc3_clean_tx_ring() argument
857 skb = ip->tx_skbs[i]; ioc3_clean_tx_ring()
859 ip->tx_skbs[i] = NULL; ioc3_clean_tx_ring()
862 ip->txr[i].cmd = 0; ioc3_clean_tx_ring()
864 ip->tx_pi = 0; ioc3_clean_tx_ring()
865 ip->tx_ci = 0; ioc3_clean_tx_ring()
868 static void ioc3_free_rings(struct ioc3_private *ip) ioc3_free_rings() argument
873 if (ip->txr) { ioc3_free_rings()
874 ioc3_clean_tx_ring(ip); ioc3_free_rings()
875 free_pages((unsigned long)ip->txr, 2); ioc3_free_rings()
876 ip->txr = NULL; ioc3_free_rings()
879 if (ip->rxr) { ioc3_free_rings()
880 n_entry = ip->rx_ci; ioc3_free_rings()
881 rx_entry = ip->rx_pi; ioc3_free_rings()
884 skb = ip->rx_skbs[n_entry]; ioc3_free_rings()
890 free_page((unsigned long)ip->rxr); ioc3_free_rings()
891 ip->rxr = NULL; ioc3_free_rings()
897 struct ioc3_private *ip = netdev_priv(dev); ioc3_alloc_rings() local
902 if (ip->rxr == NULL) { ioc3_alloc_rings()
904 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC); ioc3_alloc_rings()
905 rxr = ip->rxr; ioc3_alloc_rings()
921 ip->rx_skbs[i] = skb; ioc3_alloc_rings()
929 ip->rx_ci = 0; ioc3_alloc_rings()
930 ip->rx_pi = RX_BUFFS; ioc3_alloc_rings()
933 if (ip->txr == NULL) { ioc3_alloc_rings()
935 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2); ioc3_alloc_rings()
936 if (!ip->txr) ioc3_alloc_rings()
938 ip->tx_pi = 0; ioc3_alloc_rings()
939 ip->tx_ci = 0; ioc3_alloc_rings()
945 struct ioc3_private *ip = netdev_priv(dev); ioc3_init_rings() local
946 struct ioc3 *ioc3 = ip->regs; ioc3_init_rings()
949 ioc3_free_rings(ip); ioc3_init_rings()
952 ioc3_clean_rx_ring(ip); ioc3_init_rings()
953 ioc3_clean_tx_ring(ip); ioc3_init_rings()
956 ring = ioc3_map(ip->rxr, 0); ioc3_init_rings()
959 ioc3_w_ercir(ip->rx_ci << 3); ioc3_init_rings()
960 ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM); ioc3_init_rings()
962 ring = ioc3_map(ip->txr, 0); ioc3_init_rings()
964 ip->txqlen = 0; /* nothing queued */ ioc3_init_rings()
969 ioc3_w_etpir(ip->tx_pi << 7); ioc3_init_rings()
970 ioc3_w_etcir(ip->tx_ci << 7); ioc3_init_rings()
974 static inline void ioc3_ssram_disc(struct ioc3_private *ip) ioc3_ssram_disc() argument
976 struct ioc3 *ioc3 = ip->regs; ioc3_ssram_disc()
990 ip->emcr = EMCR_RAMPAR; ioc3_ssram_disc()
993 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR; ioc3_ssram_disc()
998 struct ioc3_private *ip = netdev_priv(dev); ioc3_init() local
999 struct ioc3 *ioc3 = ip->regs; ioc3_init()
1001 del_timer_sync(&ip->ioc3_timer); /* Kill if running */ ioc3_init()
1019 ioc3_w_ehar_h(ip->ehar_h); ioc3_init()
1020 ioc3_w_ehar_l(ip->ehar_l); ioc3_init()
1025 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN | ioc3_init()
1027 ioc3_w_emcr(ip->emcr); ioc3_init()
1034 static inline void ioc3_stop(struct ioc3_private *ip) ioc3_stop() argument
1036 struct ioc3 *ioc3 = ip->regs; ioc3_stop()
1045 struct ioc3_private *ip = netdev_priv(dev); ioc3_open() local
1053 ip->ehar_h = 0; ioc3_open()
1054 ip->ehar_l = 0; ioc3_open()
1056 ioc3_mii_start(ip); ioc3_open()
1064 struct ioc3_private *ip = netdev_priv(dev); ioc3_close() local
1066 del_timer_sync(&ip->ioc3_timer); ioc3_close()
1070 ioc3_stop(ip); ioc3_close()
1073 ioc3_free_rings(ip); ioc3_close()
1235 struct ioc3_private *ip; ioc3_probe() local
1279 ip = netdev_priv(dev); ioc3_probe()
1292 ip->regs = ioc3; ioc3_probe()
1298 spin_lock_init(&ip->ioc3_lock); ioc3_probe()
1299 init_timer(&ip->ioc3_timer); ioc3_probe()
1301 ioc3_stop(ip); ioc3_probe()
1304 ip->pdev = pdev; ioc3_probe()
1306 ip->mii.phy_id_mask = 0x1f; ioc3_probe()
1307 ip->mii.reg_num_mask = 0x1f; ioc3_probe()
1308 ip->mii.dev = dev; ioc3_probe()
1309 ip->mii.mdio_read = ioc3_mdio_read; ioc3_probe()
1310 ip->mii.mdio_write = ioc3_mdio_write; ioc3_probe()
1312 ioc3_mii_init(ip); ioc3_probe()
1314 if (ip->mii.phy_id == -1) { ioc3_probe()
1321 ioc3_mii_start(ip); ioc3_probe()
1322 ioc3_ssram_disc(ip); ioc3_probe()
1323 ioc3_get_eaddr(ip); ioc3_probe()
1332 sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); ioc3_probe()
1333 sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); ioc3_probe()
1339 mii_check_media(&ip->mii, 1, 1); ioc3_probe()
1340 ioc3_setup_duplex(ip); ioc3_probe()
1346 "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev); ioc3_probe()
1348 ip->emcr & EMCR_BUFSIZ ? 128 : 64); ioc3_probe()
1353 ioc3_stop(ip); ioc3_probe()
1354 del_timer_sync(&ip->ioc3_timer); ioc3_probe()
1355 ioc3_free_rings(ip); ioc3_probe()
1372 struct ioc3_private *ip = netdev_priv(dev); ioc3_remove_one() local
1373 struct ioc3 *ioc3 = ip->regs; ioc3_remove_one()
1376 del_timer_sync(&ip->ioc3_timer); ioc3_remove_one()
1403 struct ioc3_private *ip = netdev_priv(dev); ioc3_start_xmit() local
1404 struct ioc3 *ioc3 = ip->regs; ioc3_start_xmit()
1458 spin_lock_irq(&ip->ioc3_lock); ioc3_start_xmit()
1463 produce = ip->tx_pi; ioc3_start_xmit()
1464 desc = &ip->txr[produce]; ioc3_start_xmit()
1496 ip->tx_skbs[produce] = skb; /* Remember skb */ ioc3_start_xmit()
1498 ip->tx_pi = produce; ioc3_start_xmit()
1501 ip->txqlen++; ioc3_start_xmit()
1503 if (ip->txqlen >= 127) ioc3_start_xmit()
1506 spin_unlock_irq(&ip->ioc3_lock); ioc3_start_xmit()
1513 struct ioc3_private *ip = netdev_priv(dev); ioc3_timeout() local
1517 spin_lock_irq(&ip->ioc3_lock); ioc3_timeout()
1519 ioc3_stop(ip); ioc3_timeout()
1521 ioc3_mii_init(ip); ioc3_timeout()
1522 ioc3_mii_start(ip); ioc3_timeout()
1524 spin_unlock_irq(&ip->ioc3_lock); ioc3_timeout()
1555 struct ioc3_private *ip = netdev_priv(dev); ioc3_get_drvinfo() local
1559 strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info)); ioc3_get_drvinfo()
1564 struct ioc3_private *ip = netdev_priv(dev); ioc3_get_settings() local
1567 spin_lock_irq(&ip->ioc3_lock); ioc3_get_settings()
1568 rc = mii_ethtool_gset(&ip->mii, cmd); ioc3_get_settings()
1569 spin_unlock_irq(&ip->ioc3_lock); ioc3_get_settings()
1576 struct ioc3_private *ip = netdev_priv(dev); ioc3_set_settings() local
1579 spin_lock_irq(&ip->ioc3_lock); ioc3_set_settings()
1580 rc = mii_ethtool_sset(&ip->mii, cmd); ioc3_set_settings()
1581 spin_unlock_irq(&ip->ioc3_lock); ioc3_set_settings()
1588 struct ioc3_private *ip = netdev_priv(dev); ioc3_nway_reset() local
1591 spin_lock_irq(&ip->ioc3_lock); ioc3_nway_reset()
1592 rc = mii_nway_restart(&ip->mii); ioc3_nway_reset()
1593 spin_unlock_irq(&ip->ioc3_lock); ioc3_nway_reset()
1600 struct ioc3_private *ip = netdev_priv(dev); ioc3_get_link() local
1603 spin_lock_irq(&ip->ioc3_lock); ioc3_get_link()
1604 rc = mii_link_ok(&ip->mii); ioc3_get_link()
1605 spin_unlock_irq(&ip->ioc3_lock); ioc3_get_link()
1620 struct ioc3_private *ip = netdev_priv(dev); ioc3_ioctl() local
1623 spin_lock_irq(&ip->ioc3_lock); ioc3_ioctl()
1624 rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL); ioc3_ioctl()
1625 spin_unlock_irq(&ip->ioc3_lock); ioc3_ioctl()
1633 struct ioc3_private *ip = netdev_priv(dev); ioc3_set_multicast_list() local
1634 struct ioc3 *ioc3 = ip->regs; ioc3_set_multicast_list()
1640 ip->emcr |= EMCR_PROMISC; ioc3_set_multicast_list()
1641 ioc3_w_emcr(ip->emcr); ioc3_set_multicast_list()
1644 ip->emcr &= ~EMCR_PROMISC; ioc3_set_multicast_list()
1645 ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */ ioc3_set_multicast_list()
1653 ip->ehar_h = 0xffffffff; ioc3_set_multicast_list()
1654 ip->ehar_l = 0xffffffff; ioc3_set_multicast_list()
1659 ip->ehar_h = ehar >> 32;
1660 ip->ehar_l = ehar & 0xffffffff;
1662 ioc3_w_ehar_h(ip->ehar_h);
1663 ioc3_w_ehar_l(ip->ehar_l);
/linux-4.4.14/include/uapi/linux/
H A Dbfs_fs.h69 #define BFS_NZFILESIZE(ip) \
70 ((le32_to_cpu((ip)->i_eoffset) + 1) - le32_to_cpu((ip)->i_sblock) * BFS_BSIZE)
72 #define BFS_FILESIZE(ip) \
73 ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip))
75 #define BFS_FILEBLOCKS(ip) \
76 ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock))
/linux-4.4.14/arch/blackfin/kernel/
H A Dftrace.c33 static int ftrace_modify_code(unsigned long ip, const unsigned char *code, ftrace_modify_code() argument
36 int ret = probe_kernel_write((void *)ip, (void *)code, len); ftrace_modify_code()
37 flush_icache_range(ip, ip + len); ftrace_modify_code()
45 return ftrace_modify_code(rec->ip, mnop, sizeof(mnop)); ftrace_make_nop()
54 bfin_make_pcrel24(&call[2], rec->ip + 2, addr); ftrace_make_call()
57 return ftrace_modify_code(rec->ip, call, sizeof(call)); ftrace_make_call()
63 unsigned long ip = (unsigned long)&ftrace_call; ftrace_update_ftrace_func() local
64 bfin_make_pcrel24(call, ip, func); ftrace_update_ftrace_func()
65 return ftrace_modify_code(ip, call, sizeof(call)); ftrace_update_ftrace_func()
83 unsigned long ip = (unsigned long)&ftrace_graph_call; ftrace_enable_ftrace_graph_caller() local
84 uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1; ftrace_enable_ftrace_graph_caller()
86 return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12)); ftrace_enable_ftrace_graph_caller()
/linux-4.4.14/fs/jfs/
H A Dnamei.c80 struct inode *ip = NULL; /* child directory inode */ jfs_create() local
105 ip = ialloc(dip, mode); jfs_create()
106 if (IS_ERR(ip)) { jfs_create()
107 rc = PTR_ERR(ip); jfs_create()
114 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_create()
116 rc = jfs_init_acl(tid, ip, dip); jfs_create()
120 rc = jfs_init_security(tid, ip, dip, &dentry->d_name); jfs_create()
134 tblk->ino = ip->i_ino; jfs_create()
135 tblk->u.ixpxd = JFS_IP(ip)->ixpxd; jfs_create()
138 iplist[1] = ip; jfs_create()
143 xtInitRoot(tid, ip); jfs_create()
149 ino = ip->i_ino; jfs_create()
159 ip->i_op = &jfs_file_inode_operations; jfs_create()
160 ip->i_fop = &jfs_file_operations; jfs_create()
161 ip->i_mapping->a_ops = &jfs_aops; jfs_create()
163 mark_inode_dirty(ip); jfs_create()
173 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_create()
176 free_ea_wmap(ip); jfs_create()
177 clear_nlink(ip); jfs_create()
178 unlock_new_inode(ip); jfs_create()
179 iput(ip); jfs_create()
181 unlock_new_inode(ip); jfs_create()
182 d_instantiate(dentry, ip); jfs_create()
214 struct inode *ip = NULL; /* child directory inode */ jfs_mkdir() local
239 ip = ialloc(dip, S_IFDIR | mode); jfs_mkdir()
240 if (IS_ERR(ip)) { jfs_mkdir()
241 rc = PTR_ERR(ip); jfs_mkdir()
248 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_mkdir()
250 rc = jfs_init_acl(tid, ip, dip); jfs_mkdir()
254 rc = jfs_init_security(tid, ip, dip, &dentry->d_name); jfs_mkdir()
268 tblk->ino = ip->i_ino; jfs_mkdir()
269 tblk->u.ixpxd = JFS_IP(ip)->ixpxd; jfs_mkdir()
272 iplist[1] = ip; jfs_mkdir()
277 dtInitRoot(tid, ip, dip->i_ino); jfs_mkdir()
283 ino = ip->i_ino; jfs_mkdir()
293 set_nlink(ip, 2); /* for '.' */ jfs_mkdir()
294 ip->i_op = &jfs_dir_inode_operations; jfs_mkdir()
295 ip->i_fop = &jfs_dir_operations; jfs_mkdir()
297 mark_inode_dirty(ip); jfs_mkdir()
308 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_mkdir()
311 free_ea_wmap(ip); jfs_mkdir()
312 clear_nlink(ip); jfs_mkdir()
313 unlock_new_inode(ip); jfs_mkdir()
314 iput(ip); jfs_mkdir()
316 unlock_new_inode(ip); jfs_mkdir()
317 d_instantiate(dentry, ip); jfs_mkdir()
353 struct inode *ip = d_inode(dentry); jfs_rmdir() local
365 rc = dquot_initialize(ip); jfs_rmdir()
370 if (!dtEmpty(ip)) { jfs_rmdir()
382 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_rmdir()
385 iplist[1] = ip; jfs_rmdir()
389 tblk->u.ip = ip; jfs_rmdir()
394 ino = ip->i_ino; jfs_rmdir()
400 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_rmdir()
416 if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { jfs_rmdir()
418 txEA(tid, ip, &JFS_IP(ip)->ea, NULL); jfs_rmdir()
420 JFS_IP(ip)->ea.flag = 0; jfs_rmdir()
423 if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { jfs_rmdir()
425 txEA(tid, ip, &JFS_IP(ip)->acl, NULL); jfs_rmdir()
427 JFS_IP(ip)->acl.flag = 0; jfs_rmdir()
430 clear_nlink(ip); jfs_rmdir()
431 mark_inode_dirty(ip); jfs_rmdir()
437 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_rmdir()
483 struct inode *ip = d_inode(dentry); jfs_unlink() local
497 rc = dquot_initialize(ip); jfs_unlink()
504 IWRITE_LOCK(ip, RDWRLOCK_NORMAL); jfs_unlink()
509 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_unlink()
512 iplist[1] = ip; jfs_unlink()
517 ino = ip->i_ino; jfs_unlink()
523 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_unlink()
525 IWRITE_UNLOCK(ip); jfs_unlink()
529 ASSERT(ip->i_nlink); jfs_unlink()
531 ip->i_ctime = dip->i_ctime = dip->i_mtime = CURRENT_TIME; jfs_unlink()
535 inode_dec_link_count(ip); jfs_unlink()
540 if (ip->i_nlink == 0) { jfs_unlink()
541 assert(!test_cflag(COMMIT_Nolink, ip)); jfs_unlink()
543 if ((new_size = commitZeroLink(tid, ip)) < 0) { jfs_unlink()
546 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_unlink()
548 IWRITE_UNLOCK(ip); jfs_unlink()
554 tblk->u.ip = ip; jfs_unlink()
575 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_unlink()
580 mutex_lock(&JFS_IP(ip)->commit_mutex); jfs_unlink()
581 new_size = xtTruncate_pmap(tid, ip, new_size); jfs_unlink()
588 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_unlink()
591 if (ip->i_nlink == 0) jfs_unlink()
592 set_cflag(COMMIT_Nolink, ip); jfs_unlink()
594 IWRITE_UNLOCK(ip); jfs_unlink()
638 static s64 commitZeroLink(tid_t tid, struct inode *ip) commitZeroLink() argument
643 jfs_info("commitZeroLink: tid = %d, ip = 0x%p", tid, ip); commitZeroLink()
645 filetype = ip->i_mode & S_IFMT; commitZeroLink()
651 if (ip->i_size < IDATASIZE) { commitZeroLink()
652 ip->i_size = 0; commitZeroLink()
661 set_cflag(COMMIT_Freewmap, ip); commitZeroLink()
670 if (JFS_IP(ip)->ea.flag & DXD_EXTENT) commitZeroLink()
672 txEA(tid, ip, &JFS_IP(ip)->ea, NULL); commitZeroLink()
677 if (JFS_IP(ip)->acl.flag & DXD_EXTENT) commitZeroLink()
679 txEA(tid, ip, &JFS_IP(ip)->acl, NULL); commitZeroLink()
687 if (ip->i_size) commitZeroLink()
688 return xtTruncate_pmap(tid, ip, 0); commitZeroLink()
702 * PARAMETER: ip - pointer to inode of file.
704 void jfs_free_zero_link(struct inode *ip) jfs_free_zero_link() argument
708 jfs_info("jfs_free_zero_link: ip = 0x%p", ip); jfs_free_zero_link()
713 type = ip->i_mode & S_IFMT; jfs_free_zero_link()
720 if (ip->i_size < IDATASIZE) jfs_free_zero_link()
730 if (JFS_IP(ip)->ea.flag & DXD_EXTENT) { jfs_free_zero_link()
731 s64 xaddr = addressDXD(&JFS_IP(ip)->ea); jfs_free_zero_link()
732 int xlen = lengthDXD(&JFS_IP(ip)->ea); jfs_free_zero_link()
737 invalidate_dxd_metapages(ip, JFS_IP(ip)->ea); jfs_free_zero_link()
745 txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); jfs_free_zero_link()
751 if (JFS_IP(ip)->acl.flag & DXD_EXTENT) { jfs_free_zero_link()
752 s64 xaddr = addressDXD(&JFS_IP(ip)->acl); jfs_free_zero_link()
753 int xlen = lengthDXD(&JFS_IP(ip)->acl); jfs_free_zero_link()
757 invalidate_dxd_metapages(ip, JFS_IP(ip)->acl); jfs_free_zero_link()
765 txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); jfs_free_zero_link()
773 if (ip->i_size) jfs_free_zero_link()
774 xtTruncate(0, ip, 0, COMMIT_WMAP); jfs_free_zero_link()
806 struct inode *ip = d_inode(old_dentry); jfs_link() local
818 tid = txBegin(ip->i_sb, 0); jfs_link()
821 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_link()
835 ino = ip->i_ino; jfs_link()
840 inc_nlink(ip); /* for new link */ jfs_link()
841 ip->i_ctime = CURRENT_TIME; jfs_link()
844 ihold(ip); jfs_link()
846 iplist[0] = ip; jfs_link()
851 drop_nlink(ip); /* never instantiated */ jfs_link()
852 iput(ip); jfs_link()
854 d_instantiate(dentry, ip); jfs_link()
862 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_link()
897 struct inode *ip = d_inode(dentry); jfs_symlink() local
927 ip = ialloc(dip, S_IFLNK | 0777); jfs_symlink()
928 if (IS_ERR(ip)) { jfs_symlink()
929 rc = PTR_ERR(ip); jfs_symlink()
936 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_symlink()
938 rc = jfs_init_security(tid, ip, dip, &dentry->d_name); jfs_symlink()
944 tblk->ino = ip->i_ino; jfs_symlink()
945 tblk->u.ixpxd = JFS_IP(ip)->ixpxd; jfs_symlink()
951 ip->i_mode |= 0777; jfs_symlink()
956 xtInitRoot(tid, ip); jfs_symlink()
963 ip->i_op = &jfs_fast_symlink_inode_operations; jfs_symlink()
965 ip->i_link = JFS_IP(ip)->i_inline; jfs_symlink()
966 memcpy(ip->i_link, name, ssize); jfs_symlink()
967 ip->i_size = ssize - 1; jfs_symlink()
973 if (ssize > sizeof (JFS_IP(ip)->i_inline)) jfs_symlink()
974 JFS_IP(ip)->mode2 &= ~INLINEEA; jfs_symlink()
983 jfs_info("jfs_symlink: allocate extent ip:0x%p", ip); jfs_symlink()
985 ip->i_op = &jfs_symlink_inode_operations; jfs_symlink()
986 ip->i_mapping->a_ops = &jfs_aops; jfs_symlink()
993 sb = ip->i_sb; jfs_symlink()
998 if ((rc = xtInsert(tid, ip, 0, 0, xlen, &xaddr, 0))) { jfs_symlink()
1002 ip->i_size = ssize - 1; jfs_symlink()
1007 mp = get_metapage(ip, xaddr, PSIZE, 1); jfs_symlink()
1010 xtTruncate(tid, ip, 0, COMMIT_PWMAP); jfs_symlink()
1028 ino = ip->i_ino; jfs_symlink()
1033 xtTruncate(tid, ip, 0, COMMIT_PWMAP); jfs_symlink()
1039 mark_inode_dirty(ip); jfs_symlink()
1048 iplist[1] = ip; jfs_symlink()
1053 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_symlink()
1056 free_ea_wmap(ip); jfs_symlink()
1057 clear_nlink(ip); jfs_symlink()
1058 unlock_new_inode(ip); jfs_symlink()
1059 iput(ip); jfs_symlink()
1061 unlock_new_inode(ip); jfs_symlink()
1062 d_instantiate(dentry, ip); jfs_symlink()
1204 tblk->u.ip = new_ip; jfs_rename()
1215 tblk->u.ip = new_ip; jfs_rename()
1369 struct inode *ip; jfs_mknod() local
1384 ip = ialloc(dir, mode); jfs_mknod()
1385 if (IS_ERR(ip)) { jfs_mknod()
1386 rc = PTR_ERR(ip); jfs_mknod()
1389 jfs_ip = JFS_IP(ip); jfs_mknod()
1394 mutex_lock_nested(&JFS_IP(ip)->commit_mutex, COMMIT_MUTEX_CHILD); jfs_mknod()
1396 rc = jfs_init_acl(tid, ip, dir); jfs_mknod()
1400 rc = jfs_init_security(tid, ip, dir, &dentry->d_name); jfs_mknod()
1413 tblk->ino = ip->i_ino; jfs_mknod()
1414 tblk->u.ixpxd = JFS_IP(ip)->ixpxd; jfs_mknod()
1416 ino = ip->i_ino; jfs_mknod()
1422 ip->i_op = &jfs_file_inode_operations; jfs_mknod()
1424 init_special_inode(ip, ip->i_mode, rdev); jfs_mknod()
1426 mark_inode_dirty(ip); jfs_mknod()
1433 iplist[1] = ip; jfs_mknod()
1438 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_mknod()
1441 free_ea_wmap(ip); jfs_mknod()
1442 clear_nlink(ip); jfs_mknod()
1443 unlock_new_inode(ip); jfs_mknod()
1444 iput(ip); jfs_mknod()
1446 unlock_new_inode(ip); jfs_mknod()
1447 d_instantiate(dentry, ip); jfs_mknod()
1462 struct inode *ip; jfs_lookup() local
1473 ip = NULL; jfs_lookup()
1476 ip = ERR_PTR(rc); jfs_lookup()
1478 ip = jfs_iget(dip->i_sb, inum); jfs_lookup()
1479 if (IS_ERR(ip)) jfs_lookup()
1483 return d_splice_alias(ip, dentry); jfs_lookup()
H A Dinode.c197 int jfs_get_block(struct inode *ip, sector_t lblock, jfs_get_block() argument
205 s32 xlen = bh_result->b_size >> ip->i_blkbits; jfs_get_block()
211 IWRITE_LOCK(ip, RDWRLOCK_NORMAL); jfs_get_block()
213 IREAD_LOCK(ip, RDWRLOCK_NORMAL); jfs_get_block()
215 if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && jfs_get_block()
216 (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && jfs_get_block()
237 rc = extRecord(ip, &xad); jfs_get_block()
243 map_bh(bh_result, ip->i_sb, xaddr); jfs_get_block()
244 bh_result->b_size = xlen << ip->i_blkbits; jfs_get_block()
254 if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad))) jfs_get_block()
256 rc = extAlloc(ip, xlen, lblock64, &xad, false); jfs_get_block()
261 map_bh(bh_result, ip->i_sb, addressXAD(&xad)); jfs_get_block()
262 bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits; jfs_get_block()
277 IWRITE_UNLOCK(ip); jfs_get_block()
279 IREAD_UNLOCK(ip); jfs_get_block()
375 void jfs_truncate_nolock(struct inode *ip, loff_t length) jfs_truncate_nolock() argument
382 if (test_cflag(COMMIT_Nolink, ip)) { jfs_truncate_nolock()
383 xtTruncate(0, ip, length, COMMIT_WMAP); jfs_truncate_nolock()
388 tid = txBegin(ip->i_sb, 0); jfs_truncate_nolock()
396 mutex_lock(&JFS_IP(ip)->commit_mutex); jfs_truncate_nolock()
398 newsize = xtTruncate(tid, ip, length, jfs_truncate_nolock()
402 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_truncate_nolock()
406 ip->i_mtime = ip->i_ctime = CURRENT_TIME; jfs_truncate_nolock()
407 mark_inode_dirty(ip); jfs_truncate_nolock()
409 txCommit(tid, 1, &ip, 0); jfs_truncate_nolock()
411 mutex_unlock(&JFS_IP(ip)->commit_mutex); jfs_truncate_nolock()
415 void jfs_truncate(struct inode *ip) jfs_truncate() argument
417 jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size); jfs_truncate()
419 nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block); jfs_truncate()
421 IWRITE_LOCK(ip, RDWRLOCK_NORMAL); jfs_truncate()
422 jfs_truncate_nolock(ip, ip->i_size); jfs_truncate()
423 IWRITE_UNLOCK(ip); jfs_truncate()
H A Djfs_extent.c70 * ip - the inode of the file.
86 extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr) extAlloc() argument
88 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); extAlloc()
94 txBeginAnon(ip->i_sb); extAlloc()
97 mutex_lock(&JFS_IP(ip)->commit_mutex); extAlloc()
138 if ((rc = extBalloc(ip, hint ? hint : INOHINT(ip), &nxlen, &nxaddr))) { extAlloc()
139 mutex_unlock(&JFS_IP(ip)->commit_mutex); extAlloc()
144 rc = dquot_alloc_block(ip, nxlen); extAlloc()
146 dbFree(ip, nxaddr, (s64) nxlen); extAlloc()
147 mutex_unlock(&JFS_IP(ip)->commit_mutex); extAlloc()
159 rc = xtExtend(0, ip, xoff, (int) nxlen, 0); extAlloc()
161 rc = xtInsert(0, ip, xflag, xoff, (int) nxlen, &nxaddr, 0); extAlloc()
167 dbFree(ip, nxaddr, nxlen); extAlloc()
168 dquot_free_block(ip, nxlen); extAlloc()
169 mutex_unlock(&JFS_IP(ip)->commit_mutex); extAlloc()
179 mark_inode_dirty(ip); extAlloc()
181 mutex_unlock(&JFS_IP(ip)->commit_mutex); extAlloc()
187 if (test_and_clear_cflag(COMMIT_Synclist,ip)) extAlloc()
188 jfs_commit_inode(ip, 0); extAlloc()
202 * ip - the inode of the file.
215 int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr) extRealloc() argument
217 struct super_block *sb = ip->i_sb; extRealloc()
224 txBeginAnon(ip->i_sb); extRealloc()
226 mutex_lock(&JFS_IP(ip)->commit_mutex); extRealloc()
244 if ((rc = xtUpdate(0, ip, xp))) extRealloc()
256 if ((rc = extBrealloc(ip, xaddr, xlen, &nxlen, &nxaddr))) extRealloc()
260 rc = dquot_alloc_block(ip, nxlen); extRealloc()
262 dbFree(ip, nxaddr, (s64) nxlen); extRealloc()
263 mutex_unlock(&JFS_IP(ip)->commit_mutex); extRealloc()
300 if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { extRealloc()
301 dbFree(ip, xaddr + xlen, delta); extRealloc()
302 dquot_free_block(ip, nxlen); extRealloc()
311 if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { extRealloc()
312 dbFree(ip, nxaddr, nxlen); extRealloc()
313 dquot_free_block(ip, nxlen); extRealloc()
326 if (xtInsert (0, ip, xflag, xoff + ntail, (int) ninsert, extRealloc()
328 dbFree(ip, xaddr, (s64) ninsert); extRealloc()
341 mark_inode_dirty(ip); extRealloc()
343 mutex_unlock(&JFS_IP(ip)->commit_mutex); extRealloc()
355 * ip - the inode of the file.
364 int extHint(struct inode *ip, s64 offset, xad_t * xp) extHint() argument
366 struct super_block *sb = ip->i_sb; extHint()
387 rc = xtLookup(ip, prev, nbperpage, &xflag, &xaddr, &xlen, 0); extHint()
391 jfs_error(ip->i_sb, "corrupt xtree\n"); extHint()
416 * ip - inode of the file.
424 int extRecord(struct inode *ip, xad_t * xp) extRecord() argument
428 txBeginAnon(ip->i_sb); extRecord()
430 mutex_lock(&JFS_IP(ip)->commit_mutex); extRecord()
433 rc = xtUpdate(0, ip, xp); extRecord()
435 mutex_unlock(&JFS_IP(ip)->commit_mutex); extRecord()
448 * ip - the inode of the file.
456 int extFill(struct inode *ip, xad_t * xp) extFill() argument
458 int rc, nbperpage = JFS_SBI(ip->i_sb)->nbperpage; extFill()
459 s64 blkno = offsetXAD(xp) >> ip->i_blkbits; extFill()
461 // assert(ISSPARSE(ip)); extFill()
467 if ((rc = extAlloc(ip, nbperpage, blkno, xp, false))) extFill()
493 * ip - the inode of the file.
509 extBalloc(struct inode *ip, s64 hint, s64 * nblocks, s64 * blkno) extBalloc() argument
511 struct jfs_inode_info *ji = JFS_IP(ip); extBalloc()
512 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); extBalloc()
531 while ((rc = dbAlloc(ip, hint, nb, &daddr)) != 0) { extBalloc()
549 if (S_ISREG(ip->i_mode) && (ji->fileset == FILESYSTEM_I)) { extBalloc()
587 * ip - the inode of the file.
602 extBrealloc(struct inode *ip, extBrealloc() argument
608 if ((rc = dbExtend(ip, blkno, nblks, *newnblks - nblks)) == 0) { extBrealloc()
619 return (extBalloc(ip, blkno, newnblks, newblkno)); extBrealloc()
H A Djfs_discard.c36 * ip - pointer to in-core inode
45 void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks) jfs_issue_discard() argument
47 struct super_block *sb = ip->i_sb; jfs_issue_discard()
73 * ip - pointer to in-core inode;
80 int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range) jfs_ioc_trim() argument
82 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap; jfs_ioc_trim()
83 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap; jfs_ioc_trim()
112 agno = BLKTOAG(start, JFS_SBI(ip->i_sb)); jfs_ioc_trim()
113 agno_end = BLKTOAG(end, JFS_SBI(ip->i_sb)); jfs_ioc_trim()
115 trimmed += dbDiscardAG(ip, agno, minlen); jfs_ioc_trim()
H A Djfs_xtree.h104 extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
106 extern void xtInitRoot(tid_t tid, struct inode *ip);
107 extern int xtInsert(tid_t tid, struct inode *ip,
109 extern int xtExtend(tid_t tid, struct inode *ip, s64 xoff, int xlen,
112 extern int xtTailgate(tid_t tid, struct inode *ip,
115 extern int xtUpdate(tid_t tid, struct inode *ip, struct xad *nxad);
116 extern int xtDelete(tid_t tid, struct inode *ip, s64 xoff, int xlen,
118 extern s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int type);
119 extern s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size);
120 extern int xtRelocate(tid_t tid, struct inode *ip,
123 struct inode *ip, int xflag, s64 xoff, int maxblocks,
H A Djfs_xtree.c117 static int xtSearch(struct inode *ip, s64 xoff, s64 *next, int *cmpp,
121 struct inode *ip,
124 static int xtSplitPage(tid_t tid, struct inode *ip, struct xtsplit * split,
127 static int xtSplitRoot(tid_t tid, struct inode *ip,
131 static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
134 static int xtSearchNode(struct inode *ip,
138 static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
146 int xtLookup(struct inode *ip, s64 lstart, xtLookup() argument
166 size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >> xtLookup()
167 JFS_SBI(ip->i_sb)->l2bsize; xtLookup()
176 if ((rc = xtSearch(ip, lstart, &next, &cmp, &btstack, 0))) { xtLookup()
188 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtLookup()
227 * ip - file object;
239 static int xtSearch(struct inode *ip, s64 xoff, s64 *nextp, xtSearch() argument
242 struct jfs_inode_info *jfs_ip = JFS_IP(ip); xtSearch()
279 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtSearch()
503 jfs_error(ip->i_sb, "stack overrun!\n"); xtSearch()
524 * ip - file object;
538 struct inode *ip, int xflag, s64 xoff, s32 xlen, s64 * xaddrp, xtInsert()
565 if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT))) xtInsert()
569 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtInsert()
589 if ((rc = dquot_alloc_block(ip, xlen))) xtInsert()
591 if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { xtInsert()
592 dquot_free_block(ip, xlen); xtInsert()
617 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) { xtInsert()
620 dbFree(ip, xaddr, (s64) xlen); xtInsert()
621 dquot_free_block(ip, xlen); xtInsert()
638 BT_MARK_DIRTY(mp, ip); xtInsert()
653 if (!test_cflag(COMMIT_Nolink, ip)) { xtInsert()
654 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtInsert()
681 * ip - file object;
689 struct inode *ip, struct xtsplit * split, struct btstack * btstack) xtSplitUp()
712 sp = XT_PAGE(ip, smp); xtSplitUp()
715 if ((sp->header.flag & BT_ROOT) && (!S_ISDIR(ip->i_mode)) && xtSplitUp()
717 (JFS_IP(ip)->mode2 & INLINEEA)) { xtSplitUp()
719 JFS_IP(ip)->mode2 &= ~INLINEEA; xtSplitUp()
721 BT_MARK_DIRTY(smp, ip); xtSplitUp()
744 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitUp()
745 tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW); xtSplitUp()
767 xlen = JFS_SBI(ip->i_sb)->nbperpage; xtSplitUp()
769 if ((rc = dbAlloc(ip, (s64) 0, (s64) xlen, &xaddr)) xtSplitUp()
794 xtSplitRoot(tid, ip, split, &rmp) : xtSplitUp()
795 xtSplitPage(tid, ip, split, &rmp, &rbn); xtSplitUp()
828 rcp = XT_PAGE(ip, rcmp); xtSplitUp()
834 XT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc); xtSplitUp()
859 split->len = JFS_SBI(ip->i_sb)->nbperpage; xtSplitUp()
870 xtSplitRoot(tid, ip, split, &rmp) : xtSplitUp()
871 xtSplitPage(tid, ip, split, &rmp, &rbn); xtSplitUp()
893 BT_MARK_DIRTY(smp, ip); xtSplitUp()
907 JFS_SBI(ip->i_sb)->nbperpage, rcbn); xtSplitUp()
913 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitUp()
914 tlck = txLock(tid, ip, smp, xtSplitUp()
949 * struct inode *ip,
958 xtSplitPage(tid_t tid, struct inode *ip, xtSplitPage() argument
979 sp = XT_PAGE(ip, smp); xtSplitPage()
989 rc = dquot_alloc_block(ip, lengthPXD(pxd)); xtSplitPage()
998 rmp = get_metapage(ip, rbn, PSIZE, 1); xtSplitPage()
1004 jfs_info("xtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); xtSplitPage()
1006 BT_MARK_DIRTY(rmp, ip); xtSplitPage()
1017 BT_MARK_DIRTY(smp, ip); xtSplitPage()
1019 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitPage()
1023 tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW); xtSplitPage()
1029 tlck = txLock(tid, ip, smp, tlckXTREE | tlckGROW); xtSplitPage()
1069 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitPage()
1089 XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); xtSplitPage()
1095 BT_MARK_DIRTY(mp, ip); xtSplitPage()
1101 if (!test_cflag(COMMIT_Nolink, ip)) xtSplitPage()
1102 tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); xtSplitPage()
1140 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitPage()
1170 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitPage()
1179 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitPage()
1198 dquot_free_block(ip, quota_allocation); xtSplitPage()
1217 * struct inode *ip,
1226 struct inode *ip, struct xtsplit * split, struct metapage ** rmpp) xtSplitRoot()
1240 sp = &JFS_IP(ip)->i_xtroot; xtSplitRoot()
1251 rmp = get_metapage(ip, rbn, PSIZE, 1); xtSplitRoot()
1256 rc = dquot_alloc_block(ip, lengthPXD(pxd)); xtSplitRoot()
1262 jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp); xtSplitRoot()
1269 BT_MARK_DIRTY(rmp, ip); xtSplitRoot()
1305 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitRoot()
1306 tlck = txLock(tid, ip, rmp, tlckXTREE | tlckNEW); xtSplitRoot()
1325 BT_MARK_DIRTY(split->mp, ip); xtSplitRoot()
1328 XT_PUTENTRY(xad, XAD_NEW, 0, JFS_SBI(ip->i_sb)->nbperpage, rbn); xtSplitRoot()
1336 if (!test_cflag(COMMIT_Nolink, ip)) { xtSplitRoot()
1337 tlck = txLock(tid, ip, split->mp, tlckXTREE | tlckGROW); xtSplitRoot()
1361 struct inode *ip, s64 xoff, /* delta extent offset */ xtExtend()
1381 if ((rc = xtSearch(ip, xoff - 1, NULL, &cmp, &btstack, XT_INSERT))) xtExtend()
1385 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtExtend()
1389 jfs_error(ip->i_sb, "xtSearch did not find extent\n"); xtExtend()
1397 jfs_error(ip->i_sb, "extension is not contiguous\n"); xtExtend()
1406 BT_MARK_DIRTY(mp, ip); xtExtend()
1407 if (!test_cflag(COMMIT_Nolink, ip)) { xtExtend()
1408 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtExtend()
1440 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) xtExtend()
1444 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtExtend()
1460 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtExtend()
1464 BT_MARK_DIRTY(mp, ip); xtExtend()
1465 if (!test_cflag(COMMIT_Nolink, ip)) { xtExtend()
1466 tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); xtExtend()
1495 if (!test_cflag(COMMIT_Nolink, ip)) { xtExtend()
1523 struct inode *ip, s64 xoff, /* split/new extent offset */ xtTailgate()
1548 if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, XT_INSERT))) xtTailgate()
1552 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtTailgate()
1556 jfs_error(ip->i_sb, "couldn't find extent\n"); xtTailgate()
1564 jfs_error(ip->i_sb, "the entry found is not the last entry\n"); xtTailgate()
1568 BT_MARK_DIRTY(mp, ip); xtTailgate()
1572 if (!test_cflag(COMMIT_Nolink, ip)) { xtTailgate()
1573 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtTailgate()
1605 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) xtTailgate()
1609 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTailgate()
1625 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTailgate()
1629 BT_MARK_DIRTY(mp, ip); xtTailgate()
1630 if (!test_cflag(COMMIT_Nolink, ip)) { xtTailgate()
1631 tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); xtTailgate()
1659 if (!test_cflag(COMMIT_Nolink, ip)) { xtTailgate()
1660 mtlck = txMaplock(tid, ip, tlckMAP); xtTailgate()
1669 dbFree(ip, addressXAD(xad) + llen, (s64) rlen); xtTailgate()
1678 if (!test_cflag(COMMIT_Nolink, ip)) { xtTailgate()
1705 int xtUpdate(tid_t tid, struct inode *ip, xad_t * nxad) xtUpdate() argument
1729 if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT))) xtUpdate()
1733 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0); xtUpdate()
1737 jfs_error(ip->i_sb, "Could not find extent\n"); xtUpdate()
1741 BT_MARK_DIRTY(mp, ip); xtUpdate()
1745 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
1746 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtUpdate()
1760 jfs_error(ip->i_sb, xtUpdate()
1910 jfs_error(ip->i_sb, "xoff >= nxoff\n"); xtUpdate()
1938 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) xtUpdate()
1942 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtUpdate()
1958 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtUpdate()
1962 BT_MARK_DIRTY(mp, ip); xtUpdate()
1963 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
1964 tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); xtUpdate()
2007 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
2019 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtUpdate()
2023 BT_MARK_DIRTY(mp, ip); xtUpdate()
2024 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
2025 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtUpdate()
2043 if ((rc = xtSearch(ip, nxoff, NULL, &cmp, &btstack, XT_INSERT))) xtUpdate()
2047 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index0); xtUpdate()
2051 jfs_error(ip->i_sb, "xtSearch failed\n"); xtUpdate()
2057 jfs_error(ip->i_sb, "unexpected value of index\n"); xtUpdate()
2091 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) xtUpdate()
2095 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtUpdate()
2112 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtUpdate()
2116 BT_MARK_DIRTY(mp, ip); xtUpdate()
2117 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
2118 tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); xtUpdate()
2138 if (!test_cflag(COMMIT_Nolink, ip)) { xtUpdate()
2159 * ip - file object;
2170 struct inode *ip, int xflag, s64 xoff, s32 maxblocks, xtAppend()
2204 if ((rc = xtSearch(ip, xoff, &next, &cmp, &btstack, XT_INSERT))) xtAppend()
2208 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtAppend()
2240 nblocks = JFS_SBI(ip->i_sb)->nbperpage; xtAppend()
2242 if ((rc = dbAllocBottomUp(ip, xaddr, (s64) nblocks)) == 0) { xtAppend()
2261 if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen))) xtAppend()
2270 if ((rc = xtSplitUp(tid, ip, &split, &btstack))) { xtAppend()
2272 dbFree(ip, *xaddrp, (s64) * xlenp); xtAppend()
2288 if ((rc = dbAllocBottomUp(ip, xaddr, (s64) xlen))) xtAppend()
2291 BT_MARK_DIRTY(mp, ip); xtAppend()
2297 tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW); xtAppend()
2339 int xtDelete(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag) xtDelete() argument
2354 if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0))) xtDelete()
2357 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtDelete()
2374 return (xtDeleteUp(tid, ip, mp, p, &btstack)); xtDelete()
2376 BT_MARK_DIRTY(mp, ip); xtDelete()
2382 tlck = txLock(tid, ip, mp, tlckXTREE); xtDelete()
2410 xtDeleteUp(tid_t tid, struct inode *ip, xtDeleteUp() argument
2440 if ((rc = xtRelink(tid, ip, fp))) { xtDeleteUp()
2448 dbFree(ip, xaddr, (s64) xlen); xtDeleteUp()
2463 XT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc); xtDeleteUp()
2490 if ((rc = xtRelink(tid, ip, p))) xtDeleteUp()
2495 dbFree(ip, xaddr, xtDeleteUp()
2496 (s64) JFS_SBI(ip->i_sb)->nbperpage); xtDeleteUp()
2510 BT_MARK_DIRTY(mp, ip); xtDeleteUp()
2516 tlck = txLock(tid, ip, mp, tlckXTREE); xtDeleteUp()
2559 xtRelocate(tid_t tid, struct inode * ip, xad_t * oxad, /* old XAD */ xtRelocate() argument
2590 offset = xoff << JFS_SBI(ip->i_sb)->l2bsize; xtRelocate()
2591 if (offset >= ip->i_size) xtRelocate()
2603 rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0); xtRelocate()
2608 XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); xtRelocate()
2624 rc = xtSearchNode(ip, oxad, &cmp, &btstack, 0); xtRelocate()
2629 XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); xtRelocate()
2672 offset = xoff << JFS_SBI(ip->i_sb)->l2bsize; xtRelocate()
2674 nbytes = xlen << JFS_SBI(ip->i_sb)->l2bsize; xtRelocate()
2691 if (rc = cmRead(ip, offset, npages, &cp)) xtRelocate()
2698 nblks = nb >> JFS_IP(ip->i_sb)->l2bsize; xtRelocate()
2699 cmSetXD(ip, cp, pno, dxaddr, nblks); xtRelocate()
2709 if ((rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0))) xtRelocate()
2712 XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); xtRelocate()
2719 XT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc); xtRelocate()
2731 XT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc); xtRelocate()
2742 XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc); xtRelocate()
2758 BT_MARK_DIRTY(lmp, ip); xtRelocate()
2759 tlck = txLock(tid, ip, lmp, tlckXTREE | tlckRELINK); xtRelocate()
2765 BT_MARK_DIRTY(rmp, ip); xtRelocate()
2766 tlck = txLock(tid, ip, rmp, tlckXTREE | tlckRELINK); xtRelocate()
2792 BT_MARK_DIRTY(mp, ip); xtRelocate()
2794 tlck = txLock(tid, ip, mp, tlckXTREE | tlckNEW); xtRelocate()
2806 xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize; xtRelocate()
2827 tlck = txMaplock(tid, ip, tlckMAP); xtRelocate()
2837 tlck = txMaplock(tid, ip, tlckMAP | tlckRELOCATE); xtRelocate()
2853 BT_MARK_DIRTY(pmp, ip); xtRelocate()
2854 tlck = txLock(tid, ip, pmp, tlckXTREE | tlckGROW); xtRelocate()
2880 * ip - file object;
2891 static int xtSearchNode(struct inode *ip, xad_t * xad, /* required XAD entry */ xtSearchNode() argument
2929 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtSearchNode()
3006 * struct inode *ip,
3011 static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * p) xtRelink() argument
3023 XT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); xtRelink()
3032 BT_MARK_DIRTY(mp, ip); xtRelink()
3033 tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); xtRelink()
3044 XT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc); xtRelink()
3053 BT_MARK_DIRTY(mp, ip); xtRelink()
3054 tlck = txLock(tid, ip, mp, tlckXTREE | tlckRELINK); xtRelink()
3073 void xtInitRoot(tid_t tid, struct inode *ip) xtInitRoot() argument
3082 txLock(tid, ip, (struct metapage *) &JFS_IP(ip)->bxflag, xtInitRoot()
3084 p = &JFS_IP(ip)->i_xtroot; xtInitRoot()
3089 if (S_ISDIR(ip->i_mode)) xtInitRoot()
3093 ip->i_size = 0; xtInitRoot()
3126 * struct inode *ip,
3162 s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag) xtTruncate() argument
3228 teof = (newsize + (JFS_SBI(ip->i_sb)->bsize - 1)) >> xtTruncate()
3229 JFS_SBI(ip->i_sb)->l2bsize; xtTruncate()
3245 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTruncate()
3264 tlck = txLock(tid, ip, mp, tlckXTREE|tlckGROW); xtTruncate()
3265 BT_MARK_DIRTY(mp, ip); xtTruncate()
3294 newsize = (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize; xtTruncate()
3297 tlck = txLock(tid, ip, mp, tlckXTREE); xtTruncate()
3302 BT_MARK_DIRTY(mp, ip); xtTruncate()
3318 if (S_ISDIR(ip->i_mode) && (teof == 0)) xtTruncate()
3319 invalidate_xad_metapages(ip, *xad); xtTruncate()
3383 txFreeMap(ip, pxdlock, NULL, COMMIT_WMAP); xtTruncate()
3411 txFreeMap(ip, (struct maplock *) & xadlock, xtTruncate()
3441 txFreeMap(ip, (struct maplock *) & xadlock, NULL, COMMIT_WMAP); xtTruncate()
3483 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTruncate()
3500 tlck = txLock(tid, ip, mp, tlckXTREE); xtTruncate()
3516 txFreeMap(ip, (struct maplock *) & xadlock, xtTruncate()
3519 BT_MARK_DIRTY(mp, ip); xtTruncate()
3576 tlck = txLock(tid, ip, mp, tlckXTREE); xtTruncate()
3588 txFreeMap(ip, (struct maplock *) & xadlock, NULL, xtTruncate()
3591 BT_MARK_DIRTY(mp, ip); xtTruncate()
3604 JFS_IP(ip)->mode2 |= INLINEEA; xtTruncate()
3652 jfs_error(ip->i_sb, "stack overrun!\n"); xtTruncate()
3677 if (S_ISDIR(ip->i_mode) && !newsize) xtTruncate()
3678 ip->i_size = 1; /* fsck hates zero-length directories */ xtTruncate()
3680 ip->i_size = newsize; xtTruncate()
3683 dquot_free_block(ip, nfreed); xtTruncate()
3689 txFreelock(ip); xtTruncate()
3706 * struct inode *ip,
3718 s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size) xtTruncate_pmap() argument
3744 xoff = (committed_size >> JFS_SBI(ip->i_sb)->l2bsize) - 1; xtTruncate_pmap()
3745 rc = xtSearch(ip, xoff, NULL, &cmp, &btstack, 0); xtTruncate_pmap()
3749 XT_GETSEARCH(ip, btstack.top, bn, mp, p, index); xtTruncate_pmap()
3753 jfs_error(ip->i_sb, "did not find extent\n"); xtTruncate_pmap()
3768 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTruncate_pmap()
3792 return (xoff + xlen) << JFS_SBI(ip->i_sb)->l2bsize; xtTruncate_pmap()
3794 tlck = txLock(tid, ip, mp, tlckXTREE); xtTruncate_pmap()
3813 XT_GETPAGE(ip, bn, mp, PSIZE, p, rc); xtTruncate_pmap()
3827 tlck = txLock(tid, ip, mp, tlckXTREE); xtTruncate_pmap()
3852 jfs_error(ip->i_sb, "stack overrun!\n"); xtTruncate_pmap()
537 xtInsert(tid_t tid, struct inode *ip, int xflag, s64 xoff, s32 xlen, s64 * xaddrp, int flag) xtInsert() argument
688 xtSplitUp(tid_t tid, struct inode *ip, struct xtsplit * split, struct btstack * btstack) xtSplitUp() argument
1225 xtSplitRoot(tid_t tid, struct inode *ip, struct xtsplit * split, struct metapage ** rmpp) xtSplitRoot() argument
1360 xtExtend(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag) xtExtend() argument
1522 xtTailgate(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, s64 xaddr, int flag) xtTailgate() argument
2169 xtAppend(tid_t tid, struct inode *ip, int xflag, s64 xoff, s32 maxblocks, s32 * xlenp, s64 * xaddrp, int flag) xtAppend() argument
H A Djfs_incore.h110 #define IREAD_LOCK(ip, subclass) \
111 down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
112 #define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock)
113 #define IWRITE_LOCK(ip, subclass) \
114 down_write_nested(&JFS_IP(ip)->rdwrlock, subclass)
115 #define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock)
153 #define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag))
154 #define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag))
155 #define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag))
156 #define test_and_clear_cflag(flag, ip) \
157 test_and_clear_bit(flag, &(JFS_IP(ip)->cflag))
H A Djfs_dtree.c152 static int dtSplitUp(tid_t tid, struct inode *ip,
155 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
158 static int dtExtendPage(tid_t tid, struct inode *ip,
161 static int dtSplitRoot(tid_t tid, struct inode *ip,
164 static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
167 static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
169 static int dtReadFirst(struct inode *ip, struct btstack * btstack);
171 static int dtReadNext(struct inode *ip,
249 static struct dir_table_slot *find_index(struct inode *ip, u32 index, find_index() argument
252 struct jfs_inode_info *jfs_ip = JFS_IP(ip); find_index()
272 if (jfs_dirtable_inline(ip)) { find_index()
282 JFS_SBI(ip->i_sb)->l2nbperpage; find_index()
290 *mp = read_index_page(ip, blkno); find_index()
304 static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp, lock_index() argument
311 tlck = txLock(tid, ip, mp, tlckDATA); lock_index()
334 static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot) add_index() argument
336 struct super_block *sb = ip->i_sb; add_index()
338 struct jfs_inode_info *jfs_ip = JFS_IP(ip); add_index()
350 ASSERT(DO_INDEX(ip)); add_index()
364 ip->i_size = (loff_t) (index - 1) << 3; add_index()
374 set_cflag(COMMIT_Dirtable, ip); add_index()
385 if (dquot_alloc_block(ip, sbi->nbperpage)) add_index()
387 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { add_index()
388 dquot_free_block(ip, sbi->nbperpage); add_index()
401 xtInitRoot(tid, ip); add_index()
406 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) { add_index()
411 dbFree(ip, xaddr, sbi->nbperpage); add_index()
412 dquot_free_block(ip, sbi->nbperpage); add_index()
415 ip->i_size = PSIZE; add_index()
417 mp = get_index_page(ip, 0); add_index()
420 xtTruncate(tid, ip, 0, COMMIT_PWMAP); add_index()
425 tlck = txLock(tid, ip, mp, tlckDATA); add_index()
442 clear_cflag(COMMIT_Dirtable, ip); add_index()
453 if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) { add_index()
457 ip->i_size += PSIZE; add_index()
459 if ((mp = get_index_page(ip, blkno))) add_index()
462 xtTruncate(tid, ip, offset, COMMIT_PWMAP); add_index()
464 mp = read_index_page(ip, blkno); add_index()
471 lock_index(tid, ip, mp, index); add_index()
496 static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next) free_index() argument
502 dirtab_slot = find_index(ip, index, &mp, &lblock); free_index()
512 lock_index(tid, ip, mp, index); free_index()
516 set_cflag(COMMIT_Dirtable, ip); free_index()
524 static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn, modify_index() argument
529 dirtab_slot = find_index(ip, index, mp, lblock); modify_index()
538 lock_index(tid, ip, *mp, index); modify_index()
541 set_cflag(COMMIT_Dirtable, ip); modify_index()
549 static int read_index(struct inode *ip, u32 index, read_index() argument
556 slot = find_index(ip, index, &mp, &lblock); read_index()
580 int dtSearch(struct inode *ip, struct component_name * key, ino_t * data, dtSearch() argument
595 struct super_block *sb = ip->i_sb; dtSearch()
635 DT_GETPAGE(ip, bn, mp, psize, p, rc); dtSearch()
792 psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize; dtSearch()
821 int dtInsert(tid_t tid, struct inode *ip, dtInsert() argument
843 DT_GETSEARCH(ip, btstack->top, bn, mp, p, index); dtInsert()
848 if (DO_INDEX(ip)) { dtInsert()
849 if (JFS_IP(ip)->next_index == DIREND) { dtInsert()
855 data.leaf.ip = ip; dtInsert()
858 data.leaf.ip = NULL; /* signifies legacy directory format */ dtInsert()
875 rc = dtSplitUp(tid, ip, &split, btstack); dtInsert()
884 BT_MARK_DIRTY(mp, ip); dtInsert()
888 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); dtInsert()
931 struct inode *ip, struct dtsplit * split, struct btstack * btstack) dtSplitUp()
933 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); dtSplitUp()
958 sp = DT_PAGE(ip, smp); dtSplitUp()
986 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) { dtSplitUp()
997 rc = dtSplitRoot(tid, ip, split, &rmp); dtSplitUp()
1000 dbFree(ip, xaddr, xlen); dtSplitUp()
1006 if (!DO_INDEX(ip)) dtSplitUp()
1007 ip->i_size = xlen << sbi->l2bsize; dtSplitUp()
1031 rc = dquot_alloc_block(ip, n); dtSplitUp()
1046 if ((rc = dtExtendPage(tid, ip, split, btstack))) { dtSplitUp()
1051 dbFree(ip, nxaddr, (s64) xlen); dtSplitUp()
1056 dbFree(ip, xaddr, (s64) n); dtSplitUp()
1058 } else if (!DO_INDEX(ip)) dtSplitUp()
1059 ip->i_size = lengthPXD(pxd) << sbi->l2bsize; dtSplitUp()
1082 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) { dtSplitUp()
1096 if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) { dtSplitUp()
1103 if (!DO_INDEX(ip)) dtSplitUp()
1104 ip->i_size += PSIZE; dtSplitUp()
1138 DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc); dtSplitUp()
1248 dtSplitRoot(tid, ip, split, &rmp) : dtSplitUp()
1249 dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd); dtSplitUp()
1261 BT_MARK_DIRTY(smp, ip); dtSplitUp()
1265 tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY); dtSplitUp()
1304 dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd)); dtSplitUp()
1311 dquot_free_block(ip, quota_allocation); dtSplitUp()
1330 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split, dtSplitPage() argument
1358 sp = DT_PAGE(ip, smp); dtSplitPage()
1367 rmp = get_metapage(ip, rbn, PSIZE, 1); dtSplitPage()
1372 rc = dquot_alloc_block(ip, lengthPXD(pxd)); dtSplitPage()
1378 jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); dtSplitPage()
1380 BT_MARK_DIRTY(rmp, ip); dtSplitPage()
1384 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW); dtSplitPage()
1391 BT_MARK_DIRTY(smp, ip); dtSplitPage()
1397 tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY); dtSplitPage()
1475 DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); dtSplitPage()
1481 BT_MARK_DIRTY(mp, ip); dtSplitPage()
1485 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK); dtSplitPage()
1486 jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p", dtSplitPage()
1487 tlck, ip, mp); dtSplitPage()
1525 if (DO_INDEX(ip)) dtSplitPage()
1565 dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip)); dtSplitPage()
1581 if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) { dtSplitPage()
1588 modify_index(tid, ip, le32_to_cpu(ldtentry->index), dtSplitPage()
1643 struct inode *ip, struct dtsplit * split, struct btstack * btstack) dtExtendPage()
1645 struct super_block *sb = ip->i_sb; dtExtendPage()
1669 sp = DT_PAGE(ip, smp); dtExtendPage()
1673 DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc); dtExtendPage()
1696 tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE); dtExtendPage()
1705 if (DO_INDEX(ip)) { dtExtendPage()
1713 modify_index(tid, ip, dtExtendPage()
1727 jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp); dtExtendPage()
1729 BT_MARK_DIRTY(smp, ip); dtExtendPage()
1733 tlck = txLock(tid, ip, smp, tlckDTREE | type); dtExtendPage()
1823 BT_MARK_DIRTY(pmp, ip); dtExtendPage()
1839 tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY); dtExtendPage()
1876 struct inode *ip, struct dtsplit * split, struct metapage ** rmpp) dtSplitRoot()
1878 struct super_block *sb = ip->i_sb; dtSplitRoot()
1900 sp = &JFS_IP(ip)->i_dtroot; dtSplitRoot()
1914 rmp = get_metapage(ip, rbn, xsize, 1); dtSplitRoot()
1921 rc = dquot_alloc_block(ip, lengthPXD(pxd)); dtSplitRoot()
1927 BT_MARK_DIRTY(rmp, ip); dtSplitRoot()
1931 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW); dtSplitRoot()
1995 if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) { dtSplitRoot()
2003 modify_index(tid, ip, le32_to_cpu(ldtentry->index), dtSplitRoot()
2024 BT_MARK_DIRTY(smp, ip); dtSplitRoot()
2028 tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT); dtSplitRoot()
2083 struct inode *ip, struct component_name * key, ino_t * ino, int flag) dtDelete()
2106 if ((rc = dtSearch(ip, key, ino, &btstack, flag))) dtDelete()
2110 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); dtDelete()
2117 if (DO_INDEX(ip)) { dtDelete()
2130 DT_GETPAGE(ip, le64_to_cpu(p->header.next), dtDelete()
2149 free_index(tid, ip, table_index, next_index); dtDelete()
2156 rc = dtDeleteUp(tid, ip, mp, p, &btstack); dtDelete()
2164 BT_MARK_DIRTY(mp, ip); dtDelete()
2168 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); dtDelete()
2204 if (DO_INDEX(ip) && index < p->header.nextindex) { dtDelete()
2212 modify_index(tid, ip, dtDelete()
2237 static int dtDeleteUp(tid_t tid, struct inode *ip, dtDeleteUp() argument
2261 dtInitRoot(tid, ip, PARENT(ip)); dtDeleteUp()
2278 tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE); dtDeleteUp()
2285 if ((rc = dtRelink(tid, ip, fp))) { dtDeleteUp()
2293 dquot_free_block(ip, xlen); dtDeleteUp()
2308 DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc); dtDeleteUp()
2337 dtInitRoot(tid, ip, PARENT(ip)); dtDeleteUp()
2353 txMaplock(tid, ip, dtDeleteUp()
2361 if ((rc = dtRelink(tid, ip, p))) { dtDeleteUp()
2369 dquot_free_block(ip, xlen); dtDeleteUp()
2384 BT_MARK_DIRTY(mp, ip); dtDeleteUp()
2390 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); dtDeleteUp()
2432 if (!DO_INDEX(ip)) dtDeleteUp()
2433 ip->i_size -= PSIZE; dtDeleteUp()
2445 int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd, dtRelocate() argument
2474 rc = dtSearchNode(ip, lmxaddr, opxd, &btstack); dtRelocate()
2479 DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); dtRelocate()
2486 DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc); dtRelocate()
2499 DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc); dtRelocate()
2510 DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc); dtRelocate()
2526 tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK); dtRelocate()
2540 tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK); dtRelocate()
2562 tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW); dtRelocate()
2580 xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize; dtRelocate()
2599 tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE); dtRelocate()
2613 tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY); dtRelocate()
2642 static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd, dtSearchNode() argument
2664 DT_GETPAGE(ip, bn, mp, psize, p, rc); dtSearchNode()
2691 psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize; dtSearchNode()
2728 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); dtSearchNode()
2747 static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p) dtRelink() argument
2761 DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc); dtRelink()
2765 BT_MARK_DIRTY(mp, ip); dtRelink()
2771 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK); dtRelink()
2772 jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p", dtRelink()
2773 tlck, ip, mp); dtRelink()
2790 DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc); dtRelink()
2794 BT_MARK_DIRTY(mp, ip); dtRelink()
2800 tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK); dtRelink()
2801 jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p", dtRelink()
2802 tlck, ip, mp); dtRelink()
2826 void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot) dtInitRoot() argument
2828 struct jfs_inode_info *jfs_ip = JFS_IP(ip); dtInitRoot()
2841 if (DO_INDEX(ip)) { dtInitRoot()
2842 if (!jfs_dirtable_inline(ip)) { dtInitRoot()
2860 xtTruncate(tid, ip, 0, COMMIT_PWMAP); dtInitRoot()
2861 set_cflag(COMMIT_Stale, ip); dtInitRoot()
2865 ip->i_size = 1; dtInitRoot()
2869 ip->i_size = IDATASIZE; dtInitRoot()
2876 tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag, dtInitRoot()
3007 struct inode *ip = file_inode(file); jfs_readdir() local
3008 struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab; jfs_readdir()
3039 if (DO_INDEX(ip)) { jfs_readdir()
3061 if (dtEmpty(ip) || jfs_readdir()
3062 (dir_index >= JFS_IP(ip)->next_index)) { jfs_readdir()
3068 rc = read_index(ip, dir_index, &dirtab_slot); jfs_readdir()
3074 if (loop_count++ > JFS_IP(ip)->next_index) { jfs_readdir()
3089 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); jfs_readdir()
3106 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) jfs_readdir()
3113 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) jfs_readdir()
3119 if (dtEmpty(ip)) { jfs_readdir()
3124 if ((rc = dtReadFirst(ip, &btstack))) jfs_readdir()
3127 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); jfs_readdir()
3142 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) jfs_readdir()
3151 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) jfs_readdir()
3162 if (dtEmpty(ip)) { jfs_readdir()
3167 if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) { jfs_readdir()
3174 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); jfs_readdir()
3225 JFS_IP(ip)->next_index)) { jfs_readdir()
3226 if (!page_fixed && !isReadOnly(ip)) { jfs_readdir()
3264 jfs_error(ip->i_sb, jfs_readdir()
3266 (long)ip->i_ino, jfs_readdir()
3316 add_missing_indices(ip, bn); jfs_readdir()
3325 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); jfs_readdir()
3344 static int dtReadFirst(struct inode *ip, struct btstack * btstack) dtReadFirst() argument
3363 DT_GETPAGE(ip, bn, mp, psize, p, rc); dtReadFirst()
3385 jfs_error(ip->i_sb, "btstack overrun\n"); dtReadFirst()
3398 psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize; dtReadFirst()
3416 static int dtReadNext(struct inode *ip, loff_t * offset, dtReadNext() argument
3437 if ((rc = dtReadFirst(ip, btstack))) dtReadNext()
3441 DT_GETSEARCH(ip, btstack->top, bn, mp, p, index); dtReadNext()
3500 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); dtReadNext()
3521 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); dtReadNext()
3541 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); dtReadNext()
3562 DT_GETPAGE(ip, bn, mp, PSIZE, p, rc); dtReadNext()
3947 if (data->leaf.ip) { dtInsertEntry()
3952 data->leaf.ip, dtInsertEntry()
4031 if ((p->header.flag & BT_LEAF) && data->leaf.ip) { dtInsertEntry()
4041 modify_index(data->leaf.tid, data->leaf.ip, dtInsertEntry()
4510 * ip - Inode of parent directory
4521 int dtModify(tid_t tid, struct inode *ip, dtModify() argument
4542 if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag))) dtModify()
4546 DT_GETSEARCH(ip, btstack.top, bn, mp, p, index); dtModify()
4548 BT_MARK_DIRTY(mp, ip); dtModify()
4552 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY); dtModify()
930 dtSplitUp(tid_t tid, struct inode *ip, struct dtsplit * split, struct btstack * btstack) dtSplitUp() argument
1642 dtExtendPage(tid_t tid, struct inode *ip, struct dtsplit * split, struct btstack * btstack) dtExtendPage() argument
1875 dtSplitRoot(tid_t tid, struct inode *ip, struct dtsplit * split, struct metapage ** rmpp) dtSplitRoot() argument
2082 dtDelete(tid_t tid, struct inode *ip, struct component_name * key, ino_t * ino, int flag) dtDelete() argument
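The jfs_dtree.c hits above share one update pattern: before a directory B+-tree page is modified, the metapage is marked dirty against the inode and a transaction lock with a tlckDTREE sub-type is taken on it. A minimal sketch of that pattern follows; the helper name is invented, and the kernel's tid_t, struct inode, struct metapage and struct tlock types are assumed.

	/* Illustration only -- not a function in fs/jfs.  It names the pattern
	 * repeated in the dtDelete()/dtDeleteUp()/dtRelink() hits above. */
	static struct tlock *dt_lock_page_for_update(tid_t tid, struct inode *ip,
						     struct metapage *mp, int subtype)
	{
		/* Tell the journal this buffer is about to change... */
		BT_MARK_DIRTY(mp, ip);

		/* ...then take the per-transaction lock that records how it
		 * changes (tlckENTRY, tlckNEW, tlckRELINK, ...). */
		return txLock(tid, ip, mp, tlckDTREE | subtype);
	}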
H A Djfs_discard.h23 extern void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks);
24 extern int jfs_ioc_trim(struct inode *ip, struct fstrim_range *range);
/linux-4.4.14/drivers/video/fbdev/
H A Dpmag-aa-fb.c229 struct aafb_info *ip = (struct aafb_info *)info; aafb_get_fix() local
233 fix->smem_start = ip->fb_start; aafb_get_fix()
234 fix->smem_len = ip->fb_size; aafb_get_fix()
307 struct aafb_info *ip = (struct aafb_info *)info; aafb_switch() local
308 struct display *old = (currcon < 0) ? &ip->disp : (fb_display + currcon); aafb_switch()
309 struct display *new = (con < 0) ? &ip->disp : (fb_display + con); aafb_switch()
316 aafb_set_disp(new, con, ip); aafb_switch()
380 struct aafb_info *ip = (struct aafb_info *)info; aafb_update_var() local
381 struct display *disp = (con < 0) ? &ip->disp : (fb_display + con); aafb_update_var()
384 aafbcon_cursor(disp, CM_ERASE, ip->cursor.x, ip->cursor.y); aafb_update_var()
393 struct aafb_info *ip = (struct aafb_info *)info; aafb_blank() local
396 bt455_write_cmap_entry(ip->bt455, 1, val, val, val); aafb_blank()
397 aafbcon_cursor(&ip->disp, CM_ERASE, ip->cursor.x, ip->cursor.y); aafb_blank()
413 struct aafb_info *ip = &my_fb_info[slot]; init_one() local
415 memset(ip, 0, sizeof(struct aafb_info)); init_one()
420 ip->bt455 = (struct bt455_regs *) (base_addr + PMAG_AA_BT455_OFFSET); init_one()
421 ip->bt431 = (struct bt431_regs *) (base_addr + PMAG_AA_BT431_OFFSET); init_one()
422 ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET; init_one()
423 ip->fb_size = 2048 * 1024; /* fb_fix_screeninfo.smem_length init_one()
425 ip->fb_line_length = 2048; init_one()
430 strcpy(ip->info.modename, "PMAG-AA"); init_one()
431 ip->info.node = -1; init_one()
432 ip->info.flags = FBINFO_FLAG_DEFAULT; init_one()
433 ip->info.fbops = &aafb_ops; init_one()
434 ip->info.disp = &ip->disp; init_one()
435 ip->info.changevar = NULL; init_one()
436 ip->info.switch_con = &aafb_switch; init_one()
437 ip->info.updatevar = &aafb_update_var; init_one()
438 ip->info.blank = &aafb_blank; init_one()
440 aafb_set_disp(&ip->disp, currcon, ip); init_one()
445 bt455_erase_cursor(ip->bt455); init_one()
448 bt455_write_cmap_entry(ip->bt455, 0, 0x00, 0x00, 0x00); init_one()
449 bt455_write_cmap_entry(ip->bt455, 1, 0x0f, 0x0f, 0x0f); init_one()
452 bt431_init_cursor(ip->bt431); init_one()
453 aafb_cursor_init(ip); init_one()
456 memset ((void *)ip->fb_start, 0, ip->fb_size); init_one()
458 if (register_framebuffer(&ip->info) < 0) init_one()
462 GET_FB_IDX(ip->info.node), ip->info.modename, slot); init_one()
469 struct aafb_info *ip = &my_fb_info[slot]; exit_one() local
471 if (unregister_framebuffer(&ip->info) < 0) exit_one()
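The init_one() hits for pmag-aa-fb.c show the board bring-up pattern: RAMDAC register windows and framebuffer memory sit at fixed offsets from the TURBOchannel slot base, then the fb_info is filled in and registered. A stand-alone sketch of just the address-mapping step; the helper name is invented, while the PMAG_AA_* offsets and sizes are the ones used in the excerpt.

	/* Sketch only: aafb_map_board() is invented, the constants are not. */
	static void aafb_map_board(struct aafb_info *ip, unsigned long base_addr)
	{
		ip->bt455 = (struct bt455_regs *)(base_addr + PMAG_AA_BT455_OFFSET);
		ip->bt431 = (struct bt431_regs *)(base_addr + PMAG_AA_BT431_OFFSET);
		ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET;
		ip->fb_size = 2048 * 1024;	/* 2 MB of on-board framebuffer */
		ip->fb_line_length = 2048;	/* bytes per scanline */
	}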
/linux-4.4.14/arch/mips/oprofile/
H A Dbacktrace.c31 static inline int is_ra_save_ins(union mips_instruction *ip) is_ra_save_ins() argument
34 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) is_ra_save_ins()
35 && ip->i_format.rs == 29 && ip->i_format.rt == 31; is_ra_save_ins()
38 static inline int is_sp_move_ins(union mips_instruction *ip) is_sp_move_ins() argument
41 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) is_sp_move_ins()
43 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) is_sp_move_ins()
52 static inline int is_end_of_function_marker(union mips_instruction *ip) is_end_of_function_marker() argument
55 if (ip->r_format.func == jr_op && ip->r_format.rs == 31) is_end_of_function_marker()
58 if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28) is_end_of_function_marker()
84 union mips_instruction ip; unwind_user_frame() local
86 if (get_mem(addr, (unsigned long *) &ip)) unwind_user_frame()
89 if (is_sp_move_ins(&ip)) { unwind_user_frame()
90 int stack_adjustment = ip.i_format.simmediate; unwind_user_frame()
96 } else if (is_ra_save_ins(&ip)) { unwind_user_frame()
97 int ra_slot = ip.i_format.simmediate; unwind_user_frame()
102 } else if (is_end_of_function_marker(&ip)) unwind_user_frame()
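The backtrace.c predicates classify raw MIPS instruction words by opcode and register fields; is_ra_save_ins(), for instance, accepts "sw/sd $ra, offset($sp)". Below is a self-contained user-space sketch of the same test using plain shifts instead of the kernel's union mips_instruction; the opcode values are the standard MIPS I-type encodings and the function name is invented.

	/* Stand-alone sketch (builds in user space); not kernel code. */
	#include <stdint.h>
	#include <stdbool.h>

	#define MIPS_REG_SP 29
	#define MIPS_REG_RA 31

	static bool looks_like_ra_save(uint32_t insn)
	{
		uint32_t opcode = insn >> 26;		/* bits 31..26 */
		uint32_t rs = (insn >> 21) & 0x1f;	/* base register */
		uint32_t rt = (insn >> 16) & 0x1f;	/* register being stored */

		/* 0x2b is sw, 0x3f is sd: "sw/sd $ra, offset($sp)" is the
		 * prologue store the unwinder looks for. */
		return (opcode == 0x2b || opcode == 0x3f) &&
		       rs == MIPS_REG_SP && rt == MIPS_REG_RA;
	}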
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_inode_fork.h89 #define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0)
90 #define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3))
92 #define XFS_IFORK_PTR(ip,w) \
94 &(ip)->i_df : \
95 (ip)->i_afp)
96 #define XFS_IFORK_DSIZE(ip) \
97 (XFS_IFORK_Q(ip) ? \
98 XFS_IFORK_BOFF(ip) : \
99 XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version))
100 #define XFS_IFORK_ASIZE(ip) \
101 (XFS_IFORK_Q(ip) ? \
102 XFS_LITINO((ip)->i_mount, (ip)->i_d.di_version) - \
103 XFS_IFORK_BOFF(ip) : \
105 #define XFS_IFORK_SIZE(ip,w) \
107 XFS_IFORK_DSIZE(ip) : \
108 XFS_IFORK_ASIZE(ip))
109 #define XFS_IFORK_FORMAT(ip,w) \
111 (ip)->i_d.di_format : \
112 (ip)->i_d.di_aformat)
113 #define XFS_IFORK_FMT_SET(ip,w,n) \
115 ((ip)->i_d.di_format = (n)) : \
116 ((ip)->i_d.di_aformat = (n)))
117 #define XFS_IFORK_NEXTENTS(ip,w) \
119 (ip)->i_d.di_nextents : \
120 (ip)->i_d.di_anextents)
121 #define XFS_IFORK_NEXT_SET(ip,w,n) \
123 ((ip)->i_d.di_nextents = (n)) : \
124 ((ip)->i_d.di_anextents = (n)))
125 #define XFS_IFORK_MAXEXT(ip, w) \
126 (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
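The xfs_inode_fork.h macros above split an inode's literal area between the data and attribute forks at di_forkoff (stored in 8-byte units). Purely for readability, the same logic re-expressed as functions; the names below are invented, and xfs_litino() stands in for XFS_LITINO(ip->i_mount, ip->i_d.di_version).

	/* Readability sketch only; mirrors XFS_IFORK_Q/DSIZE/ASIZE above. */
	static inline bool inode_has_attr_fork(const struct xfs_inode *ip)
	{
		return ip->i_d.di_forkoff != 0;			/* XFS_IFORK_Q() */
	}

	static inline int data_fork_size(const struct xfs_inode *ip)
	{
		if (!inode_has_attr_fork(ip))
			return xfs_litino(ip);			/* whole literal area */
		return (int)ip->i_d.di_forkoff << 3;		/* XFS_IFORK_BOFF() */
	}

	static inline int attr_fork_size(const struct xfs_inode *ip)
	{
		if (!inode_has_attr_fork(ip))
			return 0;
		return xfs_litino(ip) - ((int)ip->i_d.di_forkoff << 3);
	}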
H A Dxfs_bmap.c81 * (xfs_default_attroffset(ip) >> 3) because we could have mounted xfs_bmap_compute_maxlevels()
138 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork) xfs_bmap_needs_btree() argument
140 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && xfs_bmap_needs_btree()
141 XFS_IFORK_NEXTENTS(ip, whichfork) > xfs_bmap_needs_btree()
142 XFS_IFORK_MAXEXT(ip, whichfork); xfs_bmap_needs_btree()
148 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork) xfs_bmap_wants_extents() argument
150 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE && xfs_bmap_wants_extents()
151 XFS_IFORK_NEXTENTS(ip, whichfork) <= xfs_bmap_wants_extents()
152 XFS_IFORK_MAXEXT(ip, whichfork); xfs_bmap_wants_extents()
176 * for ip's delayed extent of length "len".
180 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_worst_indlen()
188 mp = ip->i_mount; xfs_bmap_worst_indlen()
210 struct xfs_inode *ip) xfs_default_attroffset()
212 struct xfs_mount *mp = ip->i_mount; xfs_default_attroffset()
216 offset = XFS_LITINO(mp, ip->i_d.di_version) - xfs_default_attroffset()
222 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version)); xfs_default_attroffset()
233 xfs_inode_t *ip, xfs_bmap_forkoff_reset()
237 ip->i_d.di_format != XFS_DINODE_FMT_DEV && xfs_bmap_forkoff_reset()
238 ip->i_d.di_format != XFS_DINODE_FMT_UUID && xfs_bmap_forkoff_reset()
239 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) { xfs_bmap_forkoff_reset()
240 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3; xfs_bmap_forkoff_reset()
242 if (dfl_forkoff > ip->i_d.di_forkoff) xfs_bmap_forkoff_reset()
243 ip->i_d.di_forkoff = dfl_forkoff; xfs_bmap_forkoff_reset()
327 * Check that the extents for the inode ip are in the right order in all
334 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_check_leaf_extents()
351 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { xfs_bmap_check_leaf_extents()
356 mp = ip->i_mount; xfs_bmap_check_leaf_extents()
357 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_check_leaf_extents()
496 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_trace_exlist()
508 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_exlist()
511 trace_xfs_extlist(ip, idx, whichfork, caller_ip); xfs_bmap_trace_exlist()
556 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
662 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_btree_to_extents()
677 mp = ip->i_mount; xfs_bmap_btree_to_extents()
678 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_btree_to_extents()
680 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); xfs_bmap_btree_to_extents()
700 ip->i_d.di_nblocks--; xfs_bmap_btree_to_extents()
701 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); xfs_bmap_btree_to_extents()
705 xfs_iroot_realloc(ip, -1, whichfork); xfs_bmap_btree_to_extents()
708 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); xfs_bmap_btree_to_extents()
720 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_extents_to_btree()
743 mp = ip->i_mount; xfs_bmap_extents_to_btree()
744 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_extents_to_btree()
745 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); xfs_bmap_extents_to_btree()
750 xfs_iroot_realloc(ip, 1, whichfork); xfs_bmap_extents_to_btree()
759 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino, xfs_bmap_extents_to_btree()
763 XFS_BMAP_MAGIC, 1, 1, ip->i_ino, xfs_bmap_extents_to_btree()
769 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_extents_to_btree()
776 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE); xfs_bmap_extents_to_btree()
783 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino); xfs_bmap_extents_to_btree()
795 xfs_iroot_realloc(ip, -1, whichfork); xfs_bmap_extents_to_btree()
809 ip->i_d.di_nblocks++; xfs_bmap_extents_to_btree()
810 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); xfs_bmap_extents_to_btree()
819 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino, xfs_bmap_extents_to_btree()
823 XFS_BMAP_MAGIC, 0, 0, ip->i_ino, xfs_bmap_extents_to_btree()
836 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); xfs_bmap_extents_to_btree()
869 struct xfs_inode *ip, xfs_bmap_local_to_extents_empty()
872 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_local_to_extents_empty()
874 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); xfs_bmap_local_to_extents_empty()
876 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0); xfs_bmap_local_to_extents_empty()
878 xfs_bmap_forkoff_reset(ip, whichfork); xfs_bmap_local_to_extents_empty()
881 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); xfs_bmap_local_to_extents_empty()
888 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_local_to_extents()
895 struct xfs_inode *ip, xfs_bmap_local_to_extents()
909 ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK)); xfs_bmap_local_to_extents()
910 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_local_to_extents()
911 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); xfs_bmap_local_to_extents()
914 xfs_bmap_local_to_extents_empty(ip, whichfork); xfs_bmap_local_to_extents()
925 args.mp = ip->i_mount; xfs_bmap_local_to_extents()
932 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino); xfs_bmap_local_to_extents()
958 init_fn(tp, bp, ip, ifp); xfs_bmap_local_to_extents()
961 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); xfs_bmap_local_to_extents()
962 xfs_bmap_local_to_extents_empty(ip, whichfork); xfs_bmap_local_to_extents()
968 trace_xfs_bmap_post_update(ip, 0, xfs_bmap_local_to_extents()
971 XFS_IFORK_NEXT_SET(ip, whichfork, 1); xfs_bmap_local_to_extents()
972 ip->i_d.di_nblocks = 1; xfs_bmap_local_to_extents()
973 xfs_trans_mod_dquot_byino(tp, ip, xfs_bmap_local_to_extents()
988 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_attrfork_btree()
998 mp = ip->i_mount; xfs_bmap_add_attrfork_btree()
999 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) xfs_bmap_add_attrfork_btree()
1002 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); xfs_bmap_add_attrfork_btree()
1031 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_attrfork_extents()
1039 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip)) xfs_bmap_add_attrfork_extents()
1042 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0, xfs_bmap_add_attrfork_extents()
1066 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_attrfork_local()
1073 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip)) xfs_bmap_add_attrfork_local()
1076 if (S_ISDIR(ip->i_d.di_mode)) { xfs_bmap_add_attrfork_local()
1078 dargs.geo = ip->i_mount->m_dir_geo; xfs_bmap_add_attrfork_local()
1079 dargs.dp = ip; xfs_bmap_add_attrfork_local()
1088 if (S_ISLNK(ip->i_d.di_mode)) xfs_bmap_add_attrfork_local()
1089 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, xfs_bmap_add_attrfork_local()
1100 * Must not be in a transaction, ip must not be locked.
1104 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_attrfork()
1118 ASSERT(XFS_IFORK_Q(ip) == 0); xfs_bmap_add_attrfork()
1120 mp = ip->i_mount; xfs_bmap_add_attrfork()
1121 ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); xfs_bmap_add_attrfork()
1131 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_bmap_add_attrfork()
1132 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ? xfs_bmap_add_attrfork()
1137 if (XFS_IFORK_Q(ip)) xfs_bmap_add_attrfork()
1139 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) { xfs_bmap_add_attrfork()
1143 ASSERT(ip->i_d.di_aformat == 0); xfs_bmap_add_attrfork()
1144 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; xfs_bmap_add_attrfork()
1146 ASSERT(ip->i_d.di_anextents == 0); xfs_bmap_add_attrfork()
1148 xfs_trans_ijoin(tp, ip, 0); xfs_bmap_add_attrfork()
1149 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_bmap_add_attrfork()
1151 switch (ip->i_d.di_format) { xfs_bmap_add_attrfork()
1153 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; xfs_bmap_add_attrfork()
1156 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3; xfs_bmap_add_attrfork()
1161 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); xfs_bmap_add_attrfork()
1162 if (!ip->i_d.di_forkoff) xfs_bmap_add_attrfork()
1163 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3; xfs_bmap_add_attrfork()
1173 ASSERT(ip->i_afp == NULL); xfs_bmap_add_attrfork()
1174 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); xfs_bmap_add_attrfork()
1175 ip->i_afp->if_flags = XFS_IFEXTENTS; xfs_bmap_add_attrfork()
1178 switch (ip->i_d.di_format) { xfs_bmap_add_attrfork()
1180 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist, xfs_bmap_add_attrfork()
1184 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock, xfs_bmap_add_attrfork()
1188 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist, xfs_bmap_add_attrfork()
1196 xfs_trans_log_inode(tp, ip, logflags); xfs_bmap_add_attrfork()
1221 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_bmap_add_attrfork()
1228 xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_bmap_add_attrfork()
1245 xfs_inode_t *ip, /* incore inode */ xfs_bmap_read_extents()
1262 mp = ip->i_mount; xfs_bmap_read_extents()
1263 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_read_extents()
1265 XFS_EXTFMT_INODE(ip); xfs_bmap_read_extents()
1312 xfs_warn(ip->i_mount, xfs_bmap_read_extents()
1314 (unsigned long long) ip->i_ino); xfs_bmap_read_extents()
1316 XFS_ERRLEVEL_LOW, ip->i_mount, block); xfs_bmap_read_extents()
1346 ip->i_mount); xfs_bmap_read_extents()
1364 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); xfs_bmap_read_extents()
1365 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork); xfs_bmap_read_extents()
1429 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_search_extents()
1440 XFS_STATS_INC(ip->i_mount, xs_look_exlist); xfs_bmap_search_extents()
1441 ifp = XFS_IFORK_PTR(ip, fork); xfs_bmap_search_extents()
1446 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) { xfs_bmap_search_extents()
1447 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, xfs_bmap_search_extents()
1451 (unsigned long long)ip->i_ino, xfs_bmap_search_extents()
1473 xfs_inode_t *ip, /* incore inode */ xfs_bmap_first_unused()
1487 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE || xfs_bmap_first_unused()
1488 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS || xfs_bmap_first_unused()
1489 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL); xfs_bmap_first_unused()
1490 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { xfs_bmap_first_unused()
1494 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_first_unused()
1496 (error = xfs_iread_extents(tp, ip, whichfork))) xfs_bmap_first_unused()
1526 xfs_inode_t *ip, /* incore inode */ xfs_bmap_last_before()
1539 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && xfs_bmap_last_before()
1540 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bmap_last_before()
1541 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) xfs_bmap_last_before()
1543 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { xfs_bmap_last_before()
1547 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_last_before()
1549 (error = xfs_iread_extents(tp, ip, whichfork))) xfs_bmap_last_before()
1552 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, xfs_bmap_last_before()
1569 struct xfs_inode *ip, xfs_bmap_last_extent()
1574 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_last_extent()
1579 error = xfs_iread_extents(tp, ip, whichfork); xfs_bmap_last_extent()
1614 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, xfs_bmap_isaeof()
1641 struct xfs_inode *ip, xfs_bmap_last_offset()
1651 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) xfs_bmap_last_offset()
1654 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && xfs_bmap_last_offset()
1655 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) xfs_bmap_last_offset()
1658 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); xfs_bmap_last_offset()
1673 xfs_inode_t *ip, /* incore inode */ xfs_bmap_one_block()
1683 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize; xfs_bmap_one_block()
1685 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1) xfs_bmap_one_block()
1687 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) xfs_bmap_one_block()
1689 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_one_block()
1695 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize); xfs_bmap_one_block()
1729 ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK); xfs_bmap_add_extent_delay_real()
1788 if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { xfs_bmap_add_extent_delay_real()
1822 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1826 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1828 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state); xfs_bmap_add_extent_delay_real()
1829 bma->ip->i_d.di_nextents--; xfs_bmap_add_extent_delay_real()
1865 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1868 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1870 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); xfs_bmap_add_extent_delay_real()
1895 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1899 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1901 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); xfs_bmap_add_extent_delay_real()
1927 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1929 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1931 bma->ip->i_d.di_nextents++; xfs_bmap_add_extent_delay_real()
1955 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1960 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1963 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1983 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), xfs_bmap_add_extent_delay_real()
1986 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
1996 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2000 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); xfs_bmap_add_extent_delay_real()
2001 bma->ip->i_d.di_nextents++; xfs_bmap_add_extent_delay_real()
2019 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { xfs_bmap_add_extent_delay_real()
2020 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, xfs_bmap_add_extent_delay_real()
2027 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), xfs_bmap_add_extent_delay_real()
2032 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2041 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2047 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2067 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), xfs_bmap_add_extent_delay_real()
2069 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2071 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2082 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2084 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state); xfs_bmap_add_extent_delay_real()
2085 bma->ip->i_d.di_nextents++; xfs_bmap_add_extent_delay_real()
2103 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { xfs_bmap_add_extent_delay_real()
2104 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, xfs_bmap_add_extent_delay_real()
2111 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp), xfs_bmap_add_extent_delay_real()
2116 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2144 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2149 (int)xfs_bmap_worst_indlen(bma->ip, temp2)); xfs_bmap_add_extent_delay_real()
2153 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state); xfs_bmap_add_extent_delay_real()
2154 bma->ip->i_d.di_nextents++; xfs_bmap_add_extent_delay_real()
2172 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { xfs_bmap_add_extent_delay_real()
2173 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, xfs_bmap_add_extent_delay_real()
2180 temp = xfs_bmap_worst_indlen(bma->ip, temp); xfs_bmap_add_extent_delay_real()
2181 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2); xfs_bmap_add_extent_delay_real()
2185 error = xfs_mod_fdblocks(bma->ip->i_mount, xfs_bmap_add_extent_delay_real()
2194 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2195 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2198 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_); xfs_bmap_add_extent_delay_real()
2218 if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) { xfs_bmap_add_extent_delay_real()
2222 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, xfs_bmap_add_extent_delay_real()
2237 xfs_mod_fdblocks(bma->ip->i_mount, xfs_bmap_add_extent_delay_real()
2245 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK); xfs_bmap_add_extent_delay_real()
2260 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_extent_unwritten_real()
2285 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_bmap_add_extent_unwritten_real()
2344 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { xfs_bmap_add_extent_unwritten_real()
2377 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2381 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2383 xfs_iext_remove(ip, *idx + 1, 2, state); xfs_bmap_add_extent_unwritten_real()
2384 ip->i_d.di_nextents -= 2; xfs_bmap_add_extent_unwritten_real()
2421 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2424 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2426 xfs_iext_remove(ip, *idx + 1, 1, state); xfs_bmap_add_extent_unwritten_real()
2427 ip->i_d.di_nextents--; xfs_bmap_add_extent_unwritten_real()
2456 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2460 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2461 xfs_iext_remove(ip, *idx + 1, 1, state); xfs_bmap_add_extent_unwritten_real()
2462 ip->i_d.di_nextents--; xfs_bmap_add_extent_unwritten_real()
2492 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2494 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2517 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2522 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2524 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2529 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2564 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2571 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2573 xfs_iext_insert(ip, *idx, 1, new, state); xfs_bmap_add_extent_unwritten_real()
2574 ip->i_d.di_nextents++; xfs_bmap_add_extent_unwritten_real()
2602 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2605 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2609 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2613 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2644 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2647 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2650 xfs_iext_insert(ip, *idx, 1, new, state); xfs_bmap_add_extent_unwritten_real()
2652 ip->i_d.di_nextents++; xfs_bmap_add_extent_unwritten_real()
2685 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2688 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_unwritten_real()
2698 xfs_iext_insert(ip, *idx, 2, &r[0], state); xfs_bmap_add_extent_unwritten_real()
2700 ip->i_d.di_nextents += 2; xfs_bmap_add_extent_unwritten_real()
2754 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) { xfs_bmap_add_extent_unwritten_real()
2758 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, xfs_bmap_add_extent_unwritten_real()
2771 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK); xfs_bmap_add_extent_unwritten_real()
2785 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_add_extent_hole_delay()
2797 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_bmap_add_extent_hole_delay()
2816 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { xfs_bmap_add_extent_hole_delay()
2855 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2860 newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmap_add_extent_hole_delay()
2863 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2865 xfs_iext_remove(ip, *idx + 1, 1, state); xfs_bmap_add_extent_hole_delay()
2877 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2881 newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmap_add_extent_hole_delay()
2884 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2893 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2897 newlen = xfs_bmap_worst_indlen(ip, temp); xfs_bmap_add_extent_hole_delay()
2901 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_delay()
2911 xfs_iext_insert(ip, *idx, 1, new, state); xfs_bmap_add_extent_hole_delay()
2916 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen), xfs_bmap_add_extent_hole_delay()
2943 ifp = XFS_IFORK_PTR(bma->ip, whichfork); xfs_bmap_add_extent_hole_real()
3011 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3015 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3017 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state); xfs_bmap_add_extent_hole_real()
3019 XFS_IFORK_NEXT_SET(bma->ip, whichfork, xfs_bmap_add_extent_hole_real()
3020 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1); xfs_bmap_add_extent_hole_real()
3057 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3060 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3088 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3093 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_); xfs_bmap_add_extent_hole_real()
3122 xfs_iext_insert(bma->ip, bma->idx, 1, new, state); xfs_bmap_add_extent_hole_real()
3123 XFS_IFORK_NEXT_SET(bma->ip, whichfork, xfs_bmap_add_extent_hole_real()
3124 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1); xfs_bmap_add_extent_hole_real()
3146 if (xfs_bmap_needs_btree(bma->ip, whichfork)) { xfs_bmap_add_extent_hole_real()
3150 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip, xfs_bmap_add_extent_hole_real()
3162 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); xfs_bmap_add_extent_hole_real()
3368 mp = ap->ip->i_mount; xfs_bmap_adjacent()
3370 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; xfs_bmap_adjacent()
3567 struct xfs_mount *mp = ap->ip->i_mount; xfs_bmap_btalloc_nullfb()
3601 struct xfs_mount *mp = ap->ip->i_mount; xfs_bmap_btalloc_filestreams()
3659 mp = ap->ip->i_mount; xfs_bmap_btalloc()
3668 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; xfs_bmap_btalloc()
3681 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) { xfs_bmap_btalloc()
3682 ag = xfs_filestream_lookup_ag(ap->ip); xfs_bmap_btalloc()
3686 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino); xfs_bmap_btalloc()
3720 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) xfs_bmap_btalloc()
3727 if (xfs_inode_is_filestream(ap->ip)) xfs_bmap_btalloc()
3806 args.ip = ap->ip; xfs_bmap_btalloc()
3872 ap->ip->i_d.di_nblocks += args.len; xfs_bmap_btalloc()
3873 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); xfs_bmap_btalloc()
3875 ap->ip->i_delayed_blks -= args.len; xfs_bmap_btalloc()
3880 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, xfs_bmap_btalloc()
3899 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) xfs_bmap_alloc()
4010 struct xfs_inode *ip, xfs_bmapi_read()
4017 struct xfs_mount *mp = ip->i_mount; xfs_bmapi_read()
4033 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)); xfs_bmapi_read()
4036 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bmapi_read()
4037 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), xfs_bmapi_read()
4048 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmapi_read()
4051 error = xfs_iread_extents(NULL, ip, whichfork); xfs_bmapi_read()
4056 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev); xfs_bmapi_read()
4098 struct xfs_inode *ip, xfs_bmapi_reserve_delalloc()
4106 struct xfs_mount *mp = ip->i_mount; xfs_bmapi_reserve_delalloc()
4107 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_bmapi_reserve_delalloc()
4110 char rt = XFS_IS_REALTIME_INODE(ip); xfs_bmapi_reserve_delalloc()
4119 extsz = xfs_get_extsz_hint(ip); xfs_bmapi_reserve_delalloc()
4134 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0, xfs_bmapi_reserve_delalloc()
4143 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen); xfs_bmapi_reserve_delalloc()
4160 ip->i_delayed_blks += alen; xfs_bmapi_reserve_delalloc()
4166 xfs_bmap_add_extent_hole_delay(ip, lastx, got); xfs_bmapi_reserve_delalloc()
4187 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ? xfs_bmapi_reserve_delalloc()
4197 struct xfs_inode *ip, /* incore inode */ xfs_bmapi_delay()
4204 struct xfs_mount *mp = ip->i_mount; xfs_bmapi_delay()
4205 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); xfs_bmapi_delay()
4218 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bmapi_delay()
4221 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS && xfs_bmapi_delay()
4222 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), xfs_bmapi_delay()
4234 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK); xfs_bmapi_delay()
4239 xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev); xfs_bmapi_delay()
4245 error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got, xfs_bmapi_delay()
4281 struct xfs_mount *mp = bma->ip->i_mount; xfs_bmapi_allocate()
4284 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); xfs_bmapi_allocate()
4344 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork); xfs_bmapi_allocate()
4404 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork); xfs_bmapi_convert_unwritten()
4424 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp, xfs_bmapi_convert_unwritten()
4425 bma->ip, whichfork); xfs_bmapi_convert_unwritten()
4437 error = xfs_zero_extent(bma->ip, mval->br_startblock, xfs_bmapi_convert_unwritten()
4443 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx, xfs_bmapi_convert_unwritten()
4489 struct xfs_inode *ip, /* incore inode */ xfs_bmapi_write()
4500 struct xfs_mount *mp = ip->i_mount; xfs_bmapi_write()
4533 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL); xfs_bmapi_write()
4534 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bmapi_write()
4549 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bmapi_write()
4550 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), xfs_bmapi_write()
4559 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmapi_write()
4564 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE) xfs_bmapi_write()
4573 error = xfs_iread_extents(tp, ip, whichfork); xfs_bmapi_write()
4578 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got, xfs_bmapi_write()
4585 bma.ip = ip; xfs_bmapi_write()
4662 if (xfs_bmap_wants_extents(ip, whichfork)) { xfs_bmapi_write()
4666 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, xfs_bmapi_write()
4673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || xfs_bmapi_write()
4674 XFS_IFORK_NEXTENTS(ip, whichfork) > xfs_bmapi_write()
4675 XFS_IFORK_MAXEXT(ip, whichfork)); xfs_bmapi_write()
4683 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) xfs_bmapi_write()
4686 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) xfs_bmapi_write()
4694 xfs_trans_log_inode(tp, ip, bma.logflags); xfs_bmapi_write()
4723 xfs_inode_t *ip, /* incore inode pointer */ xfs_bmap_del_extent()
4754 mp = ip->i_mount; xfs_bmap_del_extent()
4760 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_del_extent()
4783 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { xfs_bmap_del_extent()
4838 xfs_iext_remove(ip, *idx, 1, xfs_bmap_del_extent()
4844 XFS_IFORK_NEXT_SET(ip, whichfork, xfs_bmap_del_extent()
4845 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); xfs_bmap_del_extent()
4860 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4865 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), xfs_bmap_del_extent()
4868 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4873 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4889 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4892 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), xfs_bmap_del_extent()
4895 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4899 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4916 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
4978 XFS_IFORK_NEXT_SET(ip, whichfork, xfs_bmap_del_extent()
4979 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); xfs_bmap_del_extent()
4982 temp = xfs_bmap_worst_indlen(ip, temp); xfs_bmap_del_extent()
4984 temp2 = xfs_bmap_worst_indlen(ip, temp2); xfs_bmap_del_extent()
5004 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); xfs_bmap_del_extent()
5005 xfs_iext_insert(ip, *idx + 1, 1, &new, state); xfs_bmap_del_extent()
5019 ip->i_d.di_nblocks -= nblks; xfs_bmap_del_extent()
5024 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); xfs_bmap_del_extent()
5047 struct xfs_inode *ip, /* incore inode */ xfs_bunmapi()
5078 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); xfs_bunmapi()
5082 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bunmapi()
5084 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bunmapi()
5085 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { xfs_bunmapi()
5087 ip->i_mount); xfs_bunmapi()
5090 mp = ip->i_mount; xfs_bunmapi()
5094 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bunmapi()
5099 (error = xfs_iread_extents(tp, ip, whichfork))) xfs_bunmapi()
5107 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); xfs_bunmapi()
5110 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, xfs_bunmapi()
5124 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); xfs_bunmapi()
5125 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bunmapi()
5218 error = xfs_bmap_add_extent_unwritten_real(tp, ip, xfs_bunmapi()
5277 ip, &lastx, &cur, &prev, xfs_bunmapi()
5286 ip, &lastx, &cur, &del, xfs_bunmapi()
5303 ip, -((long)del.br_blockcount), 0, xfs_bunmapi()
5309 ip, -((long)del.br_blockcount), 0, xfs_bunmapi()
5312 ip->i_delayed_blks -= del.br_blockcount; xfs_bunmapi()
5330 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS && xfs_bunmapi()
5331 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */ xfs_bunmapi()
5332 XFS_IFORK_MAXEXT(ip, whichfork) && xfs_bunmapi()
5339 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, xfs_bunmapi()
5367 if (xfs_bmap_needs_btree(ip, whichfork)) { xfs_bunmapi()
5369 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, xfs_bunmapi()
5378 else if (xfs_bmap_wants_extents(ip, whichfork)) { xfs_bunmapi()
5380 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags, xfs_bunmapi()
5396 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) xfs_bunmapi()
5399 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) xfs_bunmapi()
5406 xfs_trans_log_inode(tp, ip, logflags); xfs_bunmapi()
5456 struct xfs_inode *ip, xfs_bmse_merge()
5469 struct xfs_mount *mp = ip->i_mount; xfs_bmse_merge()
5475 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_bmse_merge()
5476 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bmse_merge()
5485 xfs_iext_remove(ip, current_ext, 1, 0); xfs_bmse_merge()
5491 XFS_IFORK_NEXT_SET(ip, whichfork, xfs_bmse_merge()
5492 XFS_IFORK_NEXTENTS(ip, whichfork) - 1); xfs_bmse_merge()
5529 struct xfs_inode *ip, xfs_bmse_shift_one()
5548 mp = ip->i_mount; xfs_bmse_shift_one()
5549 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmse_shift_one()
5584 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, xfs_bmse_shift_one()
5652 struct xfs_inode *ip, xfs_bmap_shift_extents()
5665 struct xfs_mount *mp = ip->i_mount; xfs_bmap_shift_extents()
5676 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bmap_shift_extents()
5677 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), xfs_bmap_shift_extents()
5687 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); xfs_bmap_shift_extents()
5688 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_bmap_shift_extents()
5692 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_shift_extents()
5695 error = xfs_iread_extents(tp, ip, whichfork); xfs_bmap_shift_extents()
5701 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_shift_extents()
5762 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb, xfs_bmap_shift_extents()
5795 xfs_trans_log_inode(tp, ip, logflags); xfs_bmap_shift_extents()
5809 struct xfs_inode *ip, xfs_bmap_split_extent_at()
5819 struct xfs_mount *mp = ip->i_mount; xfs_bmap_split_extent_at()
5828 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && xfs_bmap_split_extent_at()
5829 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), xfs_bmap_split_extent_at()
5839 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_split_extent_at()
5842 error = xfs_iread_extents(tp, ip, whichfork); xfs_bmap_split_extent_at()
5872 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); xfs_bmap_split_extent_at()
5901 xfs_iext_insert(ip, current_ext, 1, &new, 0); xfs_bmap_split_extent_at()
5902 XFS_IFORK_NEXT_SET(ip, whichfork, xfs_bmap_split_extent_at()
5903 XFS_IFORK_NEXTENTS(ip, whichfork) + 1); xfs_bmap_split_extent_at()
5923 if (xfs_bmap_needs_btree(ip, whichfork)) { xfs_bmap_split_extent_at()
5927 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list, xfs_bmap_split_extent_at()
5940 xfs_trans_log_inode(tp, ip, logflags); xfs_bmap_split_extent_at()
5946 struct xfs_inode *ip, xfs_bmap_split_extent()
5949 struct xfs_mount *mp = ip->i_mount; xfs_bmap_split_extent()
5964 xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_bmap_split_extent()
5965 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_bmap_split_extent()
5969 error = xfs_bmap_split_extent_at(tp, ip, split_fsb, xfs_bmap_split_extent()
179 xfs_bmap_worst_indlen( xfs_inode_t *ip, xfs_filblks_t len) xfs_bmap_worst_indlen() argument
209 xfs_default_attroffset( struct xfs_inode *ip) xfs_default_attroffset() argument
232 xfs_bmap_forkoff_reset( xfs_inode_t *ip, int whichfork) xfs_bmap_forkoff_reset() argument
332 xfs_bmap_check_leaf_extents( xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork) xfs_bmap_check_leaf_extents() argument
495 xfs_bmap_trace_exlist( xfs_inode_t *ip, xfs_extnum_t cnt, int whichfork, unsigned long caller_ip) xfs_bmap_trace_exlist() argument
660 xfs_bmap_btree_to_extents( xfs_trans_t *tp, xfs_inode_t *ip, xfs_btree_cur_t *cur, int *logflagsp, int whichfork) xfs_bmap_btree_to_extents() argument
718 xfs_bmap_extents_to_btree( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fsblock_t *firstblock, xfs_bmap_free_t *flist, xfs_btree_cur_t **curp, int wasdel, int *logflagsp, int whichfork) xfs_bmap_extents_to_btree() argument
868 xfs_bmap_local_to_extents_empty( struct xfs_inode *ip, int whichfork) xfs_bmap_local_to_extents_empty() argument
886 xfs_bmap_local_to_extents( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fsblock_t *firstblock, xfs_extlen_t total, int *logflagsp, int whichfork, void (*init_fn)(struct xfs_trans *tp, struct xfs_buf *bp, struct xfs_inode *ip, struct xfs_ifork *ifp)) xfs_bmap_local_to_extents() argument
986 xfs_bmap_add_attrfork_btree( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fsblock_t *firstblock, xfs_bmap_free_t *flist, int *flags) xfs_bmap_add_attrfork_btree() argument
1029 xfs_bmap_add_attrfork_extents( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fsblock_t *firstblock, xfs_bmap_free_t *flist, int *flags) xfs_bmap_add_attrfork_extents() argument
1064 xfs_bmap_add_attrfork_local( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fsblock_t *firstblock, xfs_bmap_free_t *flist, int *flags) xfs_bmap_add_attrfork_local() argument
1103 xfs_bmap_add_attrfork( xfs_inode_t *ip, int size, int rsvd) xfs_bmap_add_attrfork() argument
1243 xfs_bmap_read_extents( xfs_trans_t *tp, xfs_inode_t *ip, int whichfork) xfs_bmap_read_extents() argument
1428 xfs_bmap_search_extents( xfs_inode_t *ip, xfs_fileoff_t bno, int fork, int *eofp, xfs_extnum_t *lastxp, xfs_bmbt_irec_t *gotp, xfs_bmbt_irec_t *prevp) xfs_bmap_search_extents() argument
1471 xfs_bmap_first_unused( xfs_trans_t *tp, xfs_inode_t *ip, xfs_extlen_t len, xfs_fileoff_t *first_unused, int whichfork) xfs_bmap_first_unused() argument
1524 xfs_bmap_last_before( xfs_trans_t *tp, xfs_inode_t *ip, xfs_fileoff_t *last_block, int whichfork) xfs_bmap_last_before() argument
1567 xfs_bmap_last_extent( struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *rec, int *is_empty) xfs_bmap_last_extent() argument
1640 xfs_bmap_last_offset( struct xfs_inode *ip, xfs_fileoff_t *last_block, int whichfork) xfs_bmap_last_offset() argument
1672 xfs_bmap_one_block( xfs_inode_t *ip, int whichfork) xfs_bmap_one_block() argument
2258 xfs_bmap_add_extent_unwritten_real( struct xfs_trans *tp, xfs_inode_t *ip, xfs_extnum_t *idx, xfs_btree_cur_t **curp, xfs_bmbt_irec_t *new, xfs_fsblock_t *first, xfs_bmap_free_t *flist, int *logflagsp) xfs_bmap_add_extent_unwritten_real() argument
2784 xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, xfs_extnum_t *idx, xfs_bmbt_irec_t *new) xfs_bmap_add_extent_hole_delay() argument
4009 xfs_bmapi_read( struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, struct xfs_bmbt_irec *mval, int *nmap, int flags) xfs_bmapi_read() argument
4097 xfs_bmapi_reserve_delalloc( struct xfs_inode *ip, xfs_fileoff_t aoff, xfs_filblks_t len, struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev, xfs_extnum_t *lastx, int eof) xfs_bmapi_reserve_delalloc() argument
4196 xfs_bmapi_delay( struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, struct xfs_bmbt_irec *mval, int *nmap, int flags) xfs_bmapi_delay() argument
4487 xfs_bmapi_write( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, int flags, xfs_fsblock_t *firstblock, xfs_extlen_t total, struct xfs_bmbt_irec *mval, int *nmap, struct xfs_bmap_free *flist) xfs_bmapi_write() argument
4722 xfs_bmap_del_extent( xfs_inode_t *ip, xfs_trans_t *tp, xfs_extnum_t *idx, xfs_bmap_free_t *flist, xfs_btree_cur_t *cur, xfs_bmbt_irec_t *del, int *logflagsp, int whichfork) xfs_bmap_del_extent() argument
5045 xfs_bunmapi( xfs_trans_t *tp, struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, int flags, xfs_extnum_t nexts, xfs_fsblock_t *firstblock, xfs_bmap_free_t *flist, int *done) xfs_bunmapi() argument
5455 xfs_bmse_merge( struct xfs_inode *ip, int whichfork, xfs_fileoff_t shift, int current_ext, struct xfs_bmbt_rec_host *gotp, struct xfs_bmbt_rec_host *leftp, struct xfs_btree_cur *cur, int *logflags) xfs_bmse_merge() argument
5528 xfs_bmse_shift_one( struct xfs_inode *ip, int whichfork, xfs_fileoff_t offset_shift_fsb, int *current_ext, struct xfs_bmbt_rec_host *gotp, struct xfs_btree_cur *cur, int *logflags, enum shift_direction direction) xfs_bmse_shift_one() argument
5650 xfs_bmap_shift_extents( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb, int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock, struct xfs_bmap_free *flist, enum shift_direction direction, int num_exts) xfs_bmap_shift_extents() argument
5807 xfs_bmap_split_extent_at( struct xfs_trans *tp, struct xfs_inode *ip, xfs_fileoff_t split_fsb, xfs_fsblock_t *firstfsb, struct xfs_bmap_free *free_list) xfs_bmap_split_extent_at() argument
5945 xfs_bmap_split_extent( struct xfs_inode *ip, xfs_fileoff_t split_fsb) xfs_bmap_split_extent() argument
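The xfs_bmap.c prototypes above give the extent-mapping entry points; xfs_bmapi_read() is the read-only one and, per its ASSERT in the hits, expects the caller to hold the inode lock. A hypothetical caller sketch, not taken from the tree, showing only the calling convention; the function name and the choice of 16 blocks are illustrative, and flags == 0 selects the data fork.

	/* Hypothetical caller sketch: map the first 16 blocks of the data fork. */
	static int
	example_map_data_fork(
		struct xfs_inode	*ip)
	{
		struct xfs_bmbt_irec	map[4];
		int			nmaps = 4;
		int			error;

		xfs_ilock(ip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(ip, 0, 16, map, &nmaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		/* On success, map[0..nmaps-1] describe the extents found. */
		return error;
	}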
H A Dxfs_inode_buf.c296 struct xfs_inode *ip, xfs_dinode_verify()
311 if (be64_to_cpu(dip->di_ino) != ip->i_ino) xfs_dinode_verify()
350 xfs_inode_t *ip, xfs_iread()
360 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); xfs_iread()
369 memset(&ip->i_d, 0, sizeof(ip->i_d)); xfs_iread()
370 ip->i_d.di_magic = XFS_DINODE_MAGIC; xfs_iread()
371 ip->i_d.di_gen = prandom_u32(); xfs_iread()
373 ip->i_d.di_version = 3; xfs_iread()
374 ip->i_d.di_ino = ip->i_ino; xfs_iread()
375 uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid); xfs_iread()
377 ip->i_d.di_version = 2; xfs_iread()
384 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags); xfs_iread()
389 if (!xfs_dinode_verify(mp, ip, dip)) { xfs_iread()
391 __func__, ip->i_ino); xfs_iread()
406 xfs_dinode_from_disk(&ip->i_d, dip); xfs_iread()
407 error = xfs_iformat_fork(ip, dip); xfs_iread()
420 ip->i_d.di_magic = be16_to_cpu(dip->di_magic); xfs_iread()
421 ip->i_d.di_version = dip->di_version; xfs_iread()
422 ip->i_d.di_gen = be32_to_cpu(dip->di_gen); xfs_iread()
423 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); xfs_iread()
426 ip->i_d.di_ino = be64_to_cpu(dip->di_ino); xfs_iread()
427 uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid); xfs_iread()
437 ip->i_d.di_mode = 0; xfs_iread()
448 if (ip->i_d.di_version == 1) { xfs_iread()
449 ip->i_d.di_version = 2; xfs_iread()
450 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); xfs_iread()
451 ip->i_d.di_nlink = ip->i_d.di_onlink; xfs_iread()
452 ip->i_d.di_onlink = 0; xfs_iread()
453 xfs_set_projid(ip, 0); xfs_iread()
456 ip->i_delayed_blks = 0; xfs_iread()
294 xfs_dinode_verify( struct xfs_mount *mp, struct xfs_inode *ip, struct xfs_dinode *dip) xfs_dinode_verify() argument
347 xfs_iread( xfs_mount_t *mp, xfs_trans_t *tp, xfs_inode_t *ip, uint iget_flags) xfs_iread() argument
H A Dxfs_bmap.h37 struct xfs_inode *ip; /* incore inode pointer */ member in struct:xfs_bmalloca
184 void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
186 #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
187 xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
189 #define XFS_BMAP_TRACE_EXLIST(ip,c,w)
192 int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
193 void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
200 int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
202 int xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
204 int xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
206 int xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
207 int xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
209 int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
212 int xfs_bmapi_delay(struct xfs_inode *ip, xfs_fileoff_t bno,
215 int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
220 int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
226 uint xfs_default_attroffset(struct xfs_inode *ip);
227 int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
232 int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
H A Dxfs_inode_fork.c81 xfs_inode_t *ip, xfs_iformat_fork()
92 xfs_warn(ip->i_mount, xfs_iformat_fork()
94 (unsigned long long)ip->i_ino, xfs_iformat_fork()
100 ip->i_mount, dip); xfs_iformat_fork()
104 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { xfs_iformat_fork()
105 xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.", xfs_iformat_fork()
106 (unsigned long long)ip->i_ino, xfs_iformat_fork()
109 ip->i_mount, dip); xfs_iformat_fork()
113 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && xfs_iformat_fork()
114 !ip->i_mount->m_rtdev_targp)) { xfs_iformat_fork()
115 xfs_warn(ip->i_mount, xfs_iformat_fork()
117 ip->i_ino); xfs_iformat_fork()
119 XFS_ERRLEVEL_LOW, ip->i_mount, dip); xfs_iformat_fork()
123 switch (ip->i_d.di_mode & S_IFMT) { xfs_iformat_fork()
130 ip->i_mount, dip); xfs_iformat_fork()
133 ip->i_d.di_size = 0; xfs_iformat_fork()
134 ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); xfs_iformat_fork()
146 xfs_warn(ip->i_mount, xfs_iformat_fork()
148 (unsigned long long) ip->i_ino); xfs_iformat_fork()
151 ip->i_mount, dip); xfs_iformat_fork()
157 di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { xfs_iformat_fork()
158 xfs_warn(ip->i_mount, xfs_iformat_fork()
160 (unsigned long long) ip->i_ino, xfs_iformat_fork()
164 ip->i_mount, dip); xfs_iformat_fork()
169 error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size); xfs_iformat_fork()
172 error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK); xfs_iformat_fork()
175 error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK); xfs_iformat_fork()
179 ip->i_mount); xfs_iformat_fork()
185 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); xfs_iformat_fork()
194 ASSERT(ip->i_afp == NULL); xfs_iformat_fork()
195 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS); xfs_iformat_fork()
203 xfs_warn(ip->i_mount, xfs_iformat_fork()
205 (unsigned long long) ip->i_ino, xfs_iformat_fork()
209 ip->i_mount, dip); xfs_iformat_fork()
213 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); xfs_iformat_fork()
216 error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); xfs_iformat_fork()
219 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); xfs_iformat_fork()
226 kmem_zone_free(xfs_ifork_zone, ip->i_afp); xfs_iformat_fork()
227 ip->i_afp = NULL; xfs_iformat_fork()
228 xfs_idestroy_fork(ip, XFS_DATA_FORK); xfs_iformat_fork()
245 xfs_inode_t *ip, xfs_iformat_local()
258 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { xfs_iformat_local()
259 xfs_warn(ip->i_mount, xfs_iformat_local()
261 (unsigned long long) ip->i_ino, size, xfs_iformat_local()
262 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); xfs_iformat_local()
264 ip->i_mount, dip); xfs_iformat_local()
267 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iformat_local()
297 xfs_inode_t *ip, xfs_iformat_extents()
307 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iformat_extents()
316 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { xfs_iformat_extents()
317 xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).", xfs_iformat_extents()
318 (unsigned long long) ip->i_ino, nex); xfs_iformat_extents()
320 ip->i_mount, dip); xfs_iformat_extents()
335 xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip)); xfs_iformat_extents()
341 XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); xfs_iformat_extents()
343 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) xfs_iformat_extents()
348 ip->i_mount); xfs_iformat_extents()
366 xfs_inode_t *ip, xfs_iformat_btree()
370 struct xfs_mount *mp = ip->i_mount; xfs_iformat_btree()
377 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iformat_btree()
389 if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= xfs_iformat_btree()
390 XFS_IFORK_MAXEXT(ip, whichfork) || xfs_iformat_btree()
393 XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { xfs_iformat_btree()
395 (unsigned long long) ip->i_ino); xfs_iformat_btree()
408 xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), xfs_iformat_btree()
423 xfs_inode_t *ip, xfs_iread_extents()
430 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_iread_extents()
432 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { xfs_iread_extents()
434 ip->i_mount); xfs_iread_extents()
437 nextents = XFS_IFORK_NEXTENTS(ip, whichfork); xfs_iread_extents()
438 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iread_extents()
446 error = xfs_bmap_read_extents(tp, ip, whichfork); xfs_iread_extents()
452 xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip)); xfs_iread_extents()
469 * ip -- the inode whose if_broot area is changing
475 xfs_inode_t *ip, xfs_iroot_realloc()
479 struct xfs_mount *mp = ip->i_mount; xfs_iroot_realloc()
495 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iroot_realloc()
526 XFS_IFORK_SIZE(ip, whichfork)); xfs_iroot_realloc()
550 XFS_BMBT_BLOCK_LEN(ip->i_mount)); xfs_iroot_realloc()
581 XFS_IFORK_SIZE(ip, whichfork)); xfs_iroot_realloc()
597 * ip -- the inode whose if_data area is changing
603 xfs_inode_t *ip, xfs_idata_realloc()
615 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_idata_realloc()
675 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); xfs_idata_realloc()
680 xfs_inode_t *ip, xfs_idestroy_fork()
685 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_idestroy_fork()
697 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { xfs_idestroy_fork()
716 kmem_zone_free(xfs_ifork_zone, ip->i_afp); xfs_idestroy_fork()
717 ip->i_afp = NULL; xfs_idestroy_fork()
735 xfs_inode_t *ip, xfs_iextents_copy()
745 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iextents_copy()
746 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); xfs_iextents_copy()
750 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); xfs_iextents_copy()
777 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip)); xfs_iextents_copy()
794 xfs_inode_t *ip, xfs_iflush_fork()
811 ifp = XFS_IFORK_PTR(ip, whichfork); xfs_iflush_fork()
821 mp = ip->i_mount; xfs_iflush_fork()
822 switch (XFS_IFORK_FORMAT(ip, whichfork)) { xfs_iflush_fork()
827 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); xfs_iflush_fork()
838 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); xfs_iflush_fork()
839 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, xfs_iflush_fork()
849 XFS_IFORK_SIZE(ip, whichfork)); xfs_iflush_fork()
859 xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); xfs_iflush_fork()
867 &ip->i_df.if_u2.if_uuid, xfs_iflush_fork()
911 xfs_inode_t *ip, /* incore inode pointer */ xfs_iext_insert()
917 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; xfs_iext_insert()
920 trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_); xfs_iext_insert()
1162 xfs_inode_t *ip, /* incore inode pointer */ xfs_iext_remove()
1167 xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df; xfs_iext_remove()
1171 trace_xfs_iext_remove(ip, idx, state, _RET_IP_); xfs_iext_remove()
80 xfs_iformat_fork( xfs_inode_t *ip, xfs_dinode_t *dip) xfs_iformat_fork() argument
244 xfs_iformat_local( xfs_inode_t *ip, xfs_dinode_t *dip, int whichfork, int size) xfs_iformat_local() argument
296 xfs_iformat_extents( xfs_inode_t *ip, xfs_dinode_t *dip, int whichfork) xfs_iformat_extents() argument
365 xfs_iformat_btree( xfs_inode_t *ip, xfs_dinode_t *dip, int whichfork) xfs_iformat_btree() argument
421 xfs_iread_extents( xfs_trans_t *tp, xfs_inode_t *ip, int whichfork) xfs_iread_extents() argument
474 xfs_iroot_realloc( xfs_inode_t *ip, int rec_diff, int whichfork) xfs_iroot_realloc() argument
602 xfs_idata_realloc( xfs_inode_t *ip, int byte_diff, int whichfork) xfs_idata_realloc() argument
679 xfs_idestroy_fork( xfs_inode_t *ip, int whichfork) xfs_idestroy_fork() argument
734 xfs_iextents_copy( xfs_inode_t *ip, xfs_bmbt_rec_t *dp, int whichfork) xfs_iextents_copy() argument
793 xfs_iflush_fork( xfs_inode_t *ip, xfs_dinode_t *dip, xfs_inode_log_item_t *iip, int whichfork) xfs_iflush_fork() argument
910 xfs_iext_insert( xfs_inode_t *ip, xfs_extnum_t idx, xfs_extnum_t count, xfs_bmbt_irec_t *new, int state) xfs_iext_insert() argument
1161 xfs_iext_remove( xfs_inode_t *ip, xfs_extnum_t idx, int ext_diff, int state) xfs_iext_remove() argument
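Note: the xfs_iformat_* hits above all branch on the on-disk fork format, since a data or attribute fork can be stored inline (local), as a flat extent list, or as a B-tree root. A minimal user-space sketch of that dispatch shape follows; the enum values and reader functions are illustrative stand-ins for XFS_DINODE_FMT_* and xfs_iformat_local/extents/btree, not the real XFS helpers.

    /* Sketch of the fork-format dispatch seen in xfs_iformat_fork(). */
    #include <stdio.h>

    enum fork_fmt { FMT_LOCAL, FMT_EXTENTS, FMT_BTREE };

    static int read_local(int size)   { printf("inline, %d bytes\n", size); return 0; }
    static int read_extents(void)     { printf("extent list\n"); return 0; }
    static int read_btree(void)       { printf("btree root\n"); return 0; }

    static int read_fork(enum fork_fmt fmt, int size)
    {
            switch (fmt) {
            case FMT_LOCAL:                 /* data stored inside the inode */
                    return read_local(size);
            case FMT_EXTENTS:               /* flat array of extent records */
                    return read_extents();
            case FMT_BTREE:                 /* bmap B-tree root in the fork */
                    return read_btree();
            default:
                    return -1;              /* unknown format: treated as corruption */
            }
    }

    int main(void)
    {
            return read_fork(FMT_EXTENTS, 0);
    }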
/linux-4.4.14/arch/x86/kernel/
H A Ddumpstack_32.c139 u8 *ip; show_regs() local
146 ip = (u8 *)regs->ip - code_prologue; show_regs()
147 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { show_regs()
149 ip = (u8 *)regs->ip; show_regs()
152 for (i = 0; i < code_len; i++, ip++) { show_regs()
153 if (ip < (u8 *)PAGE_OFFSET || show_regs()
154 probe_kernel_address(ip, c)) { show_regs()
158 if (ip == (u8 *)regs->ip) show_regs()
167 int is_valid_bugaddr(unsigned long ip) is_valid_bugaddr() argument
171 if (ip < PAGE_OFFSET) is_valid_bugaddr()
173 if (probe_kernel_address((unsigned short *)ip, ud2)) is_valid_bugaddr()
H A Dftrace.c56 static int ftrace_calc_offset(long ip, long addr) ftrace_calc_offset() argument
58 return (int)(addr - ip); ftrace_calc_offset()
61 static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) ftrace_call_replace() argument
66 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); ftrace_call_replace()
81 static unsigned long text_ip_addr(unsigned long ip) text_ip_addr() argument
91 if (within(ip, (unsigned long)_text, (unsigned long)_etext)) text_ip_addr()
92 ip = (unsigned long)__va(__pa_symbol(ip)); text_ip_addr()
94 return ip; text_ip_addr()
103 ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code, ftrace_modify_code_direct() argument
119 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_modify_code_direct()
126 ip = text_ip_addr(ip); ftrace_modify_code_direct()
129 if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) ftrace_modify_code_direct()
141 unsigned long ip = rec->ip; ftrace_make_nop() local
143 old = ftrace_call_replace(ip, addr); ftrace_make_nop()
155 return ftrace_modify_code_direct(rec->ip, old, new); ftrace_make_nop()
165 unsigned long ip = rec->ip; ftrace_make_call() local
168 new = ftrace_call_replace(ip, addr); ftrace_make_call()
171 return ftrace_modify_code_direct(rec->ip, old, new); ftrace_make_call()
208 ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
228 static int update_ftrace_func(unsigned long ip, void *new) update_ftrace_func() argument
233 memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); update_ftrace_func()
235 ftrace_update_func = ip; update_ftrace_func()
242 ret = ftrace_modify_code(ip, old, new); update_ftrace_func()
251 unsigned long ip = (unsigned long)(&ftrace_call); ftrace_update_ftrace_func() local
255 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
256 ret = update_ftrace_func(ip, new); ftrace_update_ftrace_func()
260 ip = (unsigned long)(&ftrace_regs_call); ftrace_update_ftrace_func()
261 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
262 ret = update_ftrace_func(ip, new); ftrace_update_ftrace_func()
268 static int is_ftrace_caller(unsigned long ip) is_ftrace_caller() argument
270 if (ip == ftrace_update_func) is_ftrace_caller()
285 unsigned long ip; ftrace_int3_handler() local
290 ip = regs->ip - 1; ftrace_int3_handler()
291 if (!ftrace_location(ip) && !is_ftrace_caller(ip)) ftrace_int3_handler()
294 regs->ip += MCOUNT_INSN_SIZE - 1; ftrace_int3_handler()
299 static int ftrace_write(unsigned long ip, const char *val, int size) ftrace_write() argument
301 ip = text_ip_addr(ip); ftrace_write()
303 if (probe_kernel_write((void *)ip, val, size)) ftrace_write()
309 static int add_break(unsigned long ip, const char *old) add_break() argument
314 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) add_break()
321 return ftrace_write(ip, &brk, 1); add_break()
327 unsigned long ip = rec->ip; add_brk_on_call() local
329 old = ftrace_call_replace(ip, addr); add_brk_on_call()
331 return add_break(rec->ip, old); add_brk_on_call()
341 return add_break(rec->ip, old); add_brk_on_nop()
383 unsigned long ip = rec->ip; remove_breakpoint() local
386 if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE)) remove_breakpoint()
407 nop = ftrace_call_replace(ip, ftrace_addr); remove_breakpoint()
414 nop = ftrace_call_replace(ip, ftrace_addr); remove_breakpoint()
421 return ftrace_write(ip, nop, 1); remove_breakpoint()
424 static int add_update_code(unsigned long ip, unsigned const char *new) add_update_code() argument
427 ip++; add_update_code()
429 return ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1); add_update_code()
434 unsigned long ip = rec->ip; add_update_call() local
437 new = ftrace_call_replace(ip, addr); add_update_call()
438 return add_update_code(ip, new); add_update_call()
443 unsigned long ip = rec->ip; add_update_nop() local
447 return add_update_code(ip, new); add_update_nop()
478 unsigned long ip = rec->ip; finish_update_call() local
481 new = ftrace_call_replace(ip, addr); finish_update_call()
483 return ftrace_write(ip, new, 1); finish_update_call()
488 unsigned long ip = rec->ip; finish_update_nop() local
493 return ftrace_write(ip, new, 1); finish_update_nop()
604 ftrace_modify_code(unsigned long ip, unsigned const char *old_code, ftrace_modify_code() argument
609 ret = add_break(ip, old_code); ftrace_modify_code()
615 ret = add_update_code(ip, new_code); ftrace_modify_code()
621 ret = ftrace_write(ip, new_code, 1); ftrace_modify_code()
633 if (ftrace_write(ip, old_code, 1)) ftrace_modify_code()
654 static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) ftrace_jmp_replace() argument
660 calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); ftrace_jmp_replace()
728 unsigned long ip; create_trampoline() local
766 ip = (unsigned long)trampoline + size; create_trampoline()
769 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return); create_trampoline()
828 unsigned long ip; arch_ftrace_update_trampoline() local
847 ip = ops->trampoline + offset; arch_ftrace_update_trampoline()
852 new = ftrace_call_replace(ip, (unsigned long)func); arch_ftrace_update_trampoline()
853 ret = update_ftrace_func(ip, new); arch_ftrace_update_trampoline()
942 static int ftrace_mod_jmp(unsigned long ip, void *func) ftrace_mod_jmp() argument
946 new = ftrace_jmp_replace(ip, (unsigned long)func); ftrace_mod_jmp()
948 return update_ftrace_func(ip, new); ftrace_mod_jmp()
953 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_enable_ftrace_graph_caller() local
955 return ftrace_mod_jmp(ip, &ftrace_graph_caller); ftrace_enable_ftrace_graph_caller()
960 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_disable_ftrace_graph_caller() local
962 return ftrace_mod_jmp(ip, &ftrace_stub); ftrace_disable_ftrace_graph_caller()
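Note: the x86 ftrace hits above (ftrace_calc_offset/ftrace_call_replace) build a 5-byte "call rel32" whose displacement is relative to the end of the instruction, i.e. rel32 = target - (ip + MCOUNT_INSN_SIZE). A hedged user-space sketch of just that encoding step, assuming MCOUNT_INSN_SIZE == 5 and a little-endian host:

    /* Sketch of the relative-call encoding behind ftrace_call_replace(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CALL_INSN_SIZE 5                /* opcode byte + 32-bit displacement */

    static void call_replace(unsigned long ip, unsigned long target,
                             unsigned char insn[CALL_INSN_SIZE])
    {
            /* displacement is measured from the byte after the call */
            int32_t rel = (int32_t)(target - (ip + CALL_INSN_SIZE));

            insn[0] = 0xe8;                 /* near call, rel32 */
            memcpy(&insn[1], &rel, sizeof(rel)); /* little-endian host assumed */
    }

    int main(void)
    {
            unsigned char insn[CALL_INSN_SIZE];
            int i;

            call_replace(0x1000, 0x2000, insn);
            for (i = 0; i < CALL_INSN_SIZE; i++)
                    printf("%02x ", insn[i]);
            printf("\n");                   /* prints: e8 fb 0f 00 00 */
            return 0;
    }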
H A Djump_label.c27 static void bug_at(unsigned char *ip, int line) bug_at() argument
35 ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line); bug_at()
H A Ddumpstack_64.c317 u8 *ip; show_regs() local
325 ip = (u8 *)regs->ip - code_prologue; show_regs()
326 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { show_regs()
328 ip = (u8 *)regs->ip; show_regs()
331 for (i = 0; i < code_len; i++, ip++) { show_regs()
332 if (ip < (u8 *)PAGE_OFFSET || show_regs()
333 probe_kernel_address(ip, c)) { show_regs()
337 if (ip == (u8 *)regs->ip) show_regs()
346 int is_valid_bugaddr(unsigned long ip) is_valid_bugaddr() argument
350 if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2))) is_valid_bugaddr()
/linux-4.4.14/arch/x86/kernel/kprobes/
H A Dftrace.c33 * Emulate singlestep (and also recover regs->ip) __skip_singlestep()
36 regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; __skip_singlestep()
43 regs->ip = orig_ip; __skip_singlestep()
58 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, kprobe_ftrace_handler() argument
68 p = get_kprobe((kprobe_opcode_t *)ip); kprobe_ftrace_handler()
76 unsigned long orig_ip = regs->ip; kprobe_ftrace_handler()
77 /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ kprobe_ftrace_handler()
78 regs->ip = ip + sizeof(kprobe_opcode_t); kprobe_ftrace_handler()
85 * If pre_handler returns !0, it sets regs->ip and kprobe_ftrace_handler()
H A Dcommon.h8 /* Skip cs, ip, orig_ax. */ \
41 /* Skip orig_ax, ip, cs */ \
45 /* Skip cs, ip, orig_ax and gs. */ \
65 /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
/linux-4.4.14/arch/mips/kernel/
H A Dftrace.c47 static inline int in_kernel_space(unsigned long ip) in_kernel_space() argument
49 if (ip >= (unsigned long)_stext && in_kernel_space()
50 ip <= (unsigned long)_etext) in_kernel_space()
57 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
90 static int ftrace_modify_code(unsigned long ip, unsigned int new_code) ftrace_modify_code() argument
95 /* *(unsigned int *)ip = new_code; */ ftrace_modify_code()
96 safe_store_code(new_code, ip, faulted); ftrace_modify_code()
103 flush_icache_range(ip, ip + 8); ftrace_modify_code()
110 static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, ftrace_modify_code_2() argument
116 safe_store_code(new_code1, ip, faulted); ftrace_modify_code_2()
120 ip += 4; ftrace_modify_code_2()
121 safe_store_code(new_code2, ip, faulted); ftrace_modify_code_2()
125 ip -= 4; ftrace_modify_code_2()
128 flush_icache_range(ip, ip + 8); ftrace_modify_code_2()
134 static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, ftrace_modify_code_2r() argument
140 ip += 4; ftrace_modify_code_2r()
141 safe_store_code(new_code2, ip, faulted); ftrace_modify_code_2r()
145 ip -= 4; ftrace_modify_code_2r()
146 safe_store_code(new_code1, ip, faulted); ftrace_modify_code_2r()
152 flush_icache_range(ip, ip + 8); ftrace_modify_code_2r()
195 unsigned long ip = rec->ip; ftrace_make_nop() local
198 * If ip is in kernel space, no long call, otherwise, long call is ftrace_make_nop()
201 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; ftrace_make_nop()
203 return ftrace_modify_code(ip, new); ftrace_make_nop()
212 return ftrace_modify_code_2(ip, new, INSN_NOP); ftrace_make_nop()
219 unsigned long ip = rec->ip; ftrace_make_call() local
221 new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; ftrace_make_call()
224 return ftrace_modify_code(ip, new); ftrace_make_call()
226 return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? ftrace_make_call()
283 unsigned long sp, ip, tmp; ftrace_get_parent_ra_addr() local
288 * For module, move the ip from the return address after the ftrace_get_parent_ra_addr()
292 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); ftrace_get_parent_ra_addr()
299 /* get the code at "ip": code = *(unsigned int *)ip; */ ftrace_get_parent_ra_addr()
300 safe_load_code(code, ip, faulted); ftrace_get_parent_ra_addr()
313 ip -= 4; ftrace_get_parent_ra_addr()
392 * Get the recorded ip of the current mcount calling site in the prepare_ftrace_return()
H A Dprocess.c194 static inline int is_ra_save_ins(union mips_instruction *ip) is_ra_save_ins() argument
208 if (mm_insn_16bit(ip->halfword[0])) { is_ra_save_ins()
209 mmi.word = (ip->halfword[0] << 16); is_ra_save_ins()
216 mmi.halfword[0] = ip->halfword[1]; is_ra_save_ins()
217 mmi.halfword[1] = ip->halfword[0]; is_ra_save_ins()
228 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && is_ra_save_ins()
229 ip->i_format.rs == 29 && is_ra_save_ins()
230 ip->i_format.rt == 31; is_ra_save_ins()
234 static inline int is_jump_ins(union mips_instruction *ip) is_jump_ins() argument
247 mmi.word = (ip->halfword[0] << 16); is_jump_ins()
251 ip->j_format.opcode == mm_jal32_op) is_jump_ins()
253 if (ip->r_format.opcode != mm_pool32a_op || is_jump_ins()
254 ip->r_format.func != mm_pool32axf_op) is_jump_ins()
256 return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op; is_jump_ins()
258 if (ip->j_format.opcode == j_op) is_jump_ins()
260 if (ip->j_format.opcode == jal_op) is_jump_ins()
262 if (ip->r_format.opcode != spec_op) is_jump_ins()
264 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; is_jump_ins()
268 static inline int is_sp_move_ins(union mips_instruction *ip) is_sp_move_ins() argument
279 if (mm_insn_16bit(ip->halfword[0])) { is_sp_move_ins()
282 mmi.word = (ip->halfword[0] << 16); is_sp_move_ins()
288 return ip->mm_i_format.opcode == mm_addiu32_op && is_sp_move_ins()
289 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; is_sp_move_ins()
292 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) is_sp_move_ins()
294 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) is_sp_move_ins()
303 union mips_instruction *ip = (void *) (((char *) info->func) - 1); get_frame_info() local
305 union mips_instruction *ip = info->func; get_frame_info()
313 if (!ip) get_frame_info()
320 for (i = 0; i < max_insns; i++, ip++) { get_frame_info()
322 if (is_jump_ins(ip)) get_frame_info()
325 if (is_sp_move_ins(ip)) get_frame_info()
328 if (mm_insn_16bit(ip->halfword[0])) get_frame_info()
332 if (ip->halfword[0] & mm_addiusp_func) get_frame_info()
334 tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); get_frame_info()
337 tmp = (ip->halfword[0] >> 1); get_frame_info()
340 ip = (void *) &ip->halfword[1]; get_frame_info()
341 ip--; get_frame_info()
344 info->frame_size = - ip->i_format.simmediate; get_frame_info()
348 if (info->pc_offset == -1 && is_ra_save_ins(ip)) { get_frame_info()
350 ip->i_format.simmediate / sizeof(long); get_frame_info()
373 union mips_instruction *ip = (void *)schedule; get___schedule_addr() local
377 for (i = 0; i < max_insns; i++, ip++) { get___schedule_addr()
378 if (ip->j_format.opcode == j_op) get___schedule_addr()
379 return J_TARGET(ip, ip->j_format.target); get___schedule_addr()
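Note: get_frame_info() above scans a function's first instructions, stopping at the first jump, taking the frame size from the sp-adjust instruction and the saved-ra slot from the ra store. A simplified sketch of that scan loop; the insn struct and its fields are stand-ins for the union mips_instruction decoding done by is_jump_ins/is_sp_move_ins/is_ra_save_ins.

    /* Sketch of the prologue scan in get_frame_info(). */
    #include <stdio.h>

    struct insn {
            int is_jump;
            int sp_delta;           /* nonzero for "addiu sp, sp, delta" */
            int ra_save_off;        /* >= 0 for "sw ra, off(sp)" */
    };

    struct frame_info {
            int frame_size;
            int pc_offset;          /* index of the saved ra, in words */
    };

    static void scan_prologue(const struct insn *ip, int max_insns,
                              struct frame_info *info)
    {
            int i;

            info->frame_size = 0;
            info->pc_offset = -1;

            for (i = 0; i < max_insns; i++, ip++) {
                    if (ip->is_jump)                /* leaf of interest ends here */
                            break;
                    if (!info->frame_size && ip->sp_delta)
                            info->frame_size = -ip->sp_delta;
                    if (info->pc_offset == -1 && ip->ra_save_off >= 0)
                            info->pc_offset = ip->ra_save_off / (int)sizeof(long);
            }
    }

    int main(void)
    {
            struct insn prologue[] = {
                    { 0, -32, -1 },         /* addiu sp, sp, -32 */
                    { 0,   0, 28 },         /* sw ra, 28(sp)      */
                    { 1,   0, -1 },         /* jump: stop scanning */
            };
            struct frame_info fi;

            scan_prologue(prologue, 3, &fi);
            printf("frame_size=%d pc_offset=%d\n", fi.frame_size, fi.pc_offset);
            return 0;
    }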
/linux-4.4.14/arch/powerpc/kernel/
H A Dftrace.c32 ftrace_call_replace(unsigned long ip, unsigned long addr, int link) ftrace_call_replace() argument
39 op = create_branch((unsigned int *)ip, addr, link ? 1 : 0); ftrace_call_replace()
45 ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) ftrace_modify_code() argument
60 if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_modify_code()
68 if (patch_instruction((unsigned int *)ip, new)) ftrace_modify_code()
77 static int test_24bit_addr(unsigned long ip, unsigned long addr) test_24bit_addr() argument
82 return create_branch((unsigned int *)ip, addr, 0); test_24bit_addr()
92 static unsigned long find_bl_target(unsigned long ip, unsigned int op) find_bl_target() argument
101 return ip + (long)offset; find_bl_target()
111 unsigned long ip = rec->ip; __ftrace_make_nop() local
115 if (probe_kernel_read(&op, (void *)ip, sizeof(int))) __ftrace_make_nop()
125 tramp = (void *)find_bl_target(ip, op); __ftrace_make_nop()
127 pr_devel("ip:%lx jumps to %p", ip, tramp); __ftrace_make_nop()
163 if (patch_instruction((unsigned int *)ip, op)) __ftrace_make_nop()
176 unsigned long ip = rec->ip; __ftrace_make_nop() local
179 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) __ftrace_make_nop()
189 tramp = find_bl_target(ip, op); __ftrace_make_nop()
199 pr_devel("ip:%lx jumps to %lx", ip, tramp); __ftrace_make_nop()
233 if (patch_instruction((unsigned int *)ip, op)) __ftrace_make_nop()
244 unsigned long ip = rec->ip; ftrace_make_nop() local
252 if (test_24bit_addr(ip, addr)) { ftrace_make_nop()
254 old = ftrace_call_replace(ip, addr, 1); ftrace_make_nop()
256 return ftrace_modify_code(ip, old, new); ftrace_make_nop()
294 void *ip = (void *)rec->ip; __ftrace_make_call() local
297 if (probe_kernel_read(op, ip, sizeof(op))) __ftrace_make_call()
321 if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { __ftrace_make_call()
326 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { __ftrace_make_call()
338 unsigned long ip = rec->ip; __ftrace_make_call() local
341 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) __ftrace_make_call()
357 op = create_branch((unsigned int *)ip, __ftrace_make_call()
364 pr_devel("write to %lx\n", rec->ip); __ftrace_make_call()
366 if (patch_instruction((unsigned int *)ip, op)) __ftrace_make_call()
376 unsigned long ip = rec->ip; ftrace_make_call() local
384 if (test_24bit_addr(ip, addr)) { ftrace_make_call()
387 new = ftrace_call_replace(ip, addr, 1); ftrace_make_call()
388 return ftrace_modify_code(ip, old, new); ftrace_make_call()
411 unsigned long ip = (unsigned long)(&ftrace_call); ftrace_update_ftrace_func() local
416 new = ftrace_call_replace(ip, (unsigned long)func, 1); ftrace_update_ftrace_func()
417 ret = ftrace_modify_code(ip, old, new); ftrace_update_ftrace_func()
488 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_enable_ftrace_graph_caller() local
493 old = ftrace_call_replace(ip, stub, 0); ftrace_enable_ftrace_graph_caller()
494 new = ftrace_call_replace(ip, addr, 0); ftrace_enable_ftrace_graph_caller()
496 return ftrace_modify_code(ip, old, new); ftrace_enable_ftrace_graph_caller()
501 unsigned long ip = (unsigned long)(&ftrace_graph_call); ftrace_disable_ftrace_graph_caller() local
506 old = ftrace_call_replace(ip, addr, 0); ftrace_disable_ftrace_graph_caller()
507 new = ftrace_call_replace(ip, stub, 0); ftrace_disable_ftrace_graph_caller()
509 return ftrace_modify_code(ip, old, new); ftrace_disable_ftrace_graph_caller()
517 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) prepare_ftrace_return() argument
530 trace.func = ip; prepare_ftrace_return()
537 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) prepare_ftrace_return()
H A Dstacktrace.c27 unsigned long newsp, ip; save_context_stack() local
33 ip = stack[STACK_FRAME_LR_SAVE]; save_context_stack()
35 if (savesched || !in_sched_functions(ip)) { save_context_stack()
37 trace->entries[trace->nr_entries++] = ip; save_context_stack()
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dmmap.c66 struct hfi1_mmap_info *ip = hfi1_release_mmap_info() local
68 struct hfi1_ibdev *dev = to_idev(ip->context->device); hfi1_release_mmap_info()
71 list_del(&ip->pending_mmaps); hfi1_release_mmap_info()
74 vfree(ip->obj); hfi1_release_mmap_info()
75 kfree(ip); hfi1_release_mmap_info()
84 struct hfi1_mmap_info *ip = vma->vm_private_data; hfi1_vma_open() local
86 kref_get(&ip->ref); hfi1_vma_open()
91 struct hfi1_mmap_info *ip = vma->vm_private_data; hfi1_vma_close() local
93 kref_put(&ip->ref, hfi1_release_mmap_info); hfi1_vma_close()
112 struct hfi1_mmap_info *ip, *pp; hfi1_mmap() local
121 list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, hfi1_mmap()
124 if (context != ip->context || (__u64) offset != ip->offset) hfi1_mmap()
127 if (size > ip->size) hfi1_mmap()
130 list_del_init(&ip->pending_mmaps); hfi1_mmap()
133 ret = remap_vmalloc_range(vma, ip->obj, 0); hfi1_mmap()
137 vma->vm_private_data = ip; hfi1_mmap()
153 struct hfi1_mmap_info *ip; hfi1_create_mmap_info() local
155 ip = kmalloc(sizeof(*ip), GFP_KERNEL); hfi1_create_mmap_info()
156 if (!ip) hfi1_create_mmap_info()
164 ip->offset = dev->mmap_offset; hfi1_create_mmap_info()
168 INIT_LIST_HEAD(&ip->pending_mmaps); hfi1_create_mmap_info()
169 ip->size = size; hfi1_create_mmap_info()
170 ip->context = context; hfi1_create_mmap_info()
171 ip->obj = obj; hfi1_create_mmap_info()
172 kref_init(&ip->ref); hfi1_create_mmap_info()
175 return ip; hfi1_create_mmap_info()
178 void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip, hfi1_update_mmap_info() argument
186 ip->offset = dev->mmap_offset; hfi1_update_mmap_info()
190 ip->size = size; hfi1_update_mmap_info()
191 ip->obj = obj; hfi1_update_mmap_info()
H A Dsrq.c167 srq->ip = hfi1_create_srq()
170 if (!srq->ip) { hfi1_create_srq()
175 err = ib_copy_to_udata(udata, &srq->ip->offset, hfi1_create_srq()
176 sizeof(srq->ip->offset)); hfi1_create_srq()
182 srq->ip = NULL; hfi1_create_srq()
202 if (srq->ip) { hfi1_create_srq()
204 list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); hfi1_create_srq()
212 kfree(srq->ip); hfi1_create_srq()
322 if (srq->ip) { hfi1_modify_srq()
323 struct hfi1_mmap_info *ip = srq->ip; hfi1_modify_srq() local
327 hfi1_update_mmap_info(dev, ip, s, wq); hfi1_modify_srq()
334 ret = ib_copy_to_udata(udata, &ip->offset, hfi1_modify_srq()
335 sizeof(ip->offset)); hfi1_modify_srq()
345 if (list_empty(&ip->pending_mmaps)) hfi1_modify_srq()
346 list_add(&ip->pending_mmaps, hfi1_modify_srq()
390 if (srq->ip) hfi1_destroy_srq()
391 kref_put(&srq->ip->ref, hfi1_release_mmap_info); hfi1_destroy_srq()
/linux-4.4.14/include/uapi/linux/netfilter_bridge/
H A Debt_log.h6 #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
H A Debt_ip.h10 * added ip-sport and ip-dport
28 #define EBT_IP_MATCH "ip"
/linux-4.4.14/arch/sparc/include/asm/
H A Dperf_event.h7 #define perf_arch_fetch_caller_regs(regs, ip) \
22 (regs)->tpc = (ip); \
/linux-4.4.14/tools/lib/lockdep/uinclude/linux/
H A Dkallsyms.h21 static inline void print_ip_sym(unsigned long ip) print_ip_sym() argument
25 name = backtrace_symbols((void **)&ip, 1); print_ip_sym()
/linux-4.4.14/arch/x86/include/asm/trace/
H A Dexceptions.h21 __field( unsigned long, ip )
27 __entry->ip = regs->ip;
31 TP_printk("address=%pf ip=%pf error_code=0x%lx",
32 (void *)__entry->address, (void *)__entry->ip,
/linux-4.4.14/arch/mips/rb532/
H A Dirq.c92 static inline void enable_local_irq(unsigned int ip) enable_local_irq() argument
94 int ipnum = 0x100 << ip; enable_local_irq()
99 static inline void disable_local_irq(unsigned int ip) disable_local_irq() argument
101 int ipnum = 0x100 << ip; disable_local_irq()
106 static inline void ack_local_irq(unsigned int ip) ack_local_irq() argument
108 int ipnum = 0x100 << ip; ack_local_irq()
116 int ip = irq_nr - GROUP0_IRQ_BASE; rb532_enable_irq() local
119 if (ip < 0) rb532_enable_irq()
122 group = ip >> 5; rb532_enable_irq()
124 ip &= (1 << 5) - 1; rb532_enable_irq()
125 intr_bit = 1 << ip; rb532_enable_irq()
137 int ip = irq_nr - GROUP0_IRQ_BASE; rb532_disable_irq() local
140 if (ip < 0) { rb532_disable_irq()
143 group = ip >> 5; rb532_disable_irq()
145 ip &= (1 << 5) - 1; rb532_disable_irq()
146 intr_bit = 1 << ip; rb532_disable_irq()
216 unsigned int ip, pend, group; plat_irq_dispatch() local
223 ip = (cp0_cause & 0x7c00); plat_irq_dispatch()
224 if (ip) { plat_irq_dispatch()
225 group = 21 + (fls(ip) - 32); plat_irq_dispatch()
/linux-4.4.14/arch/microblaze/kernel/
H A Dftrace.c146 imm = *(unsigned int *)rec->ip; ftrace_make_nop()
149 bralid = *(unsigned int *)(rec->ip + 4); ftrace_make_nop()
155 ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP); ftrace_make_nop()
156 ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); ftrace_make_nop()
158 ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); ftrace_make_nop()
167 pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", ftrace_make_call()
168 __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); ftrace_make_call()
169 ret = ftrace_modify_code(rec->ip, imm); ftrace_make_call()
172 ret += ftrace_modify_code(rec->ip + 4, bralid); ftrace_make_call()
184 unsigned long ip = (unsigned long)(&ftrace_call); ftrace_update_ftrace_func() local
193 pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", ftrace_update_ftrace_func()
194 __func__, (unsigned int)func, (unsigned int)ip, upper, lower); ftrace_update_ftrace_func()
197 ret = ftrace_modify_code(ip, upper); ftrace_update_ftrace_func()
198 ret += ftrace_modify_code(ip + 4, lower); ftrace_update_ftrace_func()
213 unsigned long ip = (unsigned long)(&ftrace_call_graph); ftrace_enable_ftrace_graph_caller() local
215 old_jump = *(unsigned int *)ip; /* save jump over instruction */ ftrace_enable_ftrace_graph_caller()
216 ret = ftrace_modify_code(ip, MICROBLAZE_NOP); ftrace_enable_ftrace_graph_caller()
225 unsigned long ip = (unsigned long)(&ftrace_call_graph); ftrace_disable_ftrace_graph_caller() local
227 ret = ftrace_modify_code(ip, old_jump); ftrace_disable_ftrace_graph_caller()
/linux-4.4.14/arch/sh/kernel/
H A Dftrace.c40 * 8c01106c: 68 24 .word 0x2468 <--- ip
42 * 8c011070: 26 4f lds.l @r15+,pr <--- ip + MCOUNT_INSN_SIZE
47 static unsigned char *ftrace_nop_replace(unsigned long ip) ftrace_nop_replace() argument
49 __raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop); ftrace_nop_replace()
53 static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) ftrace_call_replace() argument
185 do_ftrace_mod_code(unsigned long ip, void *new_code) do_ftrace_mod_code() argument
187 mod_code_ip = (void *)ip; do_ftrace_mod_code()
209 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, ftrace_modify_code() argument
225 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_modify_code()
233 if (do_ftrace_mod_code(ip, new_code)) ftrace_modify_code()
236 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); ftrace_modify_code()
243 unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET; ftrace_update_ftrace_func() local
246 memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE); ftrace_update_ftrace_func()
247 new = ftrace_call_replace(ip, (unsigned long)func); ftrace_update_ftrace_func()
249 return ftrace_modify_code(ip, old, new); ftrace_update_ftrace_func()
256 unsigned long ip = rec->ip; ftrace_make_nop() local
258 old = ftrace_call_replace(ip, addr); ftrace_make_nop()
259 new = ftrace_nop_replace(ip); ftrace_make_nop()
261 return ftrace_modify_code(rec->ip, old, new); ftrace_make_nop()
267 unsigned long ip = rec->ip; ftrace_make_call() local
269 old = ftrace_nop_replace(ip); ftrace_make_call()
270 new = ftrace_call_replace(ip, addr); ftrace_make_call()
272 return ftrace_modify_code(rec->ip, old, new); ftrace_make_call()
285 static int ftrace_mod(unsigned long ip, unsigned long old_addr, ftrace_mod() argument
290 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) ftrace_mod()
296 __raw_writel(new_addr, ip); ftrace_mod()
302 unsigned long ip, old_addr, new_addr; ftrace_enable_ftrace_graph_caller() local
304 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; ftrace_enable_ftrace_graph_caller()
308 return ftrace_mod(ip, old_addr, new_addr); ftrace_enable_ftrace_graph_caller()
313 unsigned long ip, old_addr, new_addr; ftrace_disable_ftrace_graph_caller() local
315 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; ftrace_disable_ftrace_graph_caller()
319 return ftrace_mod(ip, old_addr, new_addr); ftrace_disable_ftrace_graph_caller()
/linux-4.4.14/fs/isofs/
H A Djoliet.c19 __be16 *ip, ch; uni16_to_x8() local
22 ip = uni; uni16_to_x8()
25 while ((ch = get_unaligned(ip)) && len) { uni16_to_x8()
32 ip++; uni16_to_x8()
/linux-4.4.14/tools/build/feature/
H A Dtest-libunwind-debug-frame.c6 unw_word_t ip, unw_word_t segbase,
H A Dtest-libunwind.c5 unw_word_t ip,
/linux-4.4.14/tools/perf/util/
H A Dunwind-libdw.c23 static int __report_module(struct addr_location *al, u64 ip, __report_module() argument
31 MAP__FUNCTION, ip, al); __report_module()
39 mod = dwfl_addrmodule(ui->dwfl, ip); __report_module()
45 return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1; __report_module()
48 static int report_module(u64 ip, struct unwind_info *ui) report_module() argument
52 return __report_module(&al, ip, ui); report_module()
55 static int entry(u64 ip, struct unwind_info *ui) entry() argument
61 if (__report_module(&al, ip, ui)) entry()
64 e.ip = ip; entry()
68 pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", entry()
70 ip, entry()
71 al.map ? al.map->map_ip(al.map, ip) : (u64) 0); entry()
179 Dwarf_Word ip; unwind__get_entries() local
189 err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP); unwind__get_entries()
193 err = report_module(ip, &ui); unwind__get_entries()
H A Dunwind-libunwind.c38 unw_word_t ip,
47 unw_word_t ip,
316 static struct map *find_map(unw_word_t ip, struct unwind_info *ui) find_map() argument
321 MAP__FUNCTION, ip, &al); find_map()
326 find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, find_proc_info() argument
335 map = find_map(ip, ui); find_proc_info()
352 ret = dwarf_search_unwind_table(as, ip, &di, pi, find_proc_info()
371 if (dwarf_find_debug_frame(0, &di, ip, base, symfile, find_proc_info()
373 return dwarf_search_unwind_table(as, ip, &di, pi, find_proc_info()
524 static int entry(u64 ip, struct thread *thread, entry() argument
531 MAP__FUNCTION, ip, &al); entry()
533 e.ip = ip; entry()
537 pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", entry()
539 ip, entry()
540 al.map ? al.map->map_ip(al.map, ip) : (u64) 0); entry()
630 unw_word_t ip; get_entries() local
632 unw_get_reg(&c, UNW_REG_IP, &ip); get_entries()
633 ret = ip ? entry(ip, ui->thread, cb, arg) : 0; get_entries()
643 u64 ip; unwind__get_entries() local
654 ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP); unwind__get_entries()
658 ret = entry(ip, thread, cb, arg); unwind__get_entries()
/linux-4.4.14/arch/ia64/include/asm/
H A Dftrace.h12 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
13 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
/linux-4.4.14/drivers/scsi/
H A Dscsicam.c58 * @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
61 * SCSI-CAM system, storing the results in ip as required
67 int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip) scsicam_bios_param() argument
78 ret = scsi_partsize(p, (unsigned long)capacity, (unsigned int *)ip + 2, scsicam_bios_param()
79 (unsigned int *)ip + 0, (unsigned int *)ip + 1); scsicam_bios_param()
86 ret = setsize((unsigned long)capacity, (unsigned int *)ip + 2, scsicam_bios_param()
87 (unsigned int *)ip + 0, (unsigned int *)ip + 1); scsicam_bios_param()
92 if (ret || ip[0] > 255 || ip[1] > 63) { scsicam_bios_param()
94 ip[0] = 255; scsicam_bios_param()
95 ip[1] = 63; scsicam_bios_param()
97 ip[0] = 64; scsicam_bios_param()
98 ip[1] = 32; scsicam_bios_param()
102 ip[2] = 65535; scsicam_bios_param()
104 ip[2] = (unsigned long)capacity / (ip[0] * ip[1]); scsicam_bios_param()
/linux-4.4.14/arch/unicore32/mm/
H A Dcache-ucv2.S72 2: mov ip, #0
73 movc p0.c5, ip, #14 @ Dcache flush all
76 3: mov ip, #0
77 movc p0.c5, ip, #20 @ Icache invalidate all
122 2: mov ip, #0
123 movc p0.c5, ip, #10 @ Dcache clean all
126 3: mov ip, #0
127 movc p0.c5, ip, #20 @ Icache invalidate all
139 mov ip, #0
140 movc p0.c5, ip, #14 @ Dcache flush all
175 2: mov ip, #0
176 movc p0.c5, ip, #10 @ Dcache clean all
207 2: mov ip, #0
208 movc p0.c5, ip, #14 @ Dcache flush all
/linux-4.4.14/tools/testing/selftests/x86/
H A Dunwind_vdso.c92 unsigned long ip; /* trap source */ member in struct:unwind_state
99 unsigned long ip = _Unwind_GetIP(ctx); trace_fn() local
102 if (ip == state->ip) trace_fn()
107 printf("\t 0x%lx\n", ip); trace_fn()
109 if (ip == return_address) { trace_fn()
139 unsigned long ip = ctx->uc_mcontext.gregs[REG_EIP]; sigtrap() local
141 if (!got_sysinfo && ip == sysinfo) { sigtrap()
148 ip, return_address); sigtrap()
154 if (ip == return_address) { sigtrap()
160 printf("\tSIGTRAP at 0x%lx\n", ip); sigtrap()
162 state.ip = ip; sigtrap()
/linux-4.4.14/net/netfilter/
H A Dxt_iprange.c14 #include <linux/ip.h>
27 m = ntohl(iph->saddr) < ntohl(info->src_min.ip); iprange_mt4()
28 m |= ntohl(iph->saddr) > ntohl(info->src_max.ip); iprange_mt4()
34 &info->src_min.ip, iprange_mt4()
35 &info->src_max.ip); iprange_mt4()
40 m = ntohl(iph->daddr) < ntohl(info->dst_min.ip); iprange_mt4()
41 m |= ntohl(iph->daddr) > ntohl(info->dst_max.ip); iprange_mt4()
47 &info->dst_min.ip, iprange_mt4()
48 &info->dst_max.ip); iprange_mt4()
H A Dxt_HMARK.c19 #include <net/ip.h>
258 struct iphdr *ip, _ip; hmark_pkt_set_htuple_ipv4() local
261 ip = (struct iphdr *) (skb->data + nhoff); hmark_pkt_set_htuple_ipv4()
262 if (ip->protocol == IPPROTO_ICMP) { hmark_pkt_set_htuple_ipv4()
264 if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { hmark_pkt_set_htuple_ipv4()
265 ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); hmark_pkt_set_htuple_ipv4()
266 if (ip == NULL) hmark_pkt_set_htuple_ipv4()
271 t->src = ip->saddr & info->src_mask.ip; hmark_pkt_set_htuple_ipv4()
272 t->dst = ip->daddr & info->dst_mask.ip; hmark_pkt_set_htuple_ipv4()
277 t->proto = ip->protocol; hmark_pkt_set_htuple_ipv4()
284 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) hmark_pkt_set_htuple_ipv4()
287 hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); hmark_pkt_set_htuple_ipv4()
H A Dxt_ecn.c13 #include <linux/ip.h>
14 #include <net/ip.h>
90 const struct ipt_ip *ip = par->entryinfo; ecn_mt_check4() local
99 (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { ecn_mt_check4()
132 const struct ip6t_ip6 *ip = par->entryinfo; ecn_mt_check6() local
141 (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { ecn_mt_check6()
/linux-4.4.14/tools/perf/tests/
H A Dhists_link.c15 u64 ip; member in struct:sample
24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
26 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
28 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
30 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
32 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
38 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
40 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
42 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
44 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
46 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
50 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
52 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
54 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
56 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
58 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
87 sample.ip = fake_common_samples[k].ip; evlist__for_each()
113 sample.ip = fake_samples[i][k].ip; evlist__for_each()
H A Dhists_filter.c15 u64 ip; member in struct:sample
25 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, .socket = 0 },
27 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, .socket = 0 },
29 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, .socket = 0 },
31 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, .socket = 0 }, /* will be merged */
33 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, .socket = 1 },
35 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 1 },
37 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, .socket = 2 },
39 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, .socket = 2 },
41 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, .socket = 3 },
43 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
81 sample.ip = fake_samples[i].ip; evlist__for_each()
/linux-4.4.14/arch/s390/include/asm/
H A Dftrace.h71 unsigned long ip) ftrace_generate_call_insn()
77 target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR; ftrace_generate_call_insn()
79 insn->disp = (target - ip) / 2; ftrace_generate_call_insn()
70 ftrace_generate_call_insn(struct ftrace_insn *insn, unsigned long ip) ftrace_generate_call_insn() argument
H A Dlivepatch.h35 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) klp_arch_set_pc() argument
37 regs->psw.addr = ip; klp_arch_set_pc()
/linux-4.4.14/tools/lib/lockdep/include/liblockdep/
H A Dcommon.h35 unsigned long ip; member in struct:lockdep_map
43 struct lockdep_map *nest_lock, unsigned long ip);
45 unsigned long ip);
/linux-4.4.14/arch/c6x/kernel/
H A Dmodule.c17 static inline int fixup_pcr(u32 *ip, Elf32_Addr dest, u32 maskbits, int shift) fixup_pcr() argument
20 long ep = (long)ip & ~31; fixup_pcr()
26 opcode = *ip; fixup_pcr()
29 *ip = opcode; fixup_pcr()
32 maskbits, ip, (void *)dest, opcode); fixup_pcr()
37 maskbits, ip, (void *)dest); fixup_pcr()
/linux-4.4.14/arch/parisc/kernel/
H A Dunwind.c233 e = find_unwind_entry(info->ip); unwind_frame_regs()
237 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); unwind_frame_regs()
245 kallsyms_lookup(info->ip, NULL, NULL, &modname, unwind_frame_regs()
248 dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname); unwind_frame_regs()
255 info->ip, info->prev_sp, unwind_frame_regs()
288 "prev_sp=%lx prev_ip=%lx\n", info->ip, unwind_frame_regs()
301 npc < info->ip; unwind_frame_regs()
312 "%lx, frame_size = %ld\n", info->ip, unwind_frame_regs()
319 "%lx, frame_size = %ld\n", info->ip, unwind_frame_regs()
326 "-20(sp) @ %lx\n", info->ip, npc); unwind_frame_regs()
332 "-16(sp) @ %lx\n", info->ip, npc); unwind_frame_regs()
347 "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp, unwind_frame_regs()
358 info->ip = regs->iaoq[0]; unwind_frame_init()
362 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", unwind_frame_init()
363 t ? (int)t->pid : -1, info->sp, info->ip); unwind_frame_init()
395 next_frame->ip = next_frame->prev_ip; unwind_once()
399 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n", unwind_once()
401 next_frame->sp, next_frame->ip); unwind_once()
412 } while (!ret && !(info->ip & 3)); unwind_to_user()
434 if (unwind_once(&info) < 0 || info.ip == 0) return_address()
436 if (!kernel_text_address(info.ip)) return_address()
438 } while (info.ip && level--); return_address()
440 return info.ip; return_address()
/linux-4.4.14/arch/metag/kernel/
H A Dftrace.c100 unsigned long ip = rec->ip; ftrace_make_nop() local
102 old = ftrace_call_replace(ip, addr); ftrace_make_nop()
105 return ftrace_modify_code(ip, old, new); ftrace_make_nop()
111 unsigned long ip = rec->ip; ftrace_make_call() local
114 new = ftrace_call_replace(ip, addr); ftrace_make_call()
116 return ftrace_modify_code(ip, old, new); ftrace_make_call()
/linux-4.4.14/arch/arm/kernel/
H A Dentry-v7m.S102 add ip, r1, #TI_CPU_SAVE
103 stmia ip!, {r4 - r11} @ Store most regs on stack
104 str sp, [ip], #4
105 str lr, [ip], #4
111 mov ip, r4
113 ldmia ip!, {r4 - r11} @ Load all regs saved previously
114 ldr sp, [ip]
115 ldr pc, [ip, #4]!
H A Dftrace.c157 unsigned long ip = rec->ip; ftrace_make_call() local
160 new = ftrace_call_replace(ip, adjust_address(rec, addr)); ftrace_make_call()
162 return ftrace_modify_code(rec->ip, old, new, true); ftrace_make_call()
168 unsigned long ip = rec->ip; ftrace_make_nop() local
173 old = ftrace_call_replace(ip, adjust_address(rec, addr)); ftrace_make_nop()
175 ret = ftrace_modify_code(ip, old, new, true); ftrace_make_nop()
181 old = ftrace_call_replace(ip, adjust_address(rec, addr)); ftrace_make_nop()
183 ret = ftrace_modify_code(ip, old, new, true); ftrace_make_nop()
/linux-4.4.14/arch/s390/kernel/
H A Dftrace.c109 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) ftrace_make_nop()
127 ftrace_generate_call_insn(&orig, rec->ip); ftrace_make_nop()
133 s390_kernel_write((void *) rec->ip, &new, sizeof(new)); ftrace_make_nop()
141 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) ftrace_make_call()
156 ftrace_generate_call_insn(&new, rec->ip); ftrace_make_call()
161 s390_kernel_write((void *) rec->ip, &new, sizeof(new)); ftrace_make_call()
177 unsigned int *ip; ftrace_plt_init() local
182 ip = (unsigned int *) ftrace_plt; ftrace_plt_init()
183 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ftrace_plt_init()
184 ip[1] = 0x100a0004; ftrace_plt_init()
185 ip[2] = 0x07f10000; ftrace_plt_init()
186 ip[3] = FTRACE_ADDR >> 32; ftrace_plt_init()
187 ip[4] = FTRACE_ADDR & 0xffffffff; ftrace_plt_init()
198 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) prepare_ftrace_return() argument
206 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; prepare_ftrace_return()
207 trace.func = ip; prepare_ftrace_return()
212 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) prepare_ftrace_return()
/linux-4.4.14/arch/arm/crypto/
H A Daes-ce-core.S94 vld1.8 {q10-q11}, [ip]!
96 vld1.8 {q12-q13}, [ip]!
98 vld1.8 {q10-q11}, [ip]!
100 vld1.8 {q12-q13}, [ip]!
103 vld1.8 {q10-q11}, [ip]!
106 vld1.8 {q12-q13}, [ip]
117 * transforms. These should preserve all registers except q0 - q2 and ip
130 add ip, r2, #32 @ 3rd round key
137 add ip, r2, #32 @ 3rd round key
143 add ip, r2, #32 @ 3rd round key
149 add ip, r2, #32 @ 3rd round key
154 add ip, \rk, \rounds, lsl #4
156 vld1.8 {q14}, [ip] @ load last round key
296 rev ip, r6
299 vmov s7, ip
300 rev ip, r6
302 vmov s11, ip
309 rev ip, r6
312 vmov s27, ip
327 rev ip, r6
328 vmov s27, ip
344 vmov ip, \sreg @ load next word of ctr
345 rev ip, ip @ ... to handle the carry
346 adds ip, ip, #1
347 rev ip, ip
348 vmov \sreg, ip
389 add ip, r6, #32 @ 3rd round key of key 2
482 add ip, r2, #32 @ 3rd round key
/linux-4.4.14/drivers/net/slip/
H A Dslhc.c44 * Use ip_fast_csum from ip.h
71 #include <net/ip.h>
238 struct iphdr *ip; slhc_compress() local
250 ip = (struct iphdr *) icp; slhc_compress()
253 if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) { slhc_compress()
255 if(ip->protocol != IPPROTO_TCP) slhc_compress()
263 th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4); slhc_compress()
264 hlen = ip->ihl*4 + th->doff*4; slhc_compress()
291 if( ip->saddr == cs->cs_ip.saddr slhc_compress()
292 && ip->daddr == cs->cs_ip.daddr slhc_compress()
347 if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl slhc_compress()
348 || ip->tos != cs->cs_ip.tos slhc_compress()
349 || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000)) slhc_compress()
350 || ip->ttl != cs->cs_ip.ttl slhc_compress()
352 || (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0) slhc_compress()
399 if(ip->tot_len != cs->cs_ip.tot_len && slhc_compress()
425 deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id); slhc_compress()
436 memcpy(&cs->cs_ip,ip,20); slhc_compress()
466 * a regular ip/tcp packet but with the 'conversation id' we hope slhc_compress()
470 memcpy(&cs->cs_ip,ip,20); slhc_compress()
472 if (ip->ihl > 5) slhc_compress()
473 memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4); slhc_compress()
492 register struct iphdr *ip; slhc_uncompress() local
525 ip = &cs->cs_ip; slhc_uncompress()
537 hdrlen = ip->ihl * 4 + thp->doff * 4; slhc_uncompress()
543 i = ntohs(ip->tot_len) - hdrlen; slhc_uncompress()
551 ntohs(ip->tot_len) - hdrlen); slhc_uncompress()
587 ip->id = htons (ntohs (ip->id) + x); slhc_uncompress()
589 ip->id = htons (ntohs (ip->id) + 1); slhc_uncompress()
601 ip->tot_len = htons(len); slhc_uncompress()
602 ip->check = 0; slhc_uncompress()
607 memcpy(cp, ip, 20); slhc_uncompress()
610 if (ip->ihl > 5) { slhc_uncompress()
611 memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4); slhc_uncompress()
612 cp += (ip->ihl - 5) * 4; slhc_uncompress()
615 put_unaligned(ip_fast_csum(icp, ip->ihl), slhc_uncompress()
/linux-4.4.14/drivers/net/appletalk/
H A Dipddp.h17 __be32 ip; /* IP address */ member in struct:ipddp_route
/linux-4.4.14/kernel/
H A Dstacktrace.c32 unsigned long ip; snprint_stack_trace() local
40 ip = trace->entries[i]; snprint_stack_trace()
42 1 + spaces, ' ', (void *) ip, (void *) ip); snprint_stack_trace()
/linux-4.4.14/include/net/
H A Dtso.h4 #include <net/ip.h>
/linux-4.4.14/arch/mips/mm/
H A Duasm-mips.c185 struct insn *ip = NULL; build_insn() local
192 ip = &insn_table[i]; build_insn()
196 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) build_insn()
199 op = ip->match; build_insn()
201 if (ip->fields & RS) build_insn()
203 if (ip->fields & RT) build_insn()
205 if (ip->fields & RD) build_insn()
207 if (ip->fields & RE) build_insn()
209 if (ip->fields & SIMM) build_insn()
211 if (ip->fields & UIMM) build_insn()
213 if (ip->fields & BIMM) build_insn()
215 if (ip->fields & JIMM) build_insn()
217 if (ip->fields & FUNC) build_insn()
219 if (ip->fields & SET) build_insn()
221 if (ip->fields & SCIMM) build_insn()
223 if (ip->fields & SIMM9) build_insn()
H A Duasm-micromips.c152 struct insn *ip = NULL; build_insn() local
159 ip = &insn_table_MM[i]; build_insn()
163 if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) build_insn()
166 op = ip->match; build_insn()
168 if (ip->fields & RS) { build_insn()
174 if (ip->fields & RT) { build_insn()
180 if (ip->fields & RD) build_insn()
182 if (ip->fields & RE) build_insn()
184 if (ip->fields & SIMM) build_insn()
186 if (ip->fields & UIMM) build_insn()
188 if (ip->fields & BIMM) build_insn()
190 if (ip->fields & JIMM) build_insn()
192 if (ip->fields & FUNC) build_insn()
194 if (ip->fields & SET) build_insn()
196 if (ip->fields & SCIMM) build_insn()
/linux-4.4.14/drivers/scsi/arm/
H A Dacornscsi-io.S37 ldmia r0!, {r5, r6, r7, ip}
41 orr r6, r6, ip, lsl #16
84 ldmia r1!, {r4, r6, ip, lr}
94 mov r3, ip, lsl #16
96 mov r4, ip, lsr #16
98 mov ip, lr, lsl #16
99 orr ip, ip, ip, lsr #16
102 stmia r0!, {r3, r4, ip, lr}
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dctxnv40.h80 int ip = 0; _cp_bra() local
83 ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT; _cp_bra()
84 if (ip == 0) _cp_bra()
85 ip = 0xff000000 | (name << CP_BRA_IP_SHIFT); _cp_bra()
88 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | _cp_bra()
/linux-4.4.14/net/atm/
H A Dmpoa_proc.c246 unsigned char ip[4]; parse_qos() local
254 ip, ip+1, ip+2, ip+3) == 4) { parse_qos()
255 ipaddr = *(__be32 *)ip; parse_qos()
260 ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) { parse_qos()
264 ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8) parse_qos()
267 ipaddr = *(__be32 *)ip; parse_qos()
/linux-4.4.14/kernel/trace/
H A Dtrace_functions.c24 function_trace_call(unsigned long ip, unsigned long parent_ip,
27 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
124 function_trace_call(unsigned long ip, unsigned long parent_ip, function_trace_call() argument
148 trace_function(tr, ip, parent_ip, flags, pc); function_trace_call()
157 function_stack_trace_call(unsigned long ip, unsigned long parent_ip, function_stack_trace_call() argument
181 trace_function(tr, ip, parent_ip, flags, pc); function_stack_trace_call()
323 ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon_count() argument
329 ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceoff_count() argument
335 ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon() argument
344 ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceoff() argument
362 ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) ftrace_stacktrace() argument
368 ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_stacktrace_count() argument
416 ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data) ftrace_dump_probe() argument
424 ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data) ftrace_cpudump_probe() argument
432 unsigned long ip, void *data) ftrace_probe_print()
436 seq_printf(m, "%ps:%s", (void *)ip, name); ftrace_probe_print()
447 ftrace_traceon_print(struct seq_file *m, unsigned long ip, ftrace_traceon_print() argument
450 return ftrace_probe_print("traceon", m, ip, data); ftrace_traceon_print()
454 ftrace_traceoff_print(struct seq_file *m, unsigned long ip, ftrace_traceoff_print() argument
457 return ftrace_probe_print("traceoff", m, ip, data); ftrace_traceoff_print()
461 ftrace_stacktrace_print(struct seq_file *m, unsigned long ip, ftrace_stacktrace_print() argument
464 return ftrace_probe_print("stacktrace", m, ip, data); ftrace_stacktrace_print()
468 ftrace_dump_print(struct seq_file *m, unsigned long ip, ftrace_dump_print() argument
471 return ftrace_probe_print("dump", m, ip, data); ftrace_dump_print()
475 ftrace_cpudump_print(struct seq_file *m, unsigned long ip, ftrace_cpudump_print() argument
478 return ftrace_probe_print("cpudump", m, ip, data); ftrace_cpudump_print()
431 ftrace_probe_print(const char *name, struct seq_file *m, unsigned long ip, void *data) ftrace_probe_print() argument
/linux-4.4.14/net/ipv4/netfilter/
H A Dnf_nat_l3proto_ipv4.c14 #include <linux/ip.h>
21 #include <net/ip.h>
42 fl4->daddr = t->dst.u3.ip; nf_nat_ipv4_decode_session()
54 fl4->saddr = t->src.u3.ip; nf_nat_ipv4_decode_session()
68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) && nf_nat_ipv4_in_range()
69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip); nf_nat_ipv4_in_range()
75 return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport); nf_nat_ipv4_secure_port()
99 csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); nf_nat_ipv4_manip_pkt()
100 iph->saddr = target->src.u3.ip; nf_nat_ipv4_manip_pkt()
102 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip); nf_nat_ipv4_manip_pkt()
103 iph->daddr = target->dst.u3.ip; nf_nat_ipv4_manip_pkt()
118 newip = t->src.u3.ip; nf_nat_ipv4_csum_update()
121 newip = t->dst.u3.ip; nf_nat_ipv4_csum_update()
162 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]); nf_nat_ipv4_nlattr_to_range()
167 range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]); nf_nat_ipv4_nlattr_to_range()
169 range->max_addr.ip = range->min_addr.ip; nf_nat_ipv4_nlattr_to_range()
197 struct iphdr ip; nf_nat_icmp_reply_translation() member in struct:__anon14952
233 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol); nf_nat_icmp_reply_translation()
394 if ((ct->tuplehash[dir].tuple.src.u3.ip != nf_nat_ipv4_out()
395 ct->tuplehash[!dir].tuple.dst.u3.ip) || nf_nat_ipv4_out()
432 if (ct->tuplehash[dir].tuple.dst.u3.ip != nf_nat_ipv4_local_fn()
433 ct->tuplehash[!dir].tuple.src.u3.ip) { nf_nat_ipv4_local_fn()
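Note: nf_nat_ipv4_in_range() above compares addresses after ntohl() because network byte order does not preserve numeric ordering on little-endian hosts. A small user-space sketch of that host-order range test, using the standard socket headers:

    /* Sketch of the host-order IPv4 range check from nf_nat_ipv4_in_range(). */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* addr, min and max are IPv4 addresses in network byte order */
    static int ipv4_in_range(uint32_t addr, uint32_t min, uint32_t max)
    {
            return ntohl(addr) >= ntohl(min) && ntohl(addr) <= ntohl(max);
    }

    int main(void)
    {
            uint32_t min = inet_addr("10.0.0.1");
            uint32_t max = inet_addr("10.0.0.100");

            printf("%d\n", ipv4_in_range(inet_addr("10.0.0.42"), min, max)); /* 1 */
            printf("%d\n", ipv4_in_range(inet_addr("10.0.1.5"),  min, max)); /* 0 */
            return 0;
    }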
H A Dnf_nat_h323.c26 unsigned int addroff, __be32 ip, __be16 port) set_addr()
31 __be32 ip; set_addr() member in struct:__anon14951
37 buf.ip = ip; set_addr()
77 return set_addr(skb, protoff, data, dataoff, taddr->ipAddress.ip, set_h225_addr()
78 addr->ip, port); set_h225_addr()
89 addr->ip, port); set_h245_addr()
106 if (addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && set_sig_addr()
114 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) set_sig_addr()
118 &addr.ip, port, set_sig_addr()
119 &ct->tuplehash[!dir].tuple.dst.u3.ip, set_sig_addr()
126 } else if (addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && set_sig_addr()
130 &addr.ip, port, set_sig_addr()
131 &ct->tuplehash[!dir].tuple.src.u3.ip, set_sig_addr()
158 addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && set_ras_addr()
161 &addr.ip, ntohs(port), set_ras_addr()
162 &ct->tuplehash[!dir].tuple.dst.u3.ip, set_ras_addr()
267 &rtp_exp->tuple.src.u3.ip, nat_rtp_rtcp()
269 &rtp_exp->tuple.dst.u3.ip, nat_rtp_rtcp()
272 &rtcp_exp->tuple.src.u3.ip, nat_rtp_rtcp()
274 &rtcp_exp->tuple.dst.u3.ip, nat_rtp_rtcp()
323 &exp->tuple.src.u3.ip, nat_t120()
325 &exp->tuple.dst.u3.ip, nat_t120()
383 &exp->tuple.src.u3.ip, nat_h245()
385 &exp->tuple.dst.u3.ip, nat_h245()
400 if (this->tuple.src.u3.ip != 0) { /* Only accept calls from GK */ ip_nat_q931_expect()
473 (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { nat_q931()
485 &exp->tuple.src.u3.ip, nat_q931()
487 &exp->tuple.dst.u3.ip, nat_q931()
528 exp->tuple.dst.u3.ip = ct->tuplehash[!dir].tuple.dst.u3.ip; nat_callforwarding()
562 &exp->tuple.src.u3.ip, nat_callforwarding()
564 &exp->tuple.dst.u3.ip, nat_callforwarding()
24 set_addr(struct sk_buff *skb, unsigned int protoff, unsigned char **data, int dataoff, unsigned int addroff, __be32 ip, __be16 port) set_addr() argument
/linux-4.4.14/fs/ocfs2/dlmfs/
H A Ddlmfs.c137 struct dlmfs_inode_private *ip; dlmfs_file_open() local
160 ip = DLMFS_I(inode); dlmfs_file_open()
162 status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags); dlmfs_file_open()
183 struct dlmfs_inode_private *ip = DLMFS_I(inode); dlmfs_file_release() local
195 user_dlm_cluster_unlock(&ip->ip_lockres, level); dlmfs_file_release()
227 struct dlmfs_inode_private *ip = DLMFS_I(inode); dlmfs_file_poll() local
229 poll_wait(file, &ip->ip_lockres.l_event, wait); dlmfs_file_poll()
231 spin_lock(&ip->ip_lockres.l_lock); dlmfs_file_poll()
232 if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED) dlmfs_file_poll()
234 spin_unlock(&ip->ip_lockres.l_lock); dlmfs_file_poll()
333 struct dlmfs_inode_private *ip = dlmfs_init_once() local
336 ip->ip_conn = NULL; dlmfs_init_once()
337 ip->ip_parent = NULL; dlmfs_init_once()
339 inode_init_once(&ip->ip_vfs_inode); dlmfs_init_once()
344 struct dlmfs_inode_private *ip; dlmfs_alloc_inode() local
346 ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS); dlmfs_alloc_inode()
347 if (!ip) dlmfs_alloc_inode()
350 return &ip->ip_vfs_inode; dlmfs_alloc_inode()
367 struct dlmfs_inode_private *ip; dlmfs_evict_inode() local
373 ip = DLMFS_I(inode); dlmfs_evict_inode()
376 status = user_dlm_destroy_lock(&ip->ip_lockres); dlmfs_evict_inode()
379 iput(ip->ip_parent); dlmfs_evict_inode()
383 mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn); dlmfs_evict_inode()
386 if (ip->ip_conn) dlmfs_evict_inode()
387 user_dlm_unregister(ip->ip_conn); dlmfs_evict_inode()
389 ip->ip_parent = NULL; dlmfs_evict_inode()
390 ip->ip_conn = NULL; dlmfs_evict_inode()
417 struct dlmfs_inode_private *ip; dlmfs_get_inode() local
426 ip = DLMFS_I(inode); dlmfs_get_inode()
427 ip->ip_conn = DLMFS_I(parent)->ip_conn; dlmfs_get_inode()
441 user_dlm_lock_res_init(&ip->ip_lockres, dentry); dlmfs_get_inode()
447 ip->ip_parent = igrab(parent); dlmfs_get_inode()
448 BUG_ON(!ip->ip_parent); dlmfs_get_inode()
473 struct dlmfs_inode_private *ip; dlmfs_mkdir() local
492 ip = DLMFS_I(inode); dlmfs_mkdir()
501 ip->ip_conn = conn; dlmfs_mkdir()
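The dlmfs.c results revolve around DLMFS_I(inode), which recovers the driver-private dlmfs_inode_private wrapper from the embedded VFS inode. A userspace illustration of that embedding pattern, with invented type names standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* The usual container_of idiom: given a pointer to the embedded member,
 * subtract its offset to get back to the enclosing private structure. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct vfs_inode { long ino; };              /* stands in for struct inode          */

struct private_inode {                       /* stands in for dlmfs_inode_private   */
    void *conn;                              /* stands in for ip_conn               */
    struct vfs_inode vfs_inode;              /* stands in for ip_vfs_inode          */
};

static struct private_inode *PRIV_I(struct vfs_inode *inode)
{
    return container_of(inode, struct private_inode, vfs_inode);
}

int main(void)
{
    struct private_inode pi = { .conn = NULL, .vfs_inode = { .ino = 42 } };
    struct vfs_inode *inode = &pi.vfs_inode;

    printf("recovered wrapper's ino = %ld\n", PRIV_I(inode)->vfs_inode.ino);
    return 0;
}

This is exactly the pointer relationship dlmfs_alloc_inode() sets up when it returns &ip->ip_vfs_inode from the slab-allocated private object.
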
/linux-4.4.14/drivers/dma/ipu/
H A Dipu_idmac.c273 struct chan_param_mem_interleaved ip; member in union:chan_param_mem
313 params->ip.bpp = 2; ipu_ch_param_set_size()
314 params->ip.pfs = 4; ipu_ch_param_set_size()
315 params->ip.npb = 15; ipu_ch_param_set_size()
316 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
317 params->ip.ofs0 = 0; /* Red bit offset */ ipu_ch_param_set_size()
318 params->ip.ofs1 = 5; /* Green bit offset */ ipu_ch_param_set_size()
319 params->ip.ofs2 = 11; /* Blue bit offset */ ipu_ch_param_set_size()
320 params->ip.ofs3 = 16; /* Alpha bit offset */ ipu_ch_param_set_size()
321 params->ip.wid0 = 4; /* Red bit width - 1 */ ipu_ch_param_set_size()
322 params->ip.wid1 = 5; /* Green bit width - 1 */ ipu_ch_param_set_size()
323 params->ip.wid2 = 4; /* Blue bit width - 1 */ ipu_ch_param_set_size()
326 params->ip.bpp = 1; /* 24 BPP & RGB PFS */ ipu_ch_param_set_size()
327 params->ip.pfs = 4; ipu_ch_param_set_size()
328 params->ip.npb = 7; ipu_ch_param_set_size()
329 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
330 params->ip.ofs0 = 0; /* Red bit offset */ ipu_ch_param_set_size()
331 params->ip.ofs1 = 8; /* Green bit offset */ ipu_ch_param_set_size()
332 params->ip.ofs2 = 16; /* Blue bit offset */ ipu_ch_param_set_size()
333 params->ip.ofs3 = 24; /* Alpha bit offset */ ipu_ch_param_set_size()
334 params->ip.wid0 = 7; /* Red bit width - 1 */ ipu_ch_param_set_size()
335 params->ip.wid1 = 7; /* Green bit width - 1 */ ipu_ch_param_set_size()
336 params->ip.wid2 = 7; /* Blue bit width - 1 */ ipu_ch_param_set_size()
339 params->ip.bpp = 1; /* 24 BPP & RGB PFS */ ipu_ch_param_set_size()
340 params->ip.pfs = 4; ipu_ch_param_set_size()
341 params->ip.npb = 7; ipu_ch_param_set_size()
342 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
343 params->ip.ofs0 = 16; /* Red bit offset */ ipu_ch_param_set_size()
344 params->ip.ofs1 = 8; /* Green bit offset */ ipu_ch_param_set_size()
345 params->ip.ofs2 = 0; /* Blue bit offset */ ipu_ch_param_set_size()
346 params->ip.ofs3 = 24; /* Alpha bit offset */ ipu_ch_param_set_size()
347 params->ip.wid0 = 7; /* Red bit width - 1 */ ipu_ch_param_set_size()
348 params->ip.wid1 = 7; /* Green bit width - 1 */ ipu_ch_param_set_size()
349 params->ip.wid2 = 7; /* Blue bit width - 1 */ ipu_ch_param_set_size()
354 params->ip.bpp = 0; ipu_ch_param_set_size()
355 params->ip.pfs = 4; ipu_ch_param_set_size()
356 params->ip.npb = 7; ipu_ch_param_set_size()
357 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
358 params->ip.ofs0 = 8; /* Red bit offset */ ipu_ch_param_set_size()
359 params->ip.ofs1 = 16; /* Green bit offset */ ipu_ch_param_set_size()
360 params->ip.ofs2 = 24; /* Blue bit offset */ ipu_ch_param_set_size()
361 params->ip.ofs3 = 0; /* Alpha bit offset */ ipu_ch_param_set_size()
362 params->ip.wid0 = 7; /* Red bit width - 1 */ ipu_ch_param_set_size()
363 params->ip.wid1 = 7; /* Green bit width - 1 */ ipu_ch_param_set_size()
364 params->ip.wid2 = 7; /* Blue bit width - 1 */ ipu_ch_param_set_size()
365 params->ip.wid3 = 7; /* Alpha bit width - 1 */ ipu_ch_param_set_size()
369 params->ip.bpp = 0; ipu_ch_param_set_size()
370 params->ip.pfs = 4; ipu_ch_param_set_size()
371 params->ip.npb = 7; ipu_ch_param_set_size()
372 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
373 params->ip.ofs0 = 24; /* Red bit offset */ ipu_ch_param_set_size()
374 params->ip.ofs1 = 16; /* Green bit offset */ ipu_ch_param_set_size()
375 params->ip.ofs2 = 8; /* Blue bit offset */ ipu_ch_param_set_size()
376 params->ip.ofs3 = 0; /* Alpha bit offset */ ipu_ch_param_set_size()
377 params->ip.wid0 = 7; /* Red bit width - 1 */ ipu_ch_param_set_size()
378 params->ip.wid1 = 7; /* Green bit width - 1 */ ipu_ch_param_set_size()
379 params->ip.wid2 = 7; /* Blue bit width - 1 */ ipu_ch_param_set_size()
380 params->ip.wid3 = 7; /* Alpha bit width - 1 */ ipu_ch_param_set_size()
383 params->ip.bpp = 2; ipu_ch_param_set_size()
384 params->ip.pfs = 6; ipu_ch_param_set_size()
385 params->ip.npb = 7; ipu_ch_param_set_size()
386 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
390 params->ip.bpp = 3; ipu_ch_param_set_size()
391 params->ip.pfs = 3; ipu_ch_param_set_size()
392 params->ip.npb = 7; ipu_ch_param_set_size()
393 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
399 params->ip.bpp = 3; ipu_ch_param_set_size()
400 params->ip.pfs = 2; ipu_ch_param_set_size()
401 params->ip.npb = 7; ipu_ch_param_set_size()
402 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
408 params->ip.bpp = 3; ipu_ch_param_set_size()
409 params->ip.pfs = 2; ipu_ch_param_set_size()
410 params->ip.npb = 7; ipu_ch_param_set_size()
411 params->ip.sat = 2; /* SAT = 32-bit access */ ipu_ch_param_set_size()
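The ipu_idmac.c block programs per-pixel-format channel parameters: a pixel format select (pfs), a bytes-per-pixel code (bpp), and per-component bit offsets with widths stored as width minus one. A compact userspace sketch of that offset/width-minus-one encoding, using the RGB565 values from the snippet (the struct is illustrative, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

/* wid stores the component width minus one, exactly as in the RGB565 case
 * above (red ofs 0 / wid 4, green ofs 5 / wid 5, blue ofs 11 / wid 4). */
struct component { uint8_t ofs; uint8_t wid; };

static unsigned int extract(uint32_t pixel, struct component c)
{
    uint32_t mask = (1u << (c.wid + 1)) - 1;
    return (pixel >> c.ofs) & mask;
}

int main(void)
{
    struct component red   = { .ofs = 0,  .wid = 4 };
    struct component green = { .ofs = 5,  .wid = 5 };
    struct component blue  = { .ofs = 11, .wid = 4 };
    uint32_t rgb565 = 0xF81F;   /* magenta: full red and blue, no green */

    printf("r=%u g=%u b=%u\n", extract(rgb565, red),
           extract(rgb565, green), extract(rgb565, blue));
    return 0;
}

The 24-bit and 32-bit cases in the snippet follow the same scheme, only with 8-bit components at byte-aligned offsets.
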
/linux-4.4.14/drivers/mtd/
H A Dinftlmount.c52 struct INFTLPartition *ip; find_boot_record() local
190 ip = &mh->Partitions[i]; find_boot_record()
191 ip->virtualUnits = le32_to_cpu(ip->virtualUnits); find_boot_record()
192 ip->firstUnit = le32_to_cpu(ip->firstUnit); find_boot_record()
193 ip->lastUnit = le32_to_cpu(ip->lastUnit); find_boot_record()
194 ip->flags = le32_to_cpu(ip->flags); find_boot_record()
195 ip->spareUnits = le32_to_cpu(ip->spareUnits); find_boot_record()
196 ip->Reserved0 = le32_to_cpu(ip->Reserved0); find_boot_record()
204 i, ip->virtualUnits, ip->firstUnit, find_boot_record()
205 ip->lastUnit, ip->flags, find_boot_record()
206 ip->spareUnits); find_boot_record()
208 if (ip->Reserved0 != ip->firstUnit) { find_boot_record()
220 instr->addr = ip->Reserved0 * inftl->EraseSize; find_boot_record()
224 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) { find_boot_record()
228 "virtualUnits %d\n", i, ip->lastUnit, find_boot_record()
229 ip->firstUnit, ip->Reserved0); find_boot_record()
232 if (ip->Reserved1 != 0) { find_boot_record()
236 i, ip->Reserved1); find_boot_record()
240 if (ip->flags & INFTL_BDTL) find_boot_record()
251 inftl->nb_boot_blocks = ip->firstUnit; find_boot_record()
252 inftl->numvunits = ip->virtualUnits; find_boot_record()
270 inftl->firstEUN = ip->firstUnit; find_boot_record()
271 inftl->lastEUN = ip->lastUnit; find_boot_record()
272 inftl->nb_blocks = ip->lastUnit + 1; find_boot_record()
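find_boot_record() in inftlmount.c byte-swaps each on-media partition field with le32_to_cpu() and then sanity-checks the unit ranges. A portable userspace sketch of the same two steps, with simplified field names and made-up raw bytes:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for INFTLPartition with only the fields checked above. */
struct partition { uint32_t virtualUnits, firstUnit, lastUnit; };

/* Portable little-endian read, the userspace equivalent of le32_to_cpu(). */
static uint32_t le32_to_host(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Same shape as the lastUnit/firstUnit/virtualUnits consistency check above. */
static int partition_is_sane(const struct partition *p)
{
    return (p->lastUnit - p->firstUnit + 1) >= p->virtualUnits;
}

int main(void)
{
    const uint8_t raw[12] = { 0x10,0,0,0,  0x04,0,0,0,  0x1f,0,0,0 };
    struct partition p = {
        .virtualUnits = le32_to_host(raw + 0),
        .firstUnit    = le32_to_host(raw + 4),
        .lastUnit     = le32_to_host(raw + 8),
    };

    printf("units=%u first=%u last=%u sane=%d\n",
           (unsigned)p.virtualUnits, (unsigned)p.firstUnit,
           (unsigned)p.lastUnit, partition_is_sane(&p));
    return 0;
}
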
/linux-4.4.14/drivers/isdn/hardware/mISDN/
H A DmISDNipac.c41 #define ReadHSCX(h, o) (h->ip->read_reg(h->ip->hw, h->off + o))
42 #define WriteHSCX(h, o, v) (h->ip->write_reg(h->ip->hw, h->off + o, v))
43 #define ReadIPAC(ip, o) (ip->read_reg(ip->hw, o))
44 #define WriteIPAC(ip, o, v) (ip->write_reg(ip->hw, o, v))
901 pr_debug("%s: B%1d CEC %d us\n", hx->ip->name, hx->bch.nr, waitforCEC()
904 pr_info("%s: B%1d CEC timeout\n", hx->ip->name, hx->bch.nr); waitforCEC()
921 pr_debug("%s: B%1d XFW %d us\n", hx->ip->name, hx->bch.nr, waitforXFW()
924 pr_info("%s: B%1d XFW timeout\n", hx->ip->name, hx->bch.nr); waitforXFW()
930 if (hx->ip->type & IPAC_TYPE_IPACX) hscx_cmdr()
944 pr_debug("%s: B%1d %d\n", hscx->ip->name, hscx->bch.nr, count); hscx_empty_fifo()
956 hscx->ip->name, hscx->bch.nr, count); hscx_empty_fifo()
961 if (hscx->ip->type & IPAC_TYPE_IPACX) hscx_empty_fifo()
962 hscx->ip->read_fifo(hscx->ip->hw, hscx_empty_fifo()
965 hscx->ip->read_fifo(hscx->ip->hw, hscx_empty_fifo()
972 hscx->bch.nr, hscx->ip->name, count); hscx_empty_fifo()
1001 pr_debug("%s: B%1d %d/%d/%d\n", hscx->ip->name, hscx->bch.nr, hscx_fill_fifo()
1005 if (hscx->ip->type & IPAC_TYPE_IPACX) hscx_fill_fifo()
1006 hscx->ip->write_fifo(hscx->ip->hw, hscx_fill_fifo()
1010 hscx->ip->write_fifo(hscx->ip->hw, hscx_fill_fifo()
1017 hscx->bch.nr, hscx->ip->name, count); hscx_fill_fifo()
1045 if (hx->ip->type & IPAC_TYPE_IPACX) ipac_rme()
1049 pr_debug("%s: B%1d RSTAB %02x\n", hx->ip->name, hx->bch.nr, rstab); ipac_rme()
1055 hx->ip->name, hx->bch.nr); ipac_rme()
1060 hx->ip->name, hx->bch.nr, ipac_rme()
1066 hx->ip->name, hx->bch.nr); ipac_rme()
1071 if (hx->ip->type & IPAC_TYPE_IPACX) ipac_rme()
1083 hx->ip->name, hx->bch.nr, hx->bch.rx_skb->len); ipac_rme()
1096 if (hx->ip->type & IPAC_TYPE_IPACX) ipac_irq()
1098 else if (hx->ip->type & IPAC_TYPE_IPAC) { ipac_irq()
1103 pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name, ipac_irq()
1108 ipac_irq(&hx->ip->hscx[0], ista); ipac_irq()
1111 pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name, ipac_irq()
1119 pr_debug("%s: B%1d EXIRB %02x\n", hx->ip->name, ipac_irq()
1128 pr_debug("%s: B%1d ISTAB %02x\n", hx->ip->name, hx->bch.nr, istab); ipac_irq()
1143 pr_debug("%s: B%1d RFO error\n", hx->ip->name, hx->bch.nr); ipac_irq()
1157 pr_debug("%s: B%1d XDU error at len %d\n", hx->ip->name, ipac_irq()
1231 pr_debug("%s: HSCX %c protocol %x-->%x ch %d\n", hscx->ip->name, hscx_mode()
1233 if (hscx->ip->type & IPAC_TYPE_IPACX) { hscx_mode()
1235 WriteIPAC(hscx->ip, ISACX_BCHA_TSDP_BC1, 0x80); hscx_mode()
1236 WriteIPAC(hscx->ip, ISACX_BCHA_CR, 0x88); hscx_mode()
1238 WriteIPAC(hscx->ip, ISACX_BCHB_TSDP_BC1, 0x81); hscx_mode()
1239 WriteIPAC(hscx->ip, ISACX_BCHB_CR, 0x88); hscx_mode()
1265 pr_info("%s: protocol not known %x\n", hscx->ip->name, hscx_mode()
1269 } else if (hscx->ip->type & IPAC_TYPE_IPAC) { /* IPAC */ hscx_mode()
1301 pr_info("%s: protocol not known %x\n", hscx->ip->name, hscx_mode()
1305 } else if (hscx->ip->type & IPAC_TYPE_HSCX) { /* HSCX */ hscx_mode()
1337 pr_info("%s: protocol not known %x\n", hscx->ip->name, hscx_mode()
1358 spin_lock_irqsave(hx->ip->hwlock, flags); hscx_l2l1()
1364 spin_unlock_irqrestore(hx->ip->hwlock, flags); hscx_l2l1()
1367 spin_lock_irqsave(hx->ip->hwlock, flags); hscx_l2l1()
1372 spin_unlock_irqrestore(hx->ip->hwlock, flags); hscx_l2l1()
1378 spin_lock_irqsave(hx->ip->hwlock, flags); hscx_l2l1()
1381 spin_unlock_irqrestore(hx->ip->hwlock, flags); hscx_l2l1()
1388 hx->ip->name, __func__, hh->prim, hh->id); hscx_l2l1()
1410 pr_debug("%s: %s cmd:%x %p\n", hx->ip->name, __func__, cmd, arg); hscx_bctrl()
1415 spin_lock_irqsave(hx->ip->hwlock, flags); hscx_bctrl()
1418 spin_unlock_irqrestore(hx->ip->hwlock, flags); hscx_bctrl()
1421 module_put(hx->ip->owner); hscx_bctrl()
1429 hx->ip->name, __func__, cmd); hscx_bctrl()
1455 if (hx->ip->type & IPAC_TYPE_HSCX) { hscx_init()
1458 pr_debug("%s: HSCX VSTR %02x\n", hx->ip->name, val); hscx_init()
1460 pr_notice("%s: HSCX version %s\n", hx->ip->name, hscx_init()
1624 ipac->hscx[i].ip = ipac; mISDNipac_init()
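The mISDNipac.c macros (ReadIPAC/WriteIPAC, ReadHSCX/WriteHSCX) route every register access through per-chip read_reg/write_reg function pointers, so one driver core can serve different bus attachments. A userspace sketch of that indirection, with invented names and a fake in-memory register file (the kernel macros pass ip->hw to the callbacks rather than the wrapper struct itself):

#include <stdint.h>
#include <stdio.h>

struct chip {
    uint8_t regs[256];                                   /* fake register file */
    uint8_t (*read_reg)(struct chip *c, int off);
    void    (*write_reg)(struct chip *c, int off, uint8_t val);
};

#define READ_CHIP(c, o)     ((c)->read_reg((c), (o)))
#define WRITE_CHIP(c, o, v) ((c)->write_reg((c), (o), (v)))

static uint8_t mem_read(struct chip *c, int off)               { return c->regs[off]; }
static void    mem_write(struct chip *c, int off, uint8_t val) { c->regs[off] = val; }

int main(void)
{
    struct chip c = { .read_reg = mem_read, .write_reg = mem_write };

    WRITE_CHIP(&c, 0x20, 0x88);
    printf("reg 0x20 = 0x%02x\n", (unsigned)READ_CHIP(&c, 0x20));
    return 0;
}
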
/linux-4.4.14/net/openvswitch/
H A Dflow.c33 #include <linux/ip.h>
43 #include <net/ip.h>
268 key->ip.proto = NEXTHDR_NONE; parse_ipv6hdr()
269 key->ip.tos = ipv6_get_dsfield(nh); parse_ipv6hdr()
270 key->ip.ttl = nh->hop_limit; parse_ipv6hdr()
279 key->ip.frag = OVS_FRAG_TYPE_LATER; parse_ipv6hdr()
281 key->ip.frag = OVS_FRAG_TYPE_FIRST; parse_ipv6hdr()
283 key->ip.frag = OVS_FRAG_TYPE_NONE; parse_ipv6hdr()
288 * used to set key->ip.frag above. parse_ipv6hdr()
295 key->ip.proto = nexthdr; parse_ipv6hdr()
505 memset(&key->ip, 0, sizeof(key->ip)); key_extract()
518 key->ip.proto = nh->protocol; key_extract()
519 key->ip.tos = nh->tos; key_extract()
520 key->ip.ttl = nh->ttl; key_extract()
524 key->ip.frag = OVS_FRAG_TYPE_LATER; key_extract()
529 key->ip.frag = OVS_FRAG_TYPE_FIRST; key_extract()
531 key->ip.frag = OVS_FRAG_TYPE_NONE; key_extract()
534 if (key->ip.proto == IPPROTO_TCP) { key_extract()
544 } else if (key->ip.proto == IPPROTO_UDP) { key_extract()
552 } else if (key->ip.proto == IPPROTO_SCTP) { key_extract()
560 } else if (key->ip.proto == IPPROTO_ICMP) { key_extract()
588 key->ip.proto = ntohs(arp->ar_op); key_extract()
590 key->ip.proto = 0; key_extract()
597 memset(&key->ip, 0, sizeof(key->ip)); key_extract()
634 memset(&key->ip, 0, sizeof(key->ip)); key_extract()
647 if (key->ip.frag == OVS_FRAG_TYPE_LATER) key_extract()
650 key->ip.frag = OVS_FRAG_TYPE_FIRST; key_extract()
653 if (key->ip.proto == NEXTHDR_TCP) { key_extract()
662 } else if (key->ip.proto == NEXTHDR_UDP) { key_extract()
670 } else if (key->ip.proto == NEXTHDR_SCTP) { key_extract()
678 } else if (key->ip.proto == NEXTHDR_ICMP) { key_extract()
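key_extract() in openvswitch's flow.c fills key->ip with the protocol, TOS and TTL taken straight from the IP header before branching per transport protocol. A stripped-down userspace version of that extraction for IPv4, assuming a raw 20-byte header and illustrative field names (the real key also records addresses and the fragmentation state seen as key->ip.frag above):

#include <stdint.h>
#include <stdio.h>

struct flow_ip_key { uint8_t proto, tos, ttl; };

static struct flow_ip_key extract_ip_key(const uint8_t *iphdr)
{
    struct flow_ip_key key = {
        .tos   = iphdr[1],   /* DSCP/ECN byte */
        .ttl   = iphdr[8],   /* time to live  */
        .proto = iphdr[9],   /* next protocol */
    };
    return key;
}

int main(void)
{
    uint8_t hdr[20] = { 0x45, 0x00 };   /* minimal IPv4 header: version 4, IHL 5 */

    hdr[8] = 64;   /* TTL 64           */
    hdr[9] = 6;    /* protocol 6 = TCP */
    struct flow_ip_key k = extract_ip_key(hdr);
    printf("proto=%u tos=%u ttl=%u\n", k.proto, k.tos, k.ttl);
    return 0;
}
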
/linux-4.4.14/drivers/media/usb/usbtv/
H A Dusbtv-video.c336 static void usbtv_iso_cb(struct urb *ip) usbtv_iso_cb() argument
340 struct usbtv *usbtv = (struct usbtv *)ip->context; usbtv_iso_cb()
342 switch (ip->status) { usbtv_iso_cb()
358 for (i = 0; i < ip->number_of_packets; i++) { usbtv_iso_cb()
359 int size = ip->iso_frame_desc[i].actual_length; usbtv_iso_cb()
360 unsigned char *data = ip->transfer_buffer + usbtv_iso_cb()
361 ip->iso_frame_desc[i].offset; usbtv_iso_cb()
370 ret = usb_submit_urb(ip, GFP_ATOMIC); usbtv_iso_cb()
377 struct urb *ip; usbtv_setup_iso_transfer() local
381 ip = usb_alloc_urb(USBTV_ISOC_PACKETS, GFP_KERNEL); usbtv_setup_iso_transfer()
382 if (ip == NULL) usbtv_setup_iso_transfer()
385 ip->dev = usbtv->udev; usbtv_setup_iso_transfer()
386 ip->context = usbtv; usbtv_setup_iso_transfer()
387 ip->pipe = usb_rcvisocpipe(usbtv->udev, USBTV_VIDEO_ENDP); usbtv_setup_iso_transfer()
388 ip->interval = 1; usbtv_setup_iso_transfer()
389 ip->transfer_flags = URB_ISO_ASAP; usbtv_setup_iso_transfer()
390 ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS, usbtv_setup_iso_transfer()
392 ip->complete = usbtv_iso_cb; usbtv_setup_iso_transfer()
393 ip->number_of_packets = USBTV_ISOC_PACKETS; usbtv_setup_iso_transfer()
394 ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS; usbtv_setup_iso_transfer()
396 ip->iso_frame_desc[i].offset = size * i; usbtv_setup_iso_transfer()
397 ip->iso_frame_desc[i].length = size; usbtv_setup_iso_transfer()
400 return ip; usbtv_setup_iso_transfer()
410 struct urb *ip = usbtv->isoc_urbs[i]; usbtv_stop() local
412 if (ip == NULL) usbtv_stop()
414 usb_kill_urb(ip); usbtv_stop()
415 kfree(ip->transfer_buffer); usbtv_stop()
416 usb_free_urb(ip); usbtv_stop()
453 struct urb *ip; usbtv_start() local
455 ip = usbtv_setup_iso_transfer(usbtv); usbtv_start()
456 if (ip == NULL) { usbtv_start()
460 usbtv->isoc_urbs[i] = ip; usbtv_start()
462 ret = usb_submit_urb(ip, GFP_KERNEL); usbtv_start()
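usbtv_setup_iso_transfer() carves one transfer buffer into USBTV_ISOC_PACKETS equal slices, recording offset = size * i and length = size for each isochronous frame descriptor. A plain userspace sketch of just that buffer layout (the packet count and size below are made-up values, and the array stands in for urb->iso_frame_desc[]):

#include <stdio.h>
#include <stdlib.h>

#define NUM_PACKETS 8

struct iso_desc { unsigned int offset, length; };

int main(void)
{
    unsigned int size = 3072;
    unsigned char *buf = calloc(NUM_PACKETS, size);
    struct iso_desc desc[NUM_PACKETS];
    int i;

    if (!buf)
        return 1;
    for (i = 0; i < NUM_PACKETS; i++) {
        desc[i].offset = size * i;       /* slice i starts here           */
        desc[i].length = size;           /* and is exactly one slice long */
    }
    printf("packet 5 occupies buf[%u..%u]\n",
           desc[5].offset, desc[5].offset + desc[5].length - 1);
    free(buf);
    return 0;
}
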
/linux-4.4.14/arch/unicore32/kernel/
H A Dentry.S368 cff ip, s31
369 cand.a ip, #0x08000000 @ FPU exception traps?

372 ldw ip, [sp+], #S_PC
373 add ip, ip, #4
374 stw ip, [sp+], #S_PC
498 add ip, r1, #TI_CPU_SAVE
499 stm.w (r4 - r15), [ip]+
500 stm.w (r16 - r27, sp, lr), [ip]+
503 add ip, r1, #TI_FPSTATE
504 sfm.w (f0 - f7 ), [ip]+
505 sfm.w (f8 - f15), [ip]+
506 sfm.w (f16 - f23), [ip]+
507 sfm.w (f24 - f31), [ip]+
509 stw r4, [ip]
511 add ip, r2, #TI_FPSTATE
512 lfm.w (f0 - f7 ), [ip]+
513 lfm.w (f8 - f15), [ip]+
514 lfm.w (f16 - f23), [ip]+
515 lfm.w (f24 - f31), [ip]+
516 ldw r4, [ip]
519 add ip, r2, #TI_CPU_SAVE
520 ldm.w (r4 - r15), [ip]+
521 ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
607 sub ip, lr, #4
608 ldw.u scno, [ip] @ get SWI instruction
611 ldw ip, __cr_alignment
612 ldw ip, [ip]
613 movc p0.c1, ip, #0 @ update control register
615 enable_irq ip
624 ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing
625 cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
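The unicore32 entry.S fragment saves the outgoing thread's general and FPU register banks at fixed offsets (TI_CPU_SAVE, TI_FPSTATE) from the thread pointers held in r1 and r2, then reloads the incoming thread's copies. A very loose C analogue of that save/restore idea; the struct, sizes and function name are inventions for illustration only:

#include <stdio.h>
#include <string.h>

/* cpu_save stands in for the bank written at TI_CPU_SAVE (r4-r27 plus sp
 * and lr), fpstate for the f0-f31 block at TI_FPSTATE. */
struct thread_state {
    unsigned long cpu_save[26];
    double        fpstate[32];
};

/* Store the outgoing thread's live registers, then load the incoming one's. */
static void fake_switch_to(struct thread_state *prev, struct thread_state *next,
                           unsigned long live_regs[26], double live_fpu[32])
{
    memcpy(prev->cpu_save, live_regs, sizeof(prev->cpu_save));
    memcpy(prev->fpstate,  live_fpu,  sizeof(prev->fpstate));
    memcpy(live_regs, next->cpu_save, sizeof(next->cpu_save));
    memcpy(live_fpu,  next->fpstate,  sizeof(next->fpstate));
}

int main(void)
{
    struct thread_state a = { .cpu_save = { 1 } };
    struct thread_state b = { .cpu_save = { 2 } };
    unsigned long regs[26] = { 0 };
    double fpu[32] = { 0 };

    fake_switch_to(&a, &b, regs, fpu);
    printf("after the switch the live bank starts with %lu (from next)\n", regs[0]);
    return 0;
}
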
/linux-4.4.14/arch/tile/kernel/
H A Dftrace.c151 unsigned long ip = rec->ip; ftrace_make_call() local
154 new = ftrace_call_replace(ip, addr); ftrace_make_call()
156 return ftrace_modify_code(rec->ip, old, new); ftrace_make_call()
162 unsigned long ip = rec->ip; ftrace_make_nop() local
167 old = ftrace_call_replace(ip, addr); ftrace_make_nop()
169 ret = ftrace_modify_code(ip, old, new); ftrace_make_nop()
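ftrace_make_call() and ftrace_make_nop() on tile build the expected old instruction and the desired new one for rec->ip and hand both to ftrace_modify_code(), which patches only if the site still holds the expected word. A toy userspace version of that compare-then-patch shape (the instruction constants are placeholders, not real encodings, and this patches ordinary data rather than live kernel text):

#include <stdint.h>
#include <stdio.h>

static int modify_code(uint32_t *site, uint32_t old_insn, uint32_t new_insn)
{
    if (*site != old_insn)
        return -1;              /* site no longer holds what we expected */
    *site = new_insn;
    return 0;
}

int main(void)
{
    uint32_t nop_insn  = 0x11111111;   /* placeholder "nop"  */
    uint32_t call_insn = 0x22222222;   /* placeholder "call" */
    uint32_t site = nop_insn;

    if (modify_code(&site, nop_insn, call_insn) == 0)   /* make call */
        printf("patched to 0x%08x\n", (unsigned)site);
    if (modify_code(&site, call_insn, nop_insn) == 0)   /* make nop  */
        printf("patched back to 0x%08x\n", (unsigned)site);
    return 0;
}
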
/linux-4.4.14/arch/mips/lasat/
H A Dsysctl.c88 unsigned int ip; proc_lasat_ip() local
116 ip = in_aton(ipbuf); proc_lasat_ip()
117 *(unsigned int *)(table->data) = ip; proc_lasat_ip()
120 ip = *(unsigned int *)(table->data); proc_lasat_ip()
122 (ip) & 0xff, proc_lasat_ip()
123 (ip >> 8) & 0xff, proc_lasat_ip()
124 (ip >> 16) & 0xff, proc_lasat_ip()
125 (ip >> 24) & 0xff); proc_lasat_ip()
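proc_lasat_ip() converts between a dotted-quad string and a 32-bit value stored lowest octet first, printing it back with (ip) & 0xff through (ip >> 24) & 0xff. The same formatting in a self-contained userspace helper (the function name is invented):

#include <stdint.h>
#include <stdio.h>

/* Print a 32-bit address stored lowest octet first, mirroring the shifts
 * used by proc_lasat_ip() above. */
static void format_ip(uint32_t ip, char *buf, size_t len)
{
    snprintf(buf, len, "%u.%u.%u.%u",
             (unsigned)(ip & 0xff), (unsigned)((ip >> 8) & 0xff),
             (unsigned)((ip >> 16) & 0xff), (unsigned)((ip >> 24) & 0xff));
}

int main(void)
{
    char buf[16];

    format_ip(0x0100007f, buf, sizeof(buf));   /* 127.0.0.1 stored LSB-first */
    printf("%s\n", buf);
    return 0;
}
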
