This source file includes the following definitions:
- csum_and_copy_from_user
- csum_and_copy_to_user
- csum_add
- csum_sub
- csum16_add
- csum16_sub
- csum_block_add
- csum_block_add_ext
- csum_block_sub
- csum_unfold
- csum_partial_ext
- csum_replace_by_diff
- csum_replace4
- csum_replace2
- inet_proto_csum_replace2
- remcsum_adjust
- remcsum_unadjust

#ifndef _CHECKSUM_H
#define _CHECKSUM_H

#include <linux/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>

#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len, __wsum sum, int *err_ptr)
{
	if (access_ok(src, len))
		return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);

	if (len)
		*err_ptr = -EFAULT;

	return sum;
}
#endif

#ifndef HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user
(const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
{
	sum = csum_partial(src, len, sum);

	if (access_ok(dst, len)) {
		if (copy_to_user(dst, src, len) == 0)
			return sum;
	}
	if (len)
		*err_ptr = -EFAULT;

	return (__force __wsum)-1; /* invalid checksum */
}
#endif

#ifndef HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;

	res += (__force u32)addend;
	/* (res < addend) is 1 exactly when the addition carried out of
	 * bit 31; ones'-complement arithmetic folds that carry back
	 * into bit 0. */
	return (__force __wsum)(res + (res < (__force u32)addend));
}
#endif

static inline __wsum csum_sub(__wsum csum, __wsum addend)
{
	return csum_add(csum, ~addend);
}
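
/* A hedged usage sketch (the helper name is illustrative, not a kernel
 * API): csum_sub() lets a running CHECKSUM_COMPLETE sum shed the
 * contribution of bytes being removed, the same idea that
 * skb_postpull_rcsum() is built on. Worked carry example for
 * csum_add(): 0xffffffff + 0x00000002 wraps to 0x00000001 with a
 * carry, and folding the carry back in gives 0x00000002, the correct
 * ones'-complement sum. */
static inline __wsum csum_drop_header(__wsum csum, const void *hdr, int hdrlen)
{
	/* Subtract the header's own partial sum from the running sum. */
	return csum_sub(csum, csum_partial(hdr, hdrlen, 0));
}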

static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}

static inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
	u32 sum = (__force u32)csum2;

	/* rotate sum to align it with a 16b boundary */
	if (offset & 1)
		sum = ror32(sum, 8);

	return csum_add(csum, (__force __wsum)sum);
}
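
/* Why the byte rotation above matters, with a hedged illustration (the
 * helper below is hypothetical, not a kernel API): the Internet
 * checksum pairs bytes into 16-bit words, so a block that starts at an
 * odd offset has each byte in the opposite lane relative to a
 * standalone csum_partial() over that block. ror32(sum, 8) swaps the
 * lanes: the two 16-bit halves of x = 0x12345678 sum to 0x68ac, while
 * the halves of ror32(x, 8) = 0x78123456 sum to 0xac68, its byte swap. */
static inline __sum16 csum_of_split_buffer(const void *buf, int split, int len)
{
	__wsum first  = csum_partial(buf, split, 0);
	__wsum second = csum_partial(buf + split, len - split, 0);

	/* Folds to the same value as csum_fold(csum_partial(buf, len, 0)),
	 * even when 'split' is odd. */
	return csum_fold(csum_block_add(first, second, split));
}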

static inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
	/* @len is unused here; the parameter exists so this can serve
	 * as a generic combine callback (see skb_checksum()). */
	return csum_block_add(csum, csum2, offset);
}

static inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
	return csum_block_add(csum, ~csum2, offset);
}

static inline __wsum csum_unfold(__sum16 n)
{
	/* Widen a folded 16-bit checksum back to a 32-bit partial sum. */
	return (__force __wsum)n;
}

static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
	return csum_partial(buff, len, sum);
}

/* A checksum that computes to zero is transmitted as all-ones, since a
 * zero UDP checksum field means "no checksum". */
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)

static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
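
/* Hedged usage sketch for csum_replace_by_diff() (the helper name is
 * illustrative): when a span of packet bytes changes, compute the
 * ones'-complement difference between the new and old bytes and apply
 * it to the folded checksum, instead of rechecksumming everything. */
static inline void replace_span(__sum16 *check,
				const void *old, const void *new, int len)
{
	__wsum diff = csum_sub(csum_partial(new, len, 0),
			       csum_partial(old, len, 0));

	csum_replace_by_diff(check, diff);
}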

static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
	__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);

	*sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}
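
/* Hedged NAT-style sketch (the function name is hypothetical; the
 * fields follow struct iphdr from <linux/ip.h>): rewrite an IPv4
 * source address and patch the header checksum incrementally with
 * csum_replace4() rather than recomputing it over the whole header. */
static inline void nat_rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
{
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;
}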

/* Implements RFC 1624 (Incremental Internet Checksum), section 3:
 *     HC' = ~(~HC + ~m + m')
 * where HC is the old checksum, m the old value of a 16-bit field,
 * and m' the new value.
 */
static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
	*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
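
/* Hedged sketch (the helper name is illustrative; fields follow
 * struct iphdr from <linux/ip.h>): after rewriting a 16-bit header
 * field such as the IPv4 total length, csum_replace2() applies the
 * RFC 1624 update HC' = ~(~HC + ~m + m'). */
static inline void set_tot_len(struct iphdr *iph, __be16 new_len)
{
	csum_replace2(&iph->check, iph->tot_len, new_len);
	iph->tot_len = new_len;
}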

struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
			      __be32 from, __be32 to, bool pseudohdr);
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
			       const __be32 *from, const __be32 *to,
			       bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
				     __wsum diff, bool pseudohdr);

static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
					    __be16 from, __be16 to,
					    bool pseudohdr)
{
	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
				 (__force __be32)to, pseudohdr);
}

static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
				    int start, int offset)
{
	__sum16 *psum = (__sum16 *)(ptr + offset);
	__wsum delta;

	/* Subtract out checksum up to start */
	csum = csum_sub(csum, csum_partial(ptr, start, 0));

	/* Set derived checksum in packet */
	delta = csum_sub((__force __wsum)csum_fold(csum),
			 (__force __wsum)*psum);
	*psum = csum_fold(csum);

	return delta;
}
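
/* Hedged sketch of a remote-checksum-offload receive fixup, loosely
 * modelled on skb_remcsum_process() from <linux/skbuff.h>; assumes the
 * skb is already CHECKSUM_COMPLETE. remcsum_adjust() derives the
 * elided transport checksum, writes it at 'offset', and returns the
 * delta that the packet edit introduced. */
static inline void remcsum_fixup(struct sk_buff *skb, void *ptr,
				 int start, int offset)
{
	__wsum delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* The packet bytes changed, so the running sum must change too. */
	skb->csum = csum_add(skb->csum, delta);
}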

static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
	*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}

#endif