root/arch/sparc/include/asm/checksum_32.h

DEFINITIONS

This source file includes the following definitions:
  1. csum_partial_copy_nocheck
  2. csum_partial_copy_from_user
  3. csum_partial_copy_to_user
  4. ip_fast_csum
  5. csum_fold
  6. csum_tcpudp_nofold
  7. csum_tcpudp_magic
  8. csum_ipv6_magic
  9. ip_compute_csum
  10. csum_add

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H

/*  checksum.h:  IP/UDP/TCP checksum routines on the Sparc.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1996 Eddie C. Dost
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *      Alpha checksum c-code
 *      ix86 inline assembly
 *      RFC1071 Computing the Internet Checksum
 */

#include <linux/in6.h>
#include <linux/uaccess.h>

/* computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
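
/* Illustrative sketch, not part of this header: because csum_partial
 * threads its 32-bit accumulator through "sum", a buffer can be
 * checksummed in pieces, provided every piece but the last has an even
 * length.  The helper name and the 40-byte split are hypothetical, and
 * len > 40 is assumed.
 */
static inline __wsum example_split_csum(const void *buf, int len)
{
        __wsum s = csum_partial(buf, 40, 0);    /* even-length piece */

        return csum_partial((const char *)buf + 40, len - 40, s);
}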

/* the same as csum_partial, but copies from src while it
 * checksums
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary
 */

unsigned int __csum_partial_copy_sparc_generic(const unsigned char *, unsigned char *);

static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
        /* __csum_partial_copy_sparc_generic expects src in %o0, dst in
         * %o1, len in %g1 and the incoming sum in %g7; the mov in the
         * delay slot loads %g7 before the callee begins executing.
         */
        register unsigned int ret asm("o0") = (unsigned int)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;

        __asm__ __volatile__ (
                "call __csum_partial_copy_sparc_generic\n\t"
                " mov %6, %%g7\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l)
        : "0" (ret), "1" (d), "2" (l), "r" (sum)
        : "o2", "o3", "o4", "o5", "o7",
          "g2", "g3", "g4", "g5", "g7",
          "memory", "cc");
        return (__force __wsum)ret;
}
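
/* Illustrative sketch, not part of this header: functionally the
 * routine above is a copy followed by a checksum of the copied bytes,
 * which is how the generic kernel fallback spells it.  Assumes memcpy
 * is in scope (<linux/string.h>); the _reference name is hypothetical.
 */
static inline __wsum
csum_partial_copy_nocheck_reference(const void *src, void *dst, int len,
                                    __wsum sum)
{
        memcpy(dst, src, len);                  /* copy first...       */
        return csum_partial(dst, len, sum);     /* ...then checksum it */
}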

static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
                            __wsum sum, int *err)
{
        register unsigned long ret asm("o0") = (unsigned long)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;
        register __wsum s asm("g7") = sum;

        /* The __ex_table entry marks the call so that a fault on the
         * user access is fixed up rather than oopsing; err (operand %8)
         * is parked at [%sp + 64] so the fixup code can report through
         * it.
         */
        __asm__ __volatile__ (
        ".section __ex_table,#alloc\n\t"
        ".align 4\n\t"
        ".word 1f,2\n\t"
        ".previous\n"
        "1:\n\t"
        "call __csum_partial_copy_sparc_generic\n\t"
        " st %8, [%%sp + 64]\n"
        : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
        : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
        : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
          "cc", "memory");
        return (__force __wsum)ret;
}

static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
                          __wsum sum, int *err)
{
        if (!access_ok(dst, len)) {
                *err = -EFAULT;
                return sum;
        } else {
                register unsigned long ret asm("o0") = (unsigned long)src;
                register char __user *d asm("o1") = dst;
                register int l asm("g1") = len;
                register __wsum s asm("g7") = sum;

                __asm__ __volatile__ (
                ".section __ex_table,#alloc\n\t"
                ".align 4\n\t"
                ".word 1f,1\n\t"
                ".previous\n"
                "1:\n\t"
                "call __csum_partial_copy_sparc_generic\n\t"
                " st %8, [%%sp + 64]\n"
                : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
                : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
                : "o2", "o3", "o4", "o5", "o7",
                  "g2", "g3", "g4", "g5",
                  "cc", "memory");
                return (__force __wsum)ret;
        }
}

#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user

/* ihl is always 5 or greater, almost always exactly 5, and iph is
 * word-aligned the majority of the time.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
        __sum16 sum;

        /* Note: We must read %2 before we touch %0 for the first time,
         *       because GCC can legitimately use the same register for
         *       both operands.
         *
         * The code sums the first four header words, loops over the
         * remaining ihl - 4 words, then folds the 32-bit total to 16
         * bits and complements it (the xnor) on the way out.
         */
        __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
                             "ld\t[%1 + 0x00], %0\n\t"
                             "ld\t[%1 + 0x04], %%g2\n\t"
                             "ld\t[%1 + 0x08], %%g3\n\t"
                             "addcc\t%%g2, %0, %0\n\t"
                             "addxcc\t%%g3, %0, %0\n\t"
                             "ld\t[%1 + 0x0c], %%g2\n\t"
                             "ld\t[%1 + 0x10], %%g3\n\t"
                             "addxcc\t%%g2, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n"
                             "1:\taddcc\t%%g3, %0, %0\n\t"
                             "add\t%1, 4, %1\n\t"
                             "addxcc\t%0, %%g0, %0\n\t"
                             "subcc\t%%g4, 1, %%g4\n\t"
                             "be,a\t2f\n\t"
                             "sll\t%0, 16, %%g2\n\t"
                             "b\t1b\n\t"
                             "ld\t[%1 + 0x10], %%g3\n"
                             "2:\taddcc\t%0, %%g2, %%g2\n\t"
                             "srl\t%%g2, 16, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             "xnor\t%%g0, %0, %0"
                             : "=r" (sum), "=&r" (iph)
                             : "r" (ihl), "1" (iph)
                             : "g2", "g3", "g4", "cc", "memory");
        return sum;
}
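
/* Illustrative sketch, not part of this header: a portable C version of
 * the same computation -- sum the header's ihl 32-bit words into a wide
 * accumulator, fold the carries back in, and complement, as the xnor
 * does above.  The _reference name is hypothetical.
 */
static inline __sum16 ip_fast_csum_reference(const void *iph, unsigned int ihl)
{
        const u32 *word = iph;
        u64 sum = 0;
        unsigned int i;

        for (i = 0; i < ihl; i++)
                sum += word[i];                         /* may exceed 32 bits */
        sum = (sum & 0xffffffff) + (sum >> 32);         /* fold 64 -> 32      */
        sum = (sum & 0xffffffff) + (sum >> 32);
        sum = (sum & 0xffff) + (sum >> 16);             /* fold 32 -> 16      */
        sum = (sum & 0xffff) + (sum >> 16);
        return (__force __sum16)~sum;
}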

/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
        unsigned int tmp;

        __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
                             "srl\t%1, 16, %1\n\t"
                             "addx\t%1, %%g0, %1\n\t"
                             "xnor\t%%g0, %1, %0"
                             : "=&r" (sum), "=r" (tmp)
                             : "0" (sum), "1" ((__force u32)sum<<16)
                             : "cc");
        return (__force __sum16)sum;
}
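
/* Illustrative sketch, not part of this header: what the assembly in
 * csum_fold computes, in portable C -- add the high half into the low
 * half, absorb the carry, and return the complement of the low 16 bits.
 * The _reference name is hypothetical.
 */
static inline __sum16 csum_fold_reference(__wsum sum)
{
        u32 s = (__force u32)sum;

        s = (s & 0xffff) + (s >> 16);   /* fold high half into low half */
        s = (s & 0xffff) + (s >> 16);   /* absorb any remaining carry   */
        return (__force __sum16)~s;
}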

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
                             "addxcc\t%2, %0, %0\n\t"
                             "addxcc\t%3, %0, %0\n\t"
                             "addx\t%0, %%g0, %0\n\t"
                             : "=r" (sum), "=r" (saddr)
                             : "r" (daddr), "r" (proto + len), "0" (sum),
                               "1" (saddr)
                             : "cc");
        return sum;
}
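
/* Illustrative sketch, not part of this header: the generic kernel
 * fallback computes the same pseudo-header sum with a 64-bit
 * accumulator.  On a big-endian machine such as sparc32, proto and len
 * are added in directly, matching the "proto + len" operand above.  The
 * _reference name is hypothetical.
 */
static inline __wsum
csum_tcpudp_nofold_reference(__be32 saddr, __be32 daddr,
                             __u32 len, __u8 proto, __wsum sum)
{
        u64 s = (__force u32)sum;

        s += (__force u32)saddr;
        s += (__force u32)daddr;
        s += proto + len;                       /* big-endian placement */
        s = (s & 0xffffffff) + (s >> 32);       /* fold 64 -> 32        */
        s = (s & 0xffffffff) + (s >> 32);       /* absorb the carry     */
        return (__force __wsum)s;
}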

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
                                        __u32 len, __u8 proto,
                                        __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
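
/* Illustrative sketch, not part of this header: a hypothetical UDP
 * transmit path checksums the UDP header plus payload with
 * csum_partial, then mixes in the pseudo-header with csum_tcpudp_magic.
 * The helper name and the open-coded 17 (IPPROTO_UDP) are assumptions
 * for the example.
 */
static inline __sum16 example_udp_csum(__be32 saddr, __be32 daddr,
                                       const void *udp, int len)
{
        __wsum s = csum_partial(udp, len, 0);   /* UDP header + payload */

        return csum_tcpudp_magic(saddr, daddr, len, 17 /* IPPROTO_UDP */, s);
}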

#define _HAVE_ARCH_IPV6_CSUM

static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
                                      const struct in6_addr *daddr,
                                      __u32 len, __u8 proto, __wsum sum)
{
        __asm__ __volatile__ (
                "addcc  %3, %4, %%g4\n\t"
                "addxcc %5, %%g4, %%g4\n\t"
                "ld     [%2 + 0x0c], %%g2\n\t"
                "ld     [%2 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%2 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%2 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x0c], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x08], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "ld     [%1 + 0x04], %%g2\n\t"
                "addxcc %%g3, %%g4, %%g4\n\t"
                "ld     [%1 + 0x00], %%g3\n\t"
                "addxcc %%g2, %%g4, %%g4\n\t"
                "addxcc %%g3, %%g4, %0\n\t"
                "addx   0, %0, %0\n"
                : "=&r" (sum)
                : "r" (saddr), "r" (daddr),
                  "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
                : "g2", "g3", "g4", "cc");

        return csum_fold(sum);
}
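
/* Illustrative sketch, not part of this header: the same IPv6
 * pseudo-header sum in portable C, accumulating the four 32-bit words
 * of each address into a 64-bit accumulator before folding.  The
 * _reference name is hypothetical.
 */
static inline __sum16
csum_ipv6_magic_reference(const struct in6_addr *saddr,
                          const struct in6_addr *daddr,
                          __u32 len, __u8 proto, __wsum sum)
{
        u64 s = (__force u32)sum;
        int i;

        for (i = 0; i < 4; i++)
                s += (__force u32)saddr->s6_addr32[i] +
                     (__force u32)daddr->s6_addr32[i];
        s += (__force u32)htonl(len) + (__force u32)htonl(proto);
        s = (s & 0xffffffff) + (s >> 32);       /* fold 64 -> 32    */
        s = (s & 0xffffffff) + (s >> 32);       /* absorb the carry */
        return csum_fold((__force __wsum)s);
}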

/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
        __asm__ __volatile__(
                "addcc   %0, %1, %0\n"
                "addx    %0, %%g0, %0"
                : "=r" (csum)
                : "r" (addend), "0" (csum));

        return csum;
}
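
/* Illustrative sketch, not part of this header: the generic csum_add
 * does the same end-around-carry addition in C -- if the 32-bit add
 * wraps, the lost carry is added back in.  The _reference name is
 * hypothetical.
 */
static inline __wsum csum_add_reference(__wsum csum, __wsum addend)
{
        u32 res = (__force u32)csum + (__force u32)addend;

        if (res < (__force u32)addend)          /* did the add wrap? */
                res++;                          /* re-add the carry  */
        return (__force __wsum)res;
}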

#endif /* !(__SPARC_CHECKSUM_H) */
