arch/arm/include/asm/checksum.h

DEFINITIONS

This source file includes the following definitions:
  1. csum_fold
  2. ip_fast_csum
  3. csum_tcpudp_nofold
  4. csum_tcpudp_magic
  5. ip_compute_csum
  6. csum_ipv6_magic

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/checksum.h
 *
 * IP checksum routines
 *
 * Copyright (C) Original authors of ../asm-i386/checksum.h
 * Copyright (C) 1996-1999 Russell King
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);

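/*
 * Illustrative only, not part of this header: a minimal portable sketch
 * of the ones' complement sum that csum_partial() computes (the ARM
 * assembly version lives in arch/arm/lib/csum_partial.S).  The name and
 * the little-endian handling of the trailing odd byte are assumptions.
 */
static inline u32 csum_partial_sketch(const void *buff, int len, u32 sum)
{
        const u16 *p = buff;
        u64 acc = sum;

        while (len > 1) {                       /* sum 16-bit chunks */
                acc += *p++;
                len -= 2;
        }
        if (len)                                /* trailing odd byte */
                acc += *(const u8 *)p;
        while (acc >> 32)                       /* end-around carry */
                acc = (acc & 0xffffffff) + (acc >> 32);
        return (u32)acc;                        /* still needs csum_fold() */
}
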
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better, 64-bit) boundary
 */

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);

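/*
 * Illustrative only: a typical calling pattern for the checking variant
 * above.  On a faulting user access, csum_partial_copy_from_user() is
 * expected to report the error through *err_ptr; everything else here
 * (name, return convention on error) is a hypothetical sketch.
 */
static inline __wsum copy_and_csum_sketch(const void __user *src, void *dst,
                                          int len)
{
        int err = 0;
        __wsum sum = csum_partial_copy_from_user(src, dst, len, 0, &err);

        if (err)                /* the user copy faulted */
                return 0;       /* caller must check err separately */
        return sum;
}
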
/*
 *      Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
        __asm__(
        "add    %0, %1, %1, ror #16     @ csum_fold"
        : "=r" (sum)
        : "r" (sum)
        : "cc");
        return (__force __sum16)(~(__force u32)sum >> 16);
}

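/*
 * Illustrative only, not part of this header: what the "ror #16" trick
 * above computes.  Adding sum to itself rotated by 16 bits leaves
 * low-half + high-half (including the end-around carry) in the top
 * halfword, which is then shifted down and complemented.  A portable
 * equivalent, with a hypothetical name:
 */
static inline u16 csum_fold_sketch(u32 sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry out */
        return (u16)~sum;                       /* ones' complement result */
}
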
/*
 *      This is a version of ip_compute_csum() optimized for IP headers,
 *      which are always checksummed on 4-octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
        unsigned int tmp1;
        __wsum sum;

        __asm__ __volatile__(
        "ldr    %0, [%1], #4            @ ip_fast_csum          \n\
        ldr     %3, [%1], #4                                    \n\
        sub     %2, %2, #5                                      \n\
        adds    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
1:      adcs    %0, %0, %3                                      \n\
        ldr     %3, [%1], #4                                    \n\
        tst     %2, #15                 @ do this carefully     \n\
        subne   %2, %2, #1              @ without destroying    \n\
        bne     1b                      @ the carry flag        \n\
        adcs    %0, %0, %3                                      \n\
        adc     %0, %0, #0"
        : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
        : "1" (iph), "2" (ihl)
        : "cc", "memory");
        return csum_fold(sum);
}

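/*
 * Illustrative only: the unrolled loop above is equivalent to summing
 * ihl 32-bit words of the header with end-around carry, then folding.
 * Portable sketch (hypothetical name), assuming a 4-byte aligned iph
 * and ihl >= 5 (the asm subtracts 5 up front):
 */
static inline u16 ip_fast_csum_sketch(const void *iph, unsigned int ihl)
{
        const u32 *p = iph;
        u64 acc = 0;

        while (ihl--)                           /* ihl counts 32-bit words */
                acc += *p++;
        acc = (acc & 0xffffffff) + (acc >> 32); /* fold 64 -> 32 bits */
        acc = (acc & 0xffffffff) + (acc >> 32);
        return csum_fold_sketch((u32)acc);      /* fold to 16 and invert */
}
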
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
                   __u8 proto, __wsum sum)
{
        u32 lenprot = len + proto;
        if (__builtin_constant_p(sum) && sum == 0) {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold0   \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %3                              \n\t"
#else
                "adcs   %0, %0, %3, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        } else {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\t"
                "adcs   %0, %0, %3                              \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %4                              \n\t"
#else
                "adcs   %0, %0, %4, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        }
        return sum;
}
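
/*
 * Illustrative only: in portable terms the pseudo-header sum above is
 * saddr + daddr + htons(len) + htons(proto) in ones' complement
 * arithmetic; on little-endian ARM the "ror #8" applied to
 * lenprot = len + proto performs the byte swap implicitly.  Sketch
 * (hypothetical name; htons is available alongside the htonl used
 * further down):
 */
static inline u32 csum_tcpudp_nofold_sketch(__be32 saddr, __be32 daddr,
                                            __u32 len, __u8 proto, u32 sum)
{
        u64 acc = sum;

        acc += (__force u32)saddr;              /* addresses are already */
        acc += (__force u32)daddr;              /* in network byte order */
        acc += (__force u16)htons((__u16)len);  /* 16-bit fields of the */
        acc += (__force u16)htons((__u16)proto);/* pseudo header */
        while (acc >> 32)                       /* end-around carry */
                acc = (acc & 0xffffffff) + (acc >> 32);
        return (u32)acc;
}
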
/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                  __u8 proto, __wsum sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

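/*
 * Illustrative only: how these primitives usually combine, e.g. for a
 * UDP datagram -- checksum the header plus payload with csum_partial(),
 * then mix in the pseudo-header and fold.  The name is hypothetical;
 * IPPROTO_UDP comes from <linux/in.h>.
 */
static inline __sum16 udp_csum_sketch(__be32 saddr, __be32 daddr,
                                      const void *udph, __u32 len)
{
        __wsum sum = csum_partial(udph, len, 0);

        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
}
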
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}

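/*
 * Illustrative only: verifying a received ICMP message with the routine
 * above.  The checksum field is part of the sum, so a valid packet
 * folds to zero.  The name is hypothetical.
 */
static inline bool icmp_csum_ok_sketch(const void *icmph, int len)
{
        return ip_compute_csum(icmph, len) == 0;
}
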
#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __be32 len,
                __be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
                __u32 len, __u8 proto, __wsum sum)
{
        return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
                                           htonl(proto), sum));
}
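
/*
 * Illustrative only: the IPv6 counterpart of the UDP example above --
 * __csum_ipv6_magic() (arch/arm/lib/csum_ipv6_magic.S) sums the 16-byte
 * addresses plus the 32-bit length and next-header fields.  Hypothetical
 * name; IPPROTO_TCP comes from <linux/in.h>.
 */
static inline __sum16 tcp6_csum_sketch(const struct in6_addr *saddr,
                                       const struct in6_addr *daddr,
                                       const void *tcph, __u32 len)
{
        __wsum sum = csum_partial(tcph, len, 0);

        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, sum);
}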
#endif
