root/lib/raid6/recov_ssse3.c

DEFINITIONS

This source file includes the following definitions.
  1. raid6_has_ssse3
  2. raid6_2data_recov_ssse3
  3. raid6_datap_recov_ssse3

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#ifdef CONFIG_AS_SSSE3

#include <linux/raid/pq.h>
#include "x86.h"

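/*
 * The GF(2^8) table lookups below are done with pshufb, an SSSE3
 * instruction; the XMM/XMM2 feature bits cover the SSE/SSE2 baseline
 * (movdqa, pxor, pand, psraw) that the rest of the code relies on.
 */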
static int raid6_has_ssse3(void)
{
        return boot_cpu_has(X86_FEATURE_XMM) &&
                boot_cpu_has(X86_FEATURE_XMM2) &&
                boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
                int failb, void **ptrs)
{
        u8 *p, *q, *dp, *dq;
        const u8 *pbmul;        /* P multiplier table for B data */
        const u8 *qmul;         /* Q multiplier table (for both) */
        static const u8 __aligned(16) x0f[16] = {
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /*
         * Compute syndrome with zero for the missing data pages.
         * Use the dead data pages as temporary storage for
         * delta p and delta q.
         */
        dp = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-2] = dp;
        dq = (u8 *)ptrs[failb];
        ptrs[failb] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);

        /* Restore pointer table */
        ptrs[faila]   = dp;
        ptrs[failb]   = dq;
        ptrs[disks-2] = p;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables */
        pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
        qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
                raid6_gfexp[failb]]];
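
        /*
         * Rough scalar sketch of what the SIMD loop below computes for each
         * byte (compare the generic C version in lib/raid6/recov.c), with
         * pbmul() and qmul() denoting GF(2^8) multiplication by the
         * coefficients chosen above:
         *
         *      px    = p[i] ^ dp[i];           syndrome delta for P
         *      qx    = qmul(q[i] ^ dq[i]);     scaled syndrome delta for Q
         *      dq[i] = pbmul(px) ^ qx;         reconstructed data B
         *      dp[i] = dq[i] ^ px;             reconstructed data A
         *
         * pbmul/qmul point at 32-byte raid6_vgfmul entries (a 16-entry
         * low-nibble table followed by a 16-entry high-nibble table), so
         * each multiplication becomes two pshufb lookups combined with
         * pxor, using the 0x0f mask in xmm7 to split bytes into nibbles.
         */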

        kernel_fpu_begin();

        asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
        asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
        asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
        asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

        /* Now do it... */
        while (bytes) {
#ifdef CONFIG_X86_64
                /* xmm6, xmm14, xmm15 hold qmul[0], pbmul[0], pbmul[16], preloaded above */

                asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
                asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
                asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
                asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
                asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
                asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
                asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
                asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

                /* xmm0/8 = px */

                asm volatile("movdqa %xmm6,%xmm4");
                asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
                asm volatile("movdqa %xmm6,%xmm12");
                asm volatile("movdqa %xmm5,%xmm13");
                asm volatile("movdqa %xmm1,%xmm3");
                asm volatile("movdqa %xmm9,%xmm11");
                asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
                asm volatile("movdqa %xmm8,%xmm10");
                asm volatile("psraw  $4,%xmm1");
                asm volatile("psraw  $4,%xmm9");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm11");
                asm volatile("pand   %xmm7,%xmm1");
                asm volatile("pand   %xmm7,%xmm9");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm11,%xmm12");
                asm volatile("pshufb %xmm1,%xmm5");
                asm volatile("pshufb %xmm9,%xmm13");
                asm volatile("pxor   %xmm4,%xmm5");
                asm volatile("pxor   %xmm12,%xmm13");

                /* xmm5/13 = qx */

                asm volatile("movdqa %xmm14,%xmm4");
                asm volatile("movdqa %xmm15,%xmm1");
                asm volatile("movdqa %xmm14,%xmm12");
                asm volatile("movdqa %xmm15,%xmm9");
                asm volatile("movdqa %xmm2,%xmm3");
                asm volatile("movdqa %xmm10,%xmm11");
                asm volatile("psraw  $4,%xmm2");
                asm volatile("psraw  $4,%xmm10");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm11");
                asm volatile("pand   %xmm7,%xmm2");
                asm volatile("pand   %xmm7,%xmm10");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm11,%xmm12");
                asm volatile("pshufb %xmm2,%xmm1");
                asm volatile("pshufb %xmm10,%xmm9");
                asm volatile("pxor   %xmm4,%xmm1");
                asm volatile("pxor   %xmm12,%xmm9");

                /* xmm1/9 = pbmul[px] */
                asm volatile("pxor   %xmm5,%xmm1");
                asm volatile("pxor   %xmm13,%xmm9");
                /* xmm1/9 = db = DQ */
                asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

                asm volatile("pxor   %xmm1,%xmm0");
                asm volatile("pxor   %xmm9,%xmm8");
                asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
                asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

                bytes -= 32;
                p += 32;
                q += 32;
                dp += 32;
                dq += 32;
#else
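                /*
                 * 32-bit x86 only has xmm0-xmm7, so this branch processes a
                 * single 16-byte chunk per iteration instead of two.
                 */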
                asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
                asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
                asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
                asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

                /* xmm1 = dq ^ q
                 * xmm0 = dp ^ p
                 */
                asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
                asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

                asm volatile("movdqa %xmm1,%xmm3");
                asm volatile("psraw  $4,%xmm1");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm1");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm1,%xmm5");
                asm volatile("pxor   %xmm4,%xmm5");

                asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

                /* xmm5 = qx */

                asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
                asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
                asm volatile("movdqa %xmm2,%xmm3");
                asm volatile("psraw  $4,%xmm2");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm2");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm2,%xmm1");
                asm volatile("pxor   %xmm4,%xmm1");

                /* xmm1 = pbmul[px] */
                asm volatile("pxor   %xmm5,%xmm1");
                /* xmm1 = db = DQ */
                asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

                asm volatile("pxor   %xmm1,%xmm0");
                asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

                bytes -= 16;
                p += 16;
                q += 16;
                dp += 16;
                dq += 16;
#endif
        }

        kernel_fpu_end();
}


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
                void **ptrs)
{
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
        static const u8 __aligned(16) x0f[16] = {
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /*
         * Compute syndrome with zero for the missing data page.
         * Use the dead data page as temporary storage for delta q.
         */
        dq = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);

        /* Restore pointer table */
        ptrs[faila]   = dq;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables */
        qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
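
        /*
         * Rough scalar sketch of the loop below (compare lib/raid6/recov.c):
         * after gen_syndrome() above, dq[] holds Q computed with the failed
         * block zeroed and p[] holds the matching partial P, so per byte:
         *
         *      dq[i] = qmul(q[i] ^ dq[i]);     reconstructed data block
         *      p[i] ^= dq[i];                  completed P parity
         *
         * where qmul() multiplies by 1/g^faila in GF(2^8) via the nibble
         * tables selected above.
         */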

        kernel_fpu_begin();

        asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

        while (bytes) {
#ifdef CONFIG_X86_64
                asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
                asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
                asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
                asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

                /* xmm3 = q[0] ^ dq[0] */

                asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
                asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

                /* xmm4 = q[16] ^ dq[16] */

                asm volatile("movdqa %xmm3, %xmm6");
                asm volatile("movdqa %xmm4, %xmm8");

                /* xmm4 = xmm8 = q[16] ^ dq[16] */

                asm volatile("psraw $4, %xmm3");
                asm volatile("pand %xmm7, %xmm6");
                asm volatile("pand %xmm7, %xmm3");
                asm volatile("pshufb %xmm6, %xmm0");
                asm volatile("pshufb %xmm3, %xmm1");
                asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
                asm volatile("pxor %xmm0, %xmm1");
                asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

                /* xmm1 = qmul[q[0] ^ dq[0]] */

                asm volatile("psraw $4, %xmm4");
                asm volatile("pand %xmm7, %xmm8");
                asm volatile("pand %xmm7, %xmm4");
                asm volatile("pshufb %xmm8, %xmm10");
                asm volatile("pshufb %xmm4, %xmm11");
                asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
                asm volatile("pxor %xmm10, %xmm11");
                asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

                /* xmm11 = qmul[q[16] ^ dq[16]] */

                asm volatile("pxor %xmm1, %xmm2");

                /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

                asm volatile("pxor %xmm11, %xmm12");

                /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

                asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

                asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
                asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

                bytes -= 32;
                p += 32;
                q += 32;
                dq += 32;

#else
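                /* 32-bit x86: only xmm0-xmm7, so handle one 16-byte chunk per pass */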
                asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
                asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
                asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
                asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

                /* xmm3 = *q ^ *dq */

                asm volatile("movdqa %xmm3, %xmm6");
                asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
                asm volatile("psraw $4, %xmm3");
                asm volatile("pand %xmm7, %xmm6");
                asm volatile("pand %xmm7, %xmm3");
                asm volatile("pshufb %xmm6, %xmm0");
                asm volatile("pshufb %xmm3, %xmm1");
                asm volatile("pxor %xmm0, %xmm1");

                /* xmm1 = qmul[*q ^ *dq] */

                asm volatile("pxor %xmm1, %xmm2");

                /* xmm2 = *p ^ qmul[*q ^ *dq] */

                asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

                bytes -= 16;
                p += 16;
                q += 16;
                dq += 16;
#endif
        }

        kernel_fpu_end();
}

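/*
 * Referenced by the algorithm selection code in lib/raid6/algos.c, which
 * picks the raid6_recov_calls entry with the highest ->priority whose
 * ->valid() hook reports support, so a higher-priority variant (e.g. the
 * AVX2 one) is preferred where it is usable.
 */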
const struct raid6_recov_calls raid6_recov_ssse3 = {
        .data2 = raid6_2data_recov_ssse3,
        .datap = raid6_datap_recov_ssse3,
        .valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
        .name = "ssse3x2",
#else
        .name = "ssse3x1",
#endif
        .priority = 1,
};

#else
#warning "your version of binutils lacks SSSE3 support"
#endif
