/linux-4.1.27/drivers/s390/cio/ |
idset.c
  25 struct idset *set; idset_new() local
  27 set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); idset_new()
  28 if (set) { idset_new()
  29 set->num_ssid = num_ssid; idset_new()
  30 set->num_id = num_id; idset_new()
  31 memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); idset_new()
  33 return set; idset_new()
  36 void idset_free(struct idset *set) idset_free() argument
  38 vfree(set); idset_free()
  41 void idset_fill(struct idset *set) idset_fill() argument
  43 memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); idset_fill()
  46 static inline void idset_add(struct idset *set, int ssid, int id) idset_add() argument
  48 set_bit(ssid * set->num_id + id, set->bitmap); idset_add()
  51 static inline void idset_del(struct idset *set, int ssid, int id) idset_del() argument
  53 clear_bit(ssid * set->num_id + id, set->bitmap); idset_del()
  56 static inline int idset_contains(struct idset *set, int ssid, int id) idset_contains() argument
  58 return test_bit(ssid * set->num_id + id, set->bitmap); idset_contains()
  61 static inline int idset_get_first(struct idset *set, int *ssid, int *id) idset_get_first() argument
  65 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); idset_get_first()
  66 if (bitnum >= set->num_ssid * set->num_id) idset_get_first()
  68 *ssid = bitnum / set->num_id; idset_get_first()
  69 *id = bitnum % set->num_id; idset_get_first()
  78 void idset_sch_add(struct idset *set, struct subchannel_id schid) idset_sch_add() argument
  80 idset_add(set, schid.ssid, schid.sch_no); idset_sch_add()
  83 void idset_sch_del(struct idset *set, struct subchannel_id schid) idset_sch_del() argument
  85 idset_del(set, schid.ssid, schid.sch_no); idset_sch_del()
  88 /* Clear ids starting from @schid up to end of subchannel set. */ idset_sch_del_subseq()
  89 void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid) idset_sch_del_subseq() argument
  91 int pos = schid.ssid * set->num_id + schid.sch_no; idset_sch_del_subseq()
  93 bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no); idset_sch_del_subseq()
  96 int idset_sch_contains(struct idset *set, struct subchannel_id schid) idset_sch_contains() argument
  98 return idset_contains(set, schid.ssid, schid.sch_no); idset_sch_contains()
  101 int idset_is_empty(struct idset *set) idset_is_empty() argument
  103 return bitmap_empty(set->bitmap, set->num_ssid * set->num_id); idset_is_empty()
|
idset.h
  13 void idset_free(struct idset *set);
  14 void idset_fill(struct idset *set);
  17 void idset_sch_add(struct idset *set, struct subchannel_id id);
  18 void idset_sch_del(struct idset *set, struct subchannel_id id);
  19 void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
  20 int idset_sch_contains(struct idset *set, struct subchannel_id id);
  21 int idset_is_empty(struct idset *set);
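Read together, the idset.c and idset.h hits describe a small two-dimensional bitmap: a subchannel is addressed by an (ssid, id) pair and stored as bit ssid * num_id + id of one flat bit array. The sketch below reproduces only that indexing scheme in plain user-space C; the struct layout, names and helpers are illustrative assumptions, not the kernel's bitmap API.

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for struct idset: a flat bit array indexed as
	 * ssid * num_id + id, mirroring idset_add()/idset_contains(). */
	struct toy_idset {
		int num_ssid;
		int num_id;
		unsigned char bits[];	/* one bit per (ssid, id) pair */
	};

	static struct toy_idset *toy_idset_new(int num_ssid, int num_id)
	{
		size_t nbytes = ((size_t)num_ssid * num_id + 7) / 8;
		struct toy_idset *set = calloc(1, sizeof(*set) + nbytes);

		if (set) {
			set->num_ssid = num_ssid;
			set->num_id = num_id;
		}
		return set;
	}

	static void toy_idset_add(struct toy_idset *set, int ssid, int id)
	{
		size_t bit = (size_t)ssid * set->num_id + id;

		set->bits[bit / 8] |= 1u << (bit % 8);
	}

	static int toy_idset_contains(struct toy_idset *set, int ssid, int id)
	{
		size_t bit = (size_t)ssid * set->num_id + id;

		return (set->bits[bit / 8] >> (bit % 8)) & 1;
	}

	int main(void)
	{
		struct toy_idset *set = toy_idset_new(4, 65536);

		toy_idset_add(set, 1, 42);
		printf("%d %d\n", toy_idset_contains(set, 1, 42),
		       toy_idset_contains(set, 0, 42));	/* prints "1 0" */
		free(set);
		return 0;
	}

With this layout, idset_sch_del_subseq() falls out naturally: clearing num_id - sch_no bits starting at pos = ssid * num_id + sch_no wipes the remainder of that subchannel set in one bitmap_clear() call.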
|
/linux-4.1.27/arch/m68k/fpsp040/ |
H A D | fpsp.h | 79 .set LOCAL_SIZE,192 | bytes needed for local variables 80 .set LV,-LOCAL_SIZE | convenient base value 82 .set USER_DA,LV+0 | save space for D0-D1,A0-A1 83 .set USER_D0,LV+0 | saved user D0 84 .set USER_D1,LV+4 | saved user D1 85 .set USER_A0,LV+8 | saved user A0 86 .set USER_A1,LV+12 | saved user A1 87 .set USER_FP0,LV+16 | saved user FP0 88 .set USER_FP1,LV+28 | saved user FP1 89 .set USER_FP2,LV+40 | saved user FP2 90 .set USER_FP3,LV+52 | saved user FP3 91 .set USER_FPCR,LV+64 | saved user FPCR 92 .set FPCR_ENABLE,USER_FPCR+2 | FPCR exception enable 93 .set FPCR_MODE,USER_FPCR+3 | FPCR rounding mode control 94 .set USER_FPSR,LV+68 | saved user FPSR 95 .set FPSR_CC,USER_FPSR+0 | FPSR condition code 96 .set FPSR_QBYTE,USER_FPSR+1 | FPSR quotient 97 .set FPSR_EXCEPT,USER_FPSR+2 | FPSR exception 98 .set FPSR_AEXCEPT,USER_FPSR+3 | FPSR accrued exception 99 .set USER_FPIAR,LV+72 | saved user FPIAR 100 .set FP_SCR1,LV+76 | room for a temporary float value 101 .set FP_SCR2,LV+92 | room for a temporary float value 102 .set L_SCR1,LV+108 | room for a temporary long value 103 .set L_SCR2,LV+112 | room for a temporary long value 104 .set STORE_FLG,LV+116 105 .set BINDEC_FLG,LV+117 | used in bindec 106 .set DNRM_FLG,LV+118 | used in res_func 107 .set RES_FLG,LV+119 | used in res_func 108 .set DY_MO_FLG,LV+120 | dyadic/monadic flag 109 .set UFLG_TMP,LV+121 | temporary for uflag errata 110 .set CU_ONLY,LV+122 | cu-only flag 111 .set VER_TMP,LV+123 | temp holding for version number 112 .set L_SCR3,LV+124 | room for a temporary long value 113 .set FP_SCR3,LV+128 | room for a temporary float value 114 .set FP_SCR4,LV+144 | room for a temporary float value 115 .set FP_SCR5,LV+160 | room for a temporary float value 116 .set FP_SCR6,LV+176 127 .set CU_SAVEPC,LV-92 | micro-pc for CU (1 byte) 128 .set FPR_DIRTY_BITS,LV-91 | fpr dirty bits 130 .set WBTEMP,LV-76 | write back temp (12 bytes) 131 .set WBTEMP_EX,WBTEMP | wbtemp sign and exponent (2 bytes) 132 .set WBTEMP_HI,WBTEMP+4 | wbtemp mantissa [63:32] (4 bytes) 133 .set WBTEMP_LO,WBTEMP+8 | wbtemp mantissa [31:00] (4 bytes) 135 .set WBTEMP_SGN,WBTEMP+2 | used to store sign 137 .set FPSR_SHADOW,LV-64 | fpsr shadow reg 139 .set FPIARCU,LV-60 | Instr. addr. reg. 
for CU (4 bytes) 141 .set CMDREG2B,LV-52 | cmd reg for machine 2 142 .set CMDREG3B,LV-48 | cmd reg for E3 exceptions (2 bytes) 144 .set NMNEXC,LV-44 | NMNEXC (unsup,snan bits only) 145 .set nmn_unsup_bit,1 | 146 .set nmn_snan_bit,0 | 148 .set NMCEXC,LV-43 | NMNEXC & NMCEXC 149 .set nmn_operr_bit,7 150 .set nmn_ovfl_bit,6 151 .set nmn_unfl_bit,5 152 .set nmc_unsup_bit,4 153 .set nmc_snan_bit,3 154 .set nmc_operr_bit,2 155 .set nmc_ovfl_bit,1 156 .set nmc_unfl_bit,0 158 .set STAG,LV-40 | source tag (1 byte) 159 .set WBTEMP_GRS,LV-40 | alias wbtemp guard, round, sticky 160 .set guard_bit,1 | guard bit is bit number 1 161 .set round_bit,0 | round bit is bit number 0 162 .set stag_mask,0xE0 | upper 3 bits are source tag type 163 .set denorm_bit,7 | bit determines if denorm or unnorm 164 .set etemp15_bit,4 | etemp exponent bit #15 165 .set wbtemp66_bit,2 | wbtemp mantissa bit #66 166 .set wbtemp1_bit,1 | wbtemp mantissa bit #1 167 .set wbtemp0_bit,0 | wbtemp mantissa bit #0 169 .set STICKY,LV-39 | holds sticky bit 170 .set sticky_bit,7 172 .set CMDREG1B,LV-36 | cmd reg for E1 exceptions (2 bytes) 173 .set kfact_bit,12 | distinguishes static/dynamic k-factor 178 .set CMDWORD,LV-35 | command word in cmd1b 179 .set direction_bit,5 | bit 0 in opclass 180 .set size_bit2,12 | bit 2 in size field 182 .set DTAG,LV-32 | dest tag (1 byte) 183 .set dtag_mask,0xE0 | upper 3 bits are dest type tag 184 .set fptemp15_bit,4 | fptemp exponent bit #15 186 .set WB_BYTE,LV-31 | holds WBTE15 bit (1 byte) 187 .set wbtemp15_bit,4 | wbtemp exponent bit #15 189 .set E_BYTE,LV-28 | holds E1 and E3 bits (1 byte) 190 .set E1,2 | which bit is E1 flag 191 .set E3,1 | which bit is E3 flag 192 .set SFLAG,0 | which bit is S flag 194 .set T_BYTE,LV-27 | holds T and U bits (1 byte) 195 .set XFLAG,7 | which bit is X flag 196 .set UFLAG,5 | which bit is U flag 197 .set TFLAG,4 | which bit is T flag 199 .set FPTEMP,LV-24 | fptemp (12 bytes) 200 .set FPTEMP_EX,FPTEMP | fptemp sign and exponent (2 bytes) 201 .set FPTEMP_HI,FPTEMP+4 | fptemp mantissa [63:32] (4 bytes) 202 .set FPTEMP_LO,FPTEMP+8 | fptemp mantissa [31:00] (4 bytes) 204 .set FPTEMP_SGN,FPTEMP+2 | used to store sign 206 .set ETEMP,LV-12 | etemp (12 bytes) 207 .set ETEMP_EX,ETEMP | etemp sign and exponent (2 bytes) 208 .set ETEMP_HI,ETEMP+4 | etemp mantissa [63:32] (4 bytes) 209 .set ETEMP_LO,ETEMP+8 | etemp mantissa [31:00] (4 bytes) 211 .set ETEMP_SGN,ETEMP+2 | used to store sign 213 .set EXC_SR,4 | exception frame status register 214 .set EXC_PC,6 | exception frame program counter 215 .set EXC_VEC,10 | exception frame vector (format+vector#) 216 .set EXC_EA,12 | exception frame effective address 222 .set neg_bit,3 | negative result 223 .set z_bit,2 | zero result 224 .set inf_bit,1 | infinity result 225 .set nan_bit,0 | not-a-number result 227 .set q_sn_bit,7 | sign bit of quotient byte 229 .set bsun_bit,7 | branch on unordered 230 .set snan_bit,6 | signalling nan 231 .set operr_bit,5 | operand error 232 .set ovfl_bit,4 | overflow 233 .set unfl_bit,3 | underflow 234 .set dz_bit,2 | divide by zero 235 .set inex2_bit,1 | inexact result 2 236 .set inex1_bit,0 | inexact result 1 238 .set aiop_bit,7 | accrued illegal operation 239 .set aovfl_bit,6 | accrued overflow 240 .set aunfl_bit,5 | accrued underflow 241 .set adz_bit,4 | accrued divide by zero 242 .set ainex_bit,3 | accrued inexact 246 .set neg_mask,0x08000000 247 .set z_mask,0x04000000 248 .set inf_mask,0x02000000 249 .set nan_mask,0x01000000 251 .set bsun_mask,0x00008000 | 252 .set snan_mask,0x00004000 253 .set 
operr_mask,0x00002000 254 .set ovfl_mask,0x00001000 255 .set unfl_mask,0x00000800 256 .set dz_mask,0x00000400 257 .set inex2_mask,0x00000200 258 .set inex1_mask,0x00000100 260 .set aiop_mask,0x00000080 | accrued illegal operation 261 .set aovfl_mask,0x00000040 | accrued overflow 262 .set aunfl_mask,0x00000020 | accrued underflow 263 .set adz_mask,0x00000010 | accrued divide by zero 264 .set ainex_mask,0x00000008 | accrued inexact 268 .set dzinf_mask,inf_mask+dz_mask+adz_mask 269 .set opnan_mask,nan_mask+operr_mask+aiop_mask 270 .set nzi_mask,0x01ffffff | clears N, Z, and I 271 .set unfinx_mask,unfl_mask+inex2_mask+aunfl_mask+ainex_mask 272 .set unf2inx_mask,unfl_mask+inex2_mask+ainex_mask 273 .set ovfinx_mask,ovfl_mask+inex2_mask+aovfl_mask+ainex_mask 274 .set inx1a_mask,inex1_mask+ainex_mask 275 .set inx2a_mask,inex2_mask+ainex_mask 276 .set snaniop_mask,nan_mask+snan_mask+aiop_mask 277 .set naniop_mask,nan_mask+aiop_mask 278 .set neginf_mask,neg_mask+inf_mask 279 .set infaiop_mask,inf_mask+aiop_mask 280 .set negz_mask,neg_mask+z_mask 281 .set opaop_mask,operr_mask+aiop_mask 282 .set unfl_inx_mask,unfl_mask+aunfl_mask+ainex_mask 283 .set ovfl_inx_mask,ovfl_mask+aovfl_mask+ainex_mask 289 .set x_mode,0x00 | round to extended 290 .set s_mode,0x40 | round to single 291 .set d_mode,0x80 | round to double 293 .set rn_mode,0x00 | round nearest 294 .set rz_mode,0x10 | round to zero 295 .set rm_mode,0x20 | round to minus infinity 296 .set rp_mode,0x30 | round to plus infinity 302 .set signan_bit,6 | signalling nan bit in mantissa 303 .set sign_bit,7 305 .set rnd_stky_bit,29 | round/sticky bit of mantissa 307 .set sx_mask,0x01800000 | set s and x bits in word $48 309 .set LOCAL_EX,0 310 .set LOCAL_SGN,2 311 .set LOCAL_HI,4 312 .set LOCAL_LO,8 313 .set LOCAL_GRS,12 | valid ONLY for FP_SCR1, FP_SCR2 316 .set norm_tag,0x00 | tag bits in {7:5} position 317 .set zero_tag,0x20 318 .set inf_tag,0x40 319 .set nan_tag,0x60 320 .set dnrm_tag,0x80 324 .set VER_4,0x40 | fpsp compatible version numbers 326 .set VER_40,0x40 | original version number 327 .set VER_41,0x41 | revision version number 329 .set BUSY_SIZE,100 | size of busy frame 330 .set BUSY_FRAME,LV-BUSY_SIZE | start of busy frame 332 .set UNIMP_40_SIZE,44 | size of orig unimp frame 333 .set UNIMP_41_SIZE,52 | size of rev unimp frame 335 .set IDLE_SIZE,4 | size of idle frame 336 .set IDLE_FRAME,LV-IDLE_SIZE | start of idle frame 340 .set TRACE_VEC,0x2024 | trace trap 341 .set FLINE_VEC,0x002C | real F-line 342 .set UNIMP_VEC,0x202C | unimplemented 343 .set INEX_VEC,0x00C4 345 .set dbl_thresh,0x3C01 346 .set sgl_thresh,0x3F81
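The fpsp.h block at lines 289-296 defines the rounding-precision values (x_mode/s_mode/d_mode = 0x00/0x40/0x80) and rounding-mode values (rn/rz/rm/rp_mode = 0x00/0x10/0x20/0x30) that the handlers extract from the FPCR_MODE byte. A small C decoder consistent with those values is shown below; the field positions (precision in bits 7-6, mode in bits 5-4) are inferred from the constants themselves, and the snippet is an illustration, not part of the FPSP sources.

	#include <stdio.h>

	/* Values copied from the fpsp.h listing above. */
	#define X_MODE  0x00	/* round to extended */
	#define S_MODE  0x40	/* round to single */
	#define D_MODE  0x80	/* round to double */
	#define RN_MODE 0x00	/* round nearest */
	#define RZ_MODE 0x10	/* round to zero */
	#define RM_MODE 0x20	/* round to minus infinity */
	#define RP_MODE 0x30	/* round to plus infinity */

	static const char *prec_name(unsigned char mode_byte)
	{
		switch (mode_byte & 0xc0) {	/* precision field, bits 7-6 */
		case X_MODE: return "extended";
		case S_MODE: return "single";
		case D_MODE: return "double";
		default:     return "undefined";
		}
	}

	static const char *rnd_name(unsigned char mode_byte)
	{
		switch (mode_byte & 0x30) {	/* rounding field, bits 5-4 */
		case RN_MODE: return "nearest";
		case RZ_MODE: return "toward zero";
		case RM_MODE: return "toward -inf";
		default:      return "toward +inf";	/* RP_MODE */
		}
	}

	int main(void)
	{
		unsigned char mode = S_MODE | RZ_MODE;	/* 0x50 */

		printf("%s precision, round %s\n", prec_name(mode), rnd_name(mode));
		return 0;
	}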
|
H A D | kernel_ex.S | 7 | set the appropriate bits in the USER_FPSR word on the stack. 51 | set FPSR exception status dz bit, condition code 57 | set exception status bit & accrued bits in FPSR 58 | set flag to disable sto_res from corrupting fp register 67 bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR 68 fmovel #0,%FPSR |clr status bits (Z set) 73 fmovel #0,%FPSR |clr status bits (Z set) 84 bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR 89 orl #dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ 97 bsetb #neg_bit,FPSR_CC(%a6) |set neg bit in FPSR 99 orl #dzinf_mask,USER_FPSR(%a6) |set I,DZ,ADZ 106 | set FPSR exception status operr bit, condition code 111 | set FPSR exception status operr bit, accrued operr bit 112 | set flag to disable sto_res from corrupting fp register 116 orl #opnan_mask,USER_FPSR(%a6) |set NaN, OPERR, AIOP 131 | aunfl, and ainex to be set on exit. 138 clrl FP_SCR1(%a6) |set exceptional operand to zero 147 | ;set UNFL, INEX2, AUNFL, AINEX 154 bsetb #wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15 155 bsetb #sticky_bit,STICKY(%a6) |set sticky bit 172 bsetb #sign_bit,FP_SCR1(%a6) |set sign bit of exc operand 183 | ovfl, aovfl, and ainex bits are set, but not the inex2 bit. 192 | the lower 40 bits of ETEMP are zero; if not, set inex2. If double, 193 | check if the lower 21 bits are zero; if not, set inex2. 226 clrl FP_SCR1(%a6) |set exceptional operand 232 bsetb #sticky_bit,STICKY(%a6) |set sticky bit 247 bsetb #sign_bit,FP_SCR1(%a6) |set exceptional operand sign 256 | The inex2 and ainex bits are set. 259 orl #inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX 266 | the inex2 exception bits set in the FPSR. If the underflow bit 267 | is set, but the underflow trap was not taken, the aunfl bit in 268 | the FPSR must be set. 271 orl #inx2a_mask,USER_FPSR(%a6) |set INEX2, AINEX 272 btstb #unfl_bit,FPSR_EXCEPT(%a6) |test for unfl bit set 273 beqs no_uacc1 |if clear, do not set aunfl 282 | and set the FPSR bits accordingly. See the MC68040 User's Manual 288 bsetb #neg_bit,FPSR_CC(%a6) |set N bit 304 orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP 312 orb #nan_tag,DTAG(%a6) |set up dtag for nan 314 orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP 318 bsetb #signan_bit,FPTEMP_HI(%a6) |set SNAN bit in sop 321 orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP 328 | and set the FPSR bits accordingly. See the MC68040 User's Manual 334 bsetb #neg_bit,FPSR_CC(%a6) |set N bit 345 bsetb #signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop 346 orb #norm_tag,DTAG(%a6) |set up dtag for norm 347 orb #nan_tag,STAG(%a6) |set up stag for nan 349 orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP 353 bsetb #signan_bit,ETEMP_HI(%a6) |set SNAN bit in sop 356 orl #snaniop_mask,USER_FPSR(%a6) |set NAN, SNAN, AIOP 365 | ;set UNFL, INEX2, AUNFL, AINEX 369 | not set inex2, aunfl, or ainex. 406 bsetb #wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15 418 bsetb #neg_bit,FPSR_CC(%a6) |set N bit in FPSR_CC 457 bclrb #7,DTAG(%a2) |set DTAG to norm 471 bclrb #7,STAG(%a2) |set STAG to norm 479 moveb #0xfe,CU_SAVEPC(%a2) |set continue frame
|
H A D | round.S | 40 | The INEX bit of USER_FPSR will be set if the rounded result was 41 | inexact (i.e. if any of the g-r-s bits were set). 46 | If g=r=s=0 then result is exact and round is done, else set 55 swap %d1 |set up d1.w for round prec. 62 orl #inx2a_mask,USER_FPSR(%a6) |set inex2/ainex 81 swap %d1 |set up d1 for round prec. 94 swap %d1 |set up d1 for round prec. 106 swap %d1 |set up d1 for round prec. 116 swap %d1 |set up d1 for round prec. 155 bnes st_stky |if any are set, set sticky 157 bnes st_stky |if any are set, set sticky 167 bnes st_stky |if any are set, set sticky 179 .set ad_1_sgl,0x00000100 | constant to add 1 to l-bit in sgl prec 180 .set ad_1_dbl,0x00000800 | constant to add 1 to l-bit in dbl prec 275 | NRM_SET shifts and decrements until there is a 1 set in the integer 278 | NRM_ZERO shifts and decrements until there is a 1 set in the integer 281 | exponent (d0) is set to 0 and the mantissa (d1 & d2) is not 296 | set exponent = 0 317 beqs ms_clr |branch if no bits were set 331 | ;set exp = 0. 341 bfffo %d2{#0:#32},%d3 |check if any bits set in ls mant 342 beqs all_clr |branch if none set 350 movew #0,LOCAL_EX(%a0) |no mantissa bits set. Set exp = 0. 383 | set (otherwise this would have been tagged a zero not a denorm). 433 bras dnrm_inex |if set, set inex 450 bras dnrm_inex |if set, set inex 463 bras dnrm_inex |if set, set inex 466 tstl LOCAL_HI(%a0) |check for any bits set 468 tstl LOCAL_LO(%a0) |check for any bits set 472 orl #inx2a_mask,USER_FPSR(%a6) |set inex2/ainex 473 movel #0x20000000,%d0 |set sticky bit in return value 476 movel #0,LOCAL_HI(%a0) |set d1 = 0 (ms mantissa) 477 movel #0,LOCAL_LO(%a0) |set d2 = 0 (ms mantissa) 480 orl #inx2a_mask,USER_FPSR(%a6) |set inex2/ainex 526 clrb %d1 |set no inex2 reported 553 orl #0x20000000,%d0 |set sticky bit in d0 571 bnes c2_sstky |bra if sticky bit to be set 573 bnes c2_sstky |bra if sticky bit to be set 588 orl #0x20000000,%d0 |set sticky bit in d0
|
H A D | scale.S | 63 | and set unfl. 71 orl #unfl_mask,USER_FPSR(%a6) |set UNFL 128 blts nden_exit |if set, not denorm 132 orl #unfl_bit,USER_FPSR(%a6) |set unfl 186 | ;set unfl, aunfl, ainex 195 orw #0x8000,FPTEMP_EX(%a6) |set sign bit 205 | Result is zero. Check for rounding mode to set lsb. If the 216 bnes no_dir |if set, neg op, no inc 217 movel #1,FPTEMP_LO(%a6) |set lsb 222 movel #1,FPTEMP_LO(%a6) |set lsb 223 orl #neg_mask,USER_FPSR(%a6) |set N 227 fmovex FPTEMP(%a6),%fp0 |use fmove to set cc's 245 orl #unfl_mask,USER_FPSR(%a6) |set unfl 248 orl #neg_mask,USER_FPSR(%a6) |set N 254 | The result has underflowed to zero. Return zero and set 265 bnes no_dir2 |if set, neg op, no inc 268 movel #1,FPTEMP_LO(%a6) |set lsb 275 movel #1,FPTEMP_LO(%a6) |set lsb 276 orl #neg_mask,USER_FPSR(%a6) |set N 339 orl #neg_mask,USER_FPSR(%a6) |set N 345 orl #unfl_mask,USER_FPSR(%a6) |set unfl
|
/linux-4.1.27/arch/powerpc/include/asm/ |
linkage.h
  7 asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
  8 "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
  10 asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
  11 "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
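The linkage.h hits are the bodies of two stringizing macros: the first emits a weak symbol bound to sys_ni_syscall, the second emits a global alias, and each does so twice because the older 64-bit powerpc function-descriptor ABI also needs the dot-prefixed entry point. For a hypothetical symbol sys_foo, the first fragment expands to roughly the following (illustrative expansion only):

	asm ("\t.weak sys_foo\n\t.set sys_foo, sys_ni_syscall\n"
	     "\t.weak .sys_foo\n\t.set .sys_foo, .sys_ni_syscall\n");

	/* which hands the assembler:
	 *	.weak sys_foo
	 *	.set  sys_foo, sys_ni_syscall
	 *	.weak .sys_foo
	 *	.set  .sys_foo, .sys_ni_syscall
	 */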
|
ucc.h
  45 int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask);
  49 static inline int ucc_set_qe_mux_grant(unsigned int ucc_num, int set) ucc_set_qe_mux_grant() argument
  51 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT); ucc_set_qe_mux_grant()
  54 static inline int ucc_set_qe_mux_tsa(unsigned int ucc_num, int set) ucc_set_qe_mux_tsa() argument
  56 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA); ucc_set_qe_mux_tsa()
  59 static inline int ucc_set_qe_mux_bkpt(unsigned int ucc_num, int set) ucc_set_qe_mux_bkpt() argument
  61 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT); ucc_set_qe_mux_bkpt()
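The three ucc.h wrappers differ only in the mask they forward to ucc_mux_set_grant_tsa_bkpt() (QE_CMXUCR_GRANT, QE_CMXUCR_TSA, QE_CMXUCR_BKPT), i.e. each one sets or clears a single CMXUCR control bit for the given UCC. A minimal sketch of that set-or-clear-under-mask pattern follows; the helper is invented for illustration and is not the QE implementation.

	#include <stdint.h>

	/* Illustration only: apply "set" (1) or "clear" (0) to the bits
	 * selected by mask in a 32-bit register image, which is the decision
	 * the three inline wrappers delegate. */
	static inline uint32_t mux_update(uint32_t reg, int set, uint32_t mask)
	{
		return set ? (reg | mask) : (reg & ~mask);
	}

So ucc_set_qe_mux_grant(ucc_num, 1) roughly amounts to turning QE_CMXUCR_GRANT on in the CMXUCR word that belongs to ucc_num, and passing 0 clears it again.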
|
/linux-4.1.27/net/netfilter/ipset/ |
H A D | ip_set_list_set.c | 8 /* Kernel module implementing an IP set type: the list:set type */ 25 IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); 26 MODULE_ALIAS("ip_set_list:set"); 41 u32 size; /* size of set list array */ 44 struct set_elem members[0]; /* the set members */ 47 #define list_set_elem(set, map, id) \ 48 (struct set_elem *)((void *)(map)->members + (id) * (set)->dsize) 51 list_set_ktest(struct ip_set *set, const struct sk_buff *skb, list_set_ktest() argument 55 struct list_set *map = set->data; list_set_ktest() 65 e = list_set_elem(set, map, i); list_set_ktest() 68 if (SET_WITH_TIMEOUT(set) && list_set_ktest() 69 ip_set_timeout_expired(ext_timeout(e, set))) list_set_ktest() 73 if (SET_WITH_COUNTER(set)) list_set_ktest() 74 ip_set_update_counter(ext_counter(e, set), list_set_ktest() 77 if (SET_WITH_SKBINFO(set)) list_set_ktest() 78 ip_set_get_skbinfo(ext_skbinfo(e, set), list_set_ktest() 88 list_set_kadd(struct ip_set *set, const struct sk_buff *skb, list_set_kadd() argument 92 struct list_set *map = set->data; list_set_kadd() 98 e = list_set_elem(set, map, i); list_set_kadd() 101 if (SET_WITH_TIMEOUT(set) && list_set_kadd() 102 ip_set_timeout_expired(ext_timeout(e, set))) list_set_kadd() 112 list_set_kdel(struct ip_set *set, const struct sk_buff *skb, list_set_kdel() argument 116 struct list_set *map = set->data; list_set_kdel() 122 e = list_set_elem(set, map, i); list_set_kdel() 125 if (SET_WITH_TIMEOUT(set) && list_set_kdel() 126 ip_set_timeout_expired(ext_timeout(e, set))) list_set_kdel() 136 list_set_kadt(struct ip_set *set, const struct sk_buff *skb, list_set_kadt() argument 140 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); list_set_kadt() 144 return list_set_ktest(set, skb, par, opt, &ext); list_set_kadt() 146 return list_set_kadd(set, skb, par, opt, &ext); list_set_kadt() 148 return list_set_kdel(set, skb, par, opt, &ext); list_set_kadt() 156 id_eq(const struct ip_set *set, u32 i, ip_set_id_t id) id_eq() argument 158 const struct list_set *map = set->data; id_eq() 164 e = list_set_elem(set, map, i); id_eq() 166 !(SET_WITH_TIMEOUT(set) && id_eq() 167 ip_set_timeout_expired(ext_timeout(e, set)))); id_eq() 171 list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d, list_set_add() argument 174 struct list_set *map = set->data; list_set_add() 175 struct set_elem *e = list_set_elem(set, map, i); list_set_add() 181 ip_set_ext_destroy(set, e); list_set_add() 183 struct set_elem *x = list_set_elem(set, map, list_set_add() 189 ip_set_ext_destroy(set, x); list_set_add() 191 memmove(list_set_elem(set, map, i + 1), e, list_set_add() 192 set->dsize * (map->size - (i + 1))); list_set_add() 194 memset(e, 0, set->dsize); list_set_add() 199 if (SET_WITH_TIMEOUT(set)) list_set_add() 200 ip_set_timeout_set(ext_timeout(e, set), ext->timeout); list_set_add() 201 if (SET_WITH_COUNTER(set)) list_set_add() 202 ip_set_init_counter(ext_counter(e, set), ext); list_set_add() 203 if (SET_WITH_COMMENT(set)) list_set_add() 204 ip_set_init_comment(ext_comment(e, set), ext); list_set_add() 205 if (SET_WITH_SKBINFO(set)) list_set_add() 206 ip_set_init_skbinfo(ext_skbinfo(e, set), ext); list_set_add() 211 list_set_del(struct ip_set *set, u32 i) list_set_del() argument 213 struct list_set *map = set->data; list_set_del() 214 struct set_elem *e = list_set_elem(set, map, i); list_set_del() 217 ip_set_ext_destroy(set, e); list_set_del() 220 memmove(e, list_set_elem(set, map, i + 1), list_set_del() 221 set->dsize * (map->size - (i + 1))); list_set_del() 224 e = 
list_set_elem(set, map, map->size - 1); list_set_del() 230 set_cleanup_entries(struct ip_set *set) set_cleanup_entries() argument 232 struct list_set *map = set->data; set_cleanup_entries() 237 e = list_set_elem(set, map, i); set_cleanup_entries() 239 ip_set_timeout_expired(ext_timeout(e, set))) set_cleanup_entries() 240 list_set_del(set, i); set_cleanup_entries() 248 list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext, list_set_utest() argument 251 struct list_set *map = set->data; list_set_utest() 258 e = list_set_elem(set, map, i); list_set_utest() 261 else if (SET_WITH_TIMEOUT(set) && list_set_utest() 262 ip_set_timeout_expired(ext_timeout(e, set))) list_set_utest() 270 ret = id_eq(set, i + 1, d->refid); list_set_utest() 272 ret = i > 0 && id_eq(set, i - 1, d->refid); list_set_utest() 280 list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext, list_set_uadd() argument 283 struct list_set *map = set->data; list_set_uadd() 289 if (SET_WITH_TIMEOUT(set)) list_set_uadd() 290 set_cleanup_entries(set); list_set_uadd() 294 e = list_set_elem(set, map, i); list_set_uadd() 300 if ((d->before > 1 && !id_eq(set, i + 1, d->refid)) || list_set_uadd() 302 (i == 0 || !id_eq(set, i - 1, d->refid)))) list_set_uadd() 309 ip_set_ext_destroy(set, e); list_set_uadd() 311 if (SET_WITH_TIMEOUT(set)) list_set_uadd() 312 ip_set_timeout_set(ext_timeout(e, set), ext->timeout); list_set_uadd() 313 if (SET_WITH_COUNTER(set)) list_set_uadd() 314 ip_set_init_counter(ext_counter(e, set), ext); list_set_uadd() 315 if (SET_WITH_COMMENT(set)) list_set_uadd() 316 ip_set_init_comment(ext_comment(e, set), ext); list_set_uadd() 317 if (SET_WITH_SKBINFO(set)) list_set_uadd() 318 ip_set_init_skbinfo(ext_skbinfo(e, set), ext); list_set_uadd() 326 e = list_set_elem(set, map, i); list_set_uadd() 329 : list_set_add(set, i, d, ext); list_set_uadd() 333 ret = list_set_add(set, i, d, ext); list_set_uadd() 335 ret = list_set_add(set, i + 1, d, ext); list_set_uadd() 342 list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext, list_set_udel() argument 345 struct list_set *map = set->data; list_set_udel() 351 e = list_set_elem(set, map, i); list_set_udel() 355 else if (SET_WITH_TIMEOUT(set) && list_set_udel() 356 ip_set_timeout_expired(ext_timeout(e, set))) list_set_udel() 362 return list_set_del(set, i); list_set_udel() 364 if (!id_eq(set, i + 1, d->refid)) list_set_udel() 366 return list_set_del(set, i); list_set_udel() 367 } else if (i == 0 || !id_eq(set, i - 1, d->refid)) list_set_udel() 370 return list_set_del(set, i); list_set_udel() 376 list_set_uadt(struct ip_set *set, struct nlattr *tb[], list_set_uadt() argument 379 struct list_set *map = set->data; list_set_uadt() 380 ipset_adtfn adtfn = set->variant->adt[adt]; list_set_uadt() 382 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); list_set_uadt() 399 ret = ip_set_get_extensions(set, tb, &ext); list_set_uadt() 432 if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set)) list_set_uadt() 433 set_cleanup_entries(set); list_set_uadt() 435 ret = adtfn(set, &e, &ext, &ext, flags); list_set_uadt() 447 list_set_flush(struct ip_set *set) list_set_flush() argument 449 struct list_set *map = set->data; list_set_flush() 454 e = list_set_elem(set, map, i); list_set_flush() 457 ip_set_ext_destroy(set, e); list_set_flush() 464 list_set_destroy(struct ip_set *set) list_set_destroy() argument 466 struct list_set *map = set->data; list_set_destroy() 468 if (SET_WITH_TIMEOUT(set)) list_set_destroy() 470 list_set_flush(set); list_set_destroy() 
473 set->data = NULL; list_set_destroy() 477 list_set_head(struct ip_set *set, struct sk_buff *skb) list_set_head() argument 479 const struct list_set *map = set->data; list_set_head() 486 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || list_set_head() 488 htonl(sizeof(*map) + map->size * set->dsize))) list_set_head() 490 if (unlikely(ip_set_put_flags(skb, set))) list_set_head() 500 list_set_list(const struct ip_set *set, list_set_list() argument 503 const struct list_set *map = set->data; list_set_list() 514 e = list_set_elem(set, map, i); list_set_list() 517 if (SET_WITH_TIMEOUT(set) && list_set_list() 518 ip_set_timeout_expired(ext_timeout(e, set))) list_set_list() 531 if (ip_set_put_extensions(skb, set, e, true)) list_set_list() 580 struct ip_set *set = (struct ip_set *) ul_set; list_set_gc() local 581 struct list_set *map = set->data; list_set_gc() 583 write_lock_bh(&set->lock); list_set_gc() 584 set_cleanup_entries(set); list_set_gc() 585 write_unlock_bh(&set->lock); list_set_gc() 587 map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; list_set_gc() 592 list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) list_set_gc_init() argument 594 struct list_set *map = set->data; list_set_gc_init() 597 map->gc.data = (unsigned long) set; list_set_gc_init() 599 map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; list_set_gc_init() 603 /* Create list:set type of sets */ 606 init_list_set(struct net *net, struct ip_set *set, u32 size) init_list_set() argument 613 min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize, init_list_set() 620 set->data = map; init_list_set() 623 e = list_set_elem(set, map, i); init_list_set() 631 list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], list_set_create() argument 646 set->variant = &set_variant; list_set_create() 647 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem)); list_set_create() 648 if (!init_list_set(net, set, size)) list_set_create() 651 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); list_set_create() 652 list_set_gc_init(set, list_set_gc); list_set_create() 658 .name = "list:set",
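The list:set code revolves around list_set_elem(set, map, id), which indexes an array whose element size (set->dsize) is only fixed at set-creation time, and around memmove() calls that open or close a slot when an element is inserted before/after another or deleted. The fragment below mirrors just that variable-stride arithmetic with invented names; it is not the ipset API.

	#include <stddef.h>
	#include <string.h>

	/* Illustrative variable-stride array: elements of dsize bytes each,
	 * addressed the way list_set_elem() does it: base + id * dsize. */
	struct toy_list {
		size_t dsize;	/* per-element size, fixed at creation */
		size_t size;	/* number of slots */
		unsigned char members[];
	};

	static void *toy_elem(struct toy_list *l, size_t id)
	{
		return l->members + id * l->dsize;
	}

	/* Insert at position i: shift slots i..size-2 up by one (the last slot
	 * falls off), like the memmove(list_set_elem(set, map, i + 1), e, ...)
	 * call in list_set_add(), then copy the new element into slot i. */
	static void toy_insert(struct toy_list *l, size_t i, const void *elem)
	{
		memmove(toy_elem(l, i + 1), toy_elem(l, i),
			l->dsize * (l->size - (i + 1)));
		memcpy(toy_elem(l, i), elem, l->dsize);
	}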
|
H A D | ip_set_bitmap_gen.h | 36 #define get_ext(set, map, id) ((map)->extensions + (set)->dsize * (id)) 39 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) mtype_gc_init() argument 41 struct mtype *map = set->data; mtype_gc_init() 44 map->gc.data = (unsigned long) set; mtype_gc_init() 46 map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; mtype_gc_init() 51 mtype_ext_cleanup(struct ip_set *set) mtype_ext_cleanup() argument 53 struct mtype *map = set->data; mtype_ext_cleanup() 58 ip_set_ext_destroy(set, get_ext(set, map, id)); mtype_ext_cleanup() 62 mtype_destroy(struct ip_set *set) mtype_destroy() argument 64 struct mtype *map = set->data; mtype_destroy() 66 if (SET_WITH_TIMEOUT(set)) mtype_destroy() 70 if (set->dsize) { mtype_destroy() 71 if (set->extensions & IPSET_EXT_DESTROY) mtype_destroy() 72 mtype_ext_cleanup(set); mtype_destroy() 77 set->data = NULL; mtype_destroy() 81 mtype_flush(struct ip_set *set) mtype_flush() argument 83 struct mtype *map = set->data; mtype_flush() 85 if (set->extensions & IPSET_EXT_DESTROY) mtype_flush() 86 mtype_ext_cleanup(set); mtype_flush() 91 mtype_head(struct ip_set *set, struct sk_buff *skb) mtype_head() argument 93 const struct mtype *map = set->data; mtype_head() 100 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || mtype_head() 104 set->dsize * map->elements))) mtype_head() 106 if (unlikely(ip_set_put_flags(skb, set))) mtype_head() 116 mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_test() argument 119 struct mtype *map = set->data; mtype_test() 121 void *x = get_ext(set, map, e->id); mtype_test() 122 int ret = mtype_do_test(e, map, set->dsize); mtype_test() 126 if (SET_WITH_TIMEOUT(set) && mtype_test() 127 ip_set_timeout_expired(ext_timeout(x, set))) mtype_test() 129 if (SET_WITH_COUNTER(set)) mtype_test() 130 ip_set_update_counter(ext_counter(x, set), ext, mext, flags); mtype_test() 131 if (SET_WITH_SKBINFO(set)) mtype_test() 132 ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags); mtype_test() 137 mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_add() argument 140 struct mtype *map = set->data; mtype_add() 142 void *x = get_ext(set, map, e->id); mtype_add() 143 int ret = mtype_do_add(e, map, flags, set->dsize); mtype_add() 146 if (SET_WITH_TIMEOUT(set) && mtype_add() 147 ip_set_timeout_expired(ext_timeout(x, set))) mtype_add() 152 ip_set_ext_destroy(set, x); mtype_add() 155 if (SET_WITH_TIMEOUT(set)) mtype_add() 157 mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret); mtype_add() local 159 ip_set_timeout_set(ext_timeout(x, set), ext->timeout); mtype_add() 162 if (SET_WITH_COUNTER(set)) mtype_add() 163 ip_set_init_counter(ext_counter(x, set), ext); mtype_add() 164 if (SET_WITH_COMMENT(set)) mtype_add() 165 ip_set_init_comment(ext_comment(x, set), ext); mtype_add() 166 if (SET_WITH_SKBINFO(set)) mtype_add() 167 ip_set_init_skbinfo(ext_skbinfo(x, set), ext); mtype_add() 172 mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_del() argument 175 struct mtype *map = set->data; mtype_del() 177 void *x = get_ext(set, map, e->id); mtype_del() 182 ip_set_ext_destroy(set, x); mtype_del() 183 if (SET_WITH_TIMEOUT(set) && mtype_del() 184 ip_set_timeout_expired(ext_timeout(x, set))) mtype_del() 199 mtype_list(const struct ip_set *set, mtype_list() argument 202 struct mtype *map = set->data; mtype_list() 213 x = get_ext(set, map, id); mtype_list() 215 (SET_WITH_TIMEOUT(set) && mtype_list() 219 
ip_set_timeout_expired(ext_timeout(x, set)))) mtype_list() 229 if (mtype_do_list(skb, map, id, set->dsize)) mtype_list() 231 if (ip_set_put_extensions(skb, set, x, mtype_list() 256 struct ip_set *set = (struct ip_set *) ul_set; mtype_gc() local 257 struct mtype *map = set->data; mtype_gc() 263 read_lock_bh(&set->lock); mtype_gc() 265 if (mtype_gc_test(id, map, set->dsize)) { mtype_gc() 266 x = get_ext(set, map, id); mtype_gc() 267 if (ip_set_timeout_expired(ext_timeout(x, set))) { mtype_gc() 269 ip_set_ext_destroy(set, x); mtype_gc() 272 read_unlock_bh(&set->lock); mtype_gc() 274 map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; mtype_gc()
|
H A D | ip_set_hash_gen.h | 30 * are serialized by the nfnl mutex. During resizing the set is 84 /* Book-keeping of the prefixes added to the set */ 87 u8 cidr[IPSET_NET_COUNT]; /* the different cidr values in the set */ 338 /* Calculate the actual memory size of the set data */ 362 mtype_ext_cleanup(struct ip_set *set, struct hbucket *n) mtype_ext_cleanup() argument 367 ip_set_ext_destroy(set, ahash_data(n, i, set->dsize)); mtype_ext_cleanup() 370 /* Flush a hash type of set: destroy all elements */ 372 mtype_flush(struct ip_set *set) mtype_flush() argument 374 struct htype *h = set->data; mtype_flush() 383 if (set->extensions & IPSET_EXT_DESTROY) mtype_flush() 384 mtype_ext_cleanup(set, n); mtype_flush() 391 memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family)); mtype_flush() 396 /* Destroy the hashtable part of the set */ 398 mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy) mtype_ahash_destroy() argument 406 if (set->extensions & IPSET_EXT_DESTROY && ext_destroy) mtype_ahash_destroy() 407 mtype_ext_cleanup(set, n); mtype_ahash_destroy() 416 /* Destroy a hash type of set */ 418 mtype_destroy(struct ip_set *set) mtype_destroy() argument 420 struct htype *h = set->data; mtype_destroy() 422 if (set->extensions & IPSET_EXT_TIMEOUT) mtype_destroy() 425 mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true); mtype_destroy() 431 set->data = NULL; mtype_destroy() 435 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set)) mtype_gc_init() argument 437 struct htype *h = set->data; mtype_gc_init() 440 h->gc.data = (unsigned long) set; mtype_gc_init() 442 h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; mtype_gc_init() 445 IPSET_GC_PERIOD(set->timeout)); mtype_gc_init() 468 mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize) mtype_expire() argument 485 if (ip_set_timeout_expired(ext_timeout(data, set))) { mtype_expire() 492 ip_set_ext_destroy(set, data); mtype_expire() 521 struct ip_set *set = (struct ip_set *) ul_set; mtype_gc() local 522 struct htype *h = set->data; mtype_gc() 525 write_lock_bh(&set->lock); mtype_gc() 526 mtype_expire(set, h, NLEN(set->family), set->dsize); mtype_gc() 527 write_unlock_bh(&set->lock); mtype_gc() 529 h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; mtype_gc() 537 mtype_resize(struct ip_set *set, bool retried) mtype_resize() argument 539 struct htype *h = set->data; mtype_resize() 552 if (SET_WITH_TIMEOUT(set) && !retried) { mtype_resize() 554 write_lock_bh(&set->lock); mtype_resize() 555 mtype_expire(set, set->data, NLEN(set->family), set->dsize); mtype_resize() 556 write_unlock_bh(&set->lock); mtype_resize() 564 pr_debug("attempt to resize set %s from %u to %u, t %p\n", mtype_resize() 565 set->name, orig->htable_bits, htable_bits, orig); mtype_resize() 568 pr_warn("Cannot increase the hashsize of set %s further\n", mtype_resize() 569 set->name); mtype_resize() 578 read_lock_bh(&set->lock); mtype_resize() 582 data = ahash_data(n, j, set->dsize); mtype_resize() 588 ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize); mtype_resize() 593 read_unlock_bh(&set->lock); mtype_resize() 594 mtype_ahash_destroy(set, t, false); mtype_resize() 599 d = ahash_data(m, m->pos++, set->dsize); mtype_resize() 600 memcpy(d, data, set->dsize); mtype_resize() 608 read_unlock_bh(&set->lock); mtype_resize() 610 /* Give time to other readers of the set */ mtype_resize() 613 pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name, mtype_resize() 615 
mtype_ahash_destroy(set, orig, false); mtype_resize() 623 mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_add() argument 626 struct htype *h = set->data; mtype_add() 641 data = ahash_data(n, i, set->dsize); mtype_add() 644 (SET_WITH_TIMEOUT(set) && mtype_add() 645 ip_set_timeout_expired(ext_timeout(data, set)))) { mtype_add() 655 if (SET_WITH_TIMEOUT(set) && mtype_add() 656 ip_set_timeout_expired(ext_timeout(data, set)) && mtype_add() 660 if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) { mtype_add() 665 if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem) mtype_add() 666 /* FIXME: when set is full, we slow down here */ mtype_add() 667 mtype_expire(set, h, NLEN(set->family), set->dsize); mtype_add() 672 set->name, h->maxelem); mtype_add() 680 data = ahash_data(n, j, set->dsize); mtype_add() 684 NLEN(set->family), i); mtype_add() 686 NLEN(set->family), i); mtype_add() 689 ip_set_ext_destroy(set, data); mtype_add() 693 ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize); mtype_add() 699 data = ahash_data(n, n->pos++, set->dsize); mtype_add() 702 mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family), mtype_add() 711 if (SET_WITH_TIMEOUT(set)) mtype_add() 712 ip_set_timeout_set(ext_timeout(data, set), ext->timeout); mtype_add() 713 if (SET_WITH_COUNTER(set)) mtype_add() 714 ip_set_init_counter(ext_counter(data, set), ext); mtype_add() 715 if (SET_WITH_COMMENT(set)) mtype_add() 716 ip_set_init_comment(ext_comment(data, set), ext); mtype_add() 717 if (SET_WITH_SKBINFO(set)) mtype_add() 718 ip_set_init_skbinfo(ext_skbinfo(data, set), ext); mtype_add() 729 mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_del() argument 732 struct htype *h = set->data; mtype_del() 748 data = ahash_data(n, i, set->dsize); mtype_del() 751 if (SET_WITH_TIMEOUT(set) && mtype_del() 752 ip_set_timeout_expired(ext_timeout(data, set))) mtype_del() 756 memcpy(data, ahash_data(n, n->pos - 1, set->dsize), mtype_del() 757 set->dsize); mtype_del() 763 mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family), mtype_del() 766 ip_set_ext_destroy(set, data); mtype_del() 769 * set->dsize, mtype_del() 776 memcpy(tmp, n->value, n->size * set->dsize); mtype_del() 791 struct ip_set_ext *mext, struct ip_set *set, u32 flags) mtype_data_match() 793 if (SET_WITH_COUNTER(set)) mtype_data_match() 794 ip_set_update_counter(ext_counter(data, set), mtype_data_match() 796 if (SET_WITH_SKBINFO(set)) mtype_data_match() 797 ip_set_get_skbinfo(ext_skbinfo(data, set), mtype_data_match() 804 * sizes added to the set */ 806 mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d, mtype_test_cidrs() argument 810 struct htype *h = set->data; mtype_test_cidrs() 821 u8 nets_length = NLEN(set->family); mtype_test_cidrs() 837 data = ahash_data(n, i, set->dsize); mtype_test_cidrs() 840 if (SET_WITH_TIMEOUT(set)) { mtype_test_cidrs() 842 ext_timeout(data, set))) mtype_test_cidrs() 844 mext, set, mtype_test_cidrs() 851 mext, set, flags); mtype_test_cidrs() 861 /* Test whether the element is added to the set */ 863 mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, mtype_test() argument 866 struct htype *h = set->data; mtype_test() 880 if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family)) mtype_test() 883 ret = mtype_test_cidrs(set, d, ext, mext, flags); mtype_test() 891 data = ahash_data(n, i, set->dsize); mtype_test() 893 !(SET_WITH_TIMEOUT(set) && mtype_test() 894 ip_set_timeout_expired(ext_timeout(data, set)))) { mtype_test() 895 ret = 
mtype_data_match(data, ext, mext, set, flags); mtype_test() 904 /* Reply a HEADER request: fill out the header part of the set */ 906 mtype_head(struct ip_set *set, struct sk_buff *skb) mtype_head() argument 908 const struct htype *h = set->data; mtype_head() 914 memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize); mtype_head() 932 if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) || mtype_head() 935 if (unlikely(ip_set_put_flags(skb, set))) mtype_head() 944 /* Reply a LIST/SAVE request: dump the elements of the specified set */ 946 mtype_list(const struct ip_set *set, mtype_list() argument 949 const struct htype *h = set->data; mtype_list() 962 pr_debug("list hash set %s\n", set->name); mtype_list() 970 e = ahash_data(n, i, set->dsize); mtype_list() 971 if (SET_WITH_TIMEOUT(set) && mtype_list() 972 ip_set_timeout_expired(ext_timeout(e, set))) mtype_list() 986 if (ip_set_put_extensions(skb, set, e, true)) mtype_list() 1000 pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n", mtype_list() 1001 set->name); mtype_list() 1010 IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb, 1015 IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[], 1036 IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, IPSET_TOKEN() 1052 if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) IPSET_TOKEN() 1060 netmask = set->family == NFPROTO_IPV4 ? 32 : 128; IPSET_TOKEN() 1061 pr_debug("Create set %s with family %s\n", IPSET_TOKEN() 1062 set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); IPSET_TOKEN() 1087 if ((set->family == NFPROTO_IPV4 && netmask > 32) || IPSET_TOKEN() 1088 (set->family == NFPROTO_IPV6 && netmask > 128) || IPSET_TOKEN() 1104 hsize += sizeof(struct net_prefixes) * NLEN(set->family); IPSET_TOKEN() 1118 set->timeout = IPSET_NO_TIMEOUT; IPSET_TOKEN() 1134 set->data = h; IPSET_TOKEN() 1136 if (set->family == NFPROTO_IPV4) { IPSET_TOKEN() 1138 set->variant = &IPSET_TOKEN(HTYPE, 4_variant); IPSET_TOKEN() 1139 set->dsize = ip_set_elem_len(set, tb, IPSET_TOKEN() 1143 set->variant = &IPSET_TOKEN(HTYPE, 6_variant); IPSET_TOKEN() 1144 set->dsize = ip_set_elem_len(set, tb, IPSET_TOKEN() 1149 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); IPSET_TOKEN() 1151 if (set->family == NFPROTO_IPV4) IPSET_TOKEN() 1153 IPSET_TOKEN(HTYPE, 4_gc_init)(set, IPSET_TOKEN() 1157 IPSET_TOKEN(HTYPE, 6_gc_init)(set, IPSET_TOKEN() 1162 set->name, jhash_size(t->htable_bits), IPSET_TOKEN() 1163 t->htable_bits, h->maxelem, set->data, t); IPSET_TOKEN() 790 mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext, struct ip_set_ext *mext, struct ip_set *set, u32 flags) mtype_data_match() argument
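mtype_resize() in ip_set_hash_gen.h grows a hash set by building a second table with htable_bits + 1, walking the old buckets under the set lock, re-adding every live element, swapping the tables and destroying the old one. The toy open-addressing table below shows the same grow-and-rehash shape; it is a generic illustration, not the ipset structure (which uses arrays of hbuckets and RCU).

	#include <stdint.h>
	#include <stdlib.h>

	/* Toy table of non-zero u32 keys; table size is 1 << bits buckets
	 * (bits must be at least 1 for the hash below). */
	struct toy_htable {
		uint8_t bits;
		uint32_t *slots;	/* 0 means empty */
	};

	static uint32_t toy_hash(uint32_t key, uint8_t bits)
	{
		return (key * 2654435761u) >> (32 - bits);
	}

	static int toy_insert(struct toy_htable *t, uint32_t key)
	{
		uint32_t mask = (1u << t->bits) - 1;
		uint32_t i = toy_hash(key, t->bits);
		uint32_t probes;

		for (probes = 0; probes <= mask; probes++, i = (i + 1) & mask) {
			if (!t->slots[i] || t->slots[i] == key) {
				t->slots[i] = key;
				return 0;
			}
		}
		return -1;	/* full: caller would resize and retry */
	}

	/* Grow by one bit of buckets and reinsert every live entry, the same
	 * overall flow as mtype_resize(). */
	static int toy_resize(struct toy_htable *t)
	{
		struct toy_htable n = { .bits = (uint8_t)(t->bits + 1) };
		uint32_t i;

		n.slots = calloc(1u << n.bits, sizeof(*n.slots));
		if (!n.slots)
			return -1;
		for (i = 0; i < (1u << t->bits); i++)
			if (t->slots[i])
				toy_insert(&n, t->slots[i]);
		free(t->slots);
		*t = n;		/* swap in the bigger table */
		return 0;
	}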
|
H A D | ip_set_core.c | 10 /* Kernel module for IP set management */ 28 static LIST_HEAD(ip_set_type_list); /* all registered set types */ 30 static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ 53 MODULE_DESCRIPTION("core IP set support"); 63 * The set types are implemented in modules and registered set types 97 /* Unlock, try to load a set type module and lock again */ 112 /* Find a set type and reference it */ 149 /* Find a given set type by name and family. 189 /* Register a set type structure. The type is identified by 224 /* Unregister a set type. There's a small race with ip_set_create */ 365 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len) ip_set_elem_len() argument 374 set->flags |= IPSET_CREATE_FLAG_FORCEADD; ip_set_elem_len() 379 set->offset[id] = offset; ip_set_elem_len() 380 set->extensions |= ip_set_extensions[id].type; ip_set_elem_len() 388 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], ip_set_get_extensions() argument 393 if (!(set->extensions & IPSET_EXT_TIMEOUT)) ip_set_get_extensions() 398 if (!(set->extensions & IPSET_EXT_COUNTER)) ip_set_get_extensions() 408 if (!(set->extensions & IPSET_EXT_COMMENT)) ip_set_get_extensions() 413 if (!(set->extensions & IPSET_EXT_SKBINFO)) ip_set_get_extensions() 420 if (!(set->extensions & IPSET_EXT_SKBINFO)) ip_set_get_extensions() 426 if (!(set->extensions & IPSET_EXT_SKBINFO)) ip_set_get_extensions() 437 * the properties of a set. All of these can be executed from userspace 441 * is used by the external references (set/SET netfilter modules). 443 * The set behind an index may change by swapping only, from userspace. 447 __ip_set_get(struct ip_set *set) __ip_set_get() argument 450 set->ref++; __ip_set_get() 455 __ip_set_put(struct ip_set *set) __ip_set_put() argument 458 BUG_ON(set->ref == 0); __ip_set_put() 459 set->ref--; __ip_set_put() 464 * Add, del and test set entries from kernel. 
466 * The set behind the index must exist and must be referenced 473 struct ip_set *set; ip_set_rcu_get() local 478 set = rcu_dereference(inst->ip_set_list)[index]; ip_set_rcu_get() 481 return set; ip_set_rcu_get() 488 struct ip_set *set = ip_set_rcu_get( ip_set_test() local 492 BUG_ON(set == NULL); ip_set_test() 493 pr_debug("set %s, index %u\n", set->name, index); ip_set_test() 495 if (opt->dim < set->type->dimension || ip_set_test() 496 !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) ip_set_test() 499 read_lock_bh(&set->lock); ip_set_test() 500 ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt); ip_set_test() 501 read_unlock_bh(&set->lock); ip_set_test() 506 write_lock_bh(&set->lock); ip_set_test() 507 set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_test() 508 write_unlock_bh(&set->lock); ip_set_test() 513 (set->type->features & IPSET_TYPE_NOMATCH) && ip_set_test() 527 struct ip_set *set = ip_set_rcu_get( ip_set_add() local 531 BUG_ON(set == NULL); ip_set_add() 532 pr_debug("set %s, index %u\n", set->name, index); ip_set_add() 534 if (opt->dim < set->type->dimension || ip_set_add() 535 !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) ip_set_add() 538 write_lock_bh(&set->lock); ip_set_add() 539 ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt); ip_set_add() 540 write_unlock_bh(&set->lock); ip_set_add() 550 struct ip_set *set = ip_set_rcu_get( ip_set_del() local 554 BUG_ON(set == NULL); ip_set_del() 555 pr_debug("set %s, index %u\n", set->name, index); ip_set_del() 557 if (opt->dim < set->type->dimension || ip_set_del() 558 !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) ip_set_del() 561 write_lock_bh(&set->lock); ip_set_del() 562 ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt); ip_set_del() 563 write_unlock_bh(&set->lock); ip_set_del() 570 * Find set by name, reference it once. The reference makes sure the 575 ip_set_get_byname(struct net *net, const char *name, struct ip_set **set) ip_set_get_byname() argument 587 *set = s; ip_set_get_byname() 598 * If the given set pointer points to a valid set, decrement 607 struct ip_set *set; __ip_set_put_byindex() local 610 set = rcu_dereference(inst->ip_set_list)[index]; __ip_set_put_byindex() 611 if (set != NULL) __ip_set_put_byindex() 612 __ip_set_put(set); __ip_set_put_byindex() 626 * Get the name of a set behind a set index. 627 * We assume the set is referenced, so it does exist and 628 * can't be destroyed. The set cannot be renamed due to 635 const struct ip_set *set = ip_set_rcu_get(net, index); ip_set_name_byindex() local 637 BUG_ON(set == NULL); ip_set_name_byindex() 638 BUG_ON(set->ref == 0); ip_set_name_byindex() 641 return set->name; ip_set_name_byindex() 651 * Find set by index, reference it once. The reference makes sure the 659 struct ip_set *set; ip_set_nfnl_get_byindex() local 666 set = ip_set(inst, index); ip_set_nfnl_get_byindex() 667 if (set) ip_set_nfnl_get_byindex() 668 __ip_set_get(set); ip_set_nfnl_get_byindex() 678 * If the given set pointer points to a valid set, decrement 687 struct ip_set *set; ip_set_nfnl_put() local 692 set = ip_set(inst, index); ip_set_nfnl_put() 693 if (set != NULL) ip_set_nfnl_put() 694 __ip_set_put(set); ip_set_nfnl_put() 739 /* Create a set */ 755 struct ip_set *set = NULL; find_set_and_id() local 760 set = ip_set(inst, i); find_set_and_id() 761 if (set != NULL && STREQ(set->name, name)) { find_set_and_id() 766 return (*id == IPSET_INVALID_ID ? 
NULL : set); find_set_and_id() 779 struct ip_set **set) find_free_id() 792 *set = s; find_free_id() 817 struct ip_set *set, *clash = NULL; ip_set_create() local 843 * a normal base set structure. ip_set_create() 845 set = kzalloc(sizeof(struct ip_set), GFP_KERNEL); ip_set_create() 846 if (!set) ip_set_create() 848 rwlock_init(&set->lock); ip_set_create() 849 strlcpy(set->name, name, IPSET_MAXNAMELEN); ip_set_create() 850 set->family = family; ip_set_create() 851 set->revision = revision; ip_set_create() 856 * while constructing our new set. ip_set_create() 859 * specific part of the set without holding any locks. ip_set_create() 861 ret = find_set_type_get(typename, family, revision, &(set->type)); ip_set_create() 870 set->type->create_policy)) { ip_set_create() 875 ret = set->type->create(net, set, tb, flags); ip_set_create() 882 * Here, we have a valid, constructed set and we are protected ip_set_create() 886 ret = find_free_id(inst, set->name, &index, &clash); ip_set_create() 888 /* If this is the same set and requested, ignore error */ ip_set_create() 890 STREQ(set->type->name, clash->type->name) && ip_set_create() 891 set->type->family == clash->type->family && ip_set_create() 892 set->type->revision_min == clash->type->revision_min && ip_set_create() 893 set->type->revision_max == clash->type->revision_max && ip_set_create() 894 set->variant->same_set(set, clash)) ip_set_create() 923 * Finally! Add our shiny new set to the list, and be done. ip_set_create() 925 pr_debug("create: '%s' created with index %u!\n", set->name, index); ip_set_create() 926 ip_set(inst, index) = set; ip_set_create() 931 set->variant->destroy(set); ip_set_create() 933 module_put(set->type->me); ip_set_create() 935 kfree(set); ip_set_create() 951 struct ip_set *set = ip_set(inst, index); ip_set_destroy_set() local 953 pr_debug("set: %s\n", set->name); ip_set_destroy_set() 957 set->variant->destroy(set); ip_set_destroy_set() 958 module_put(set->type->me); ip_set_destroy_set() 959 kfree(set); ip_set_destroy_set() 981 * list:set timer can only decrement the reference ip_set_destroy() 1023 ip_set_flush_set(struct ip_set *set) ip_set_flush_set() argument 1025 pr_debug("set: %s\n", set->name); ip_set_flush_set() 1027 write_lock_bh(&set->lock); ip_set_flush_set() 1028 set->variant->flush(set); ip_set_flush_set() 1029 write_unlock_bh(&set->lock); ip_set_flush_set() 1061 /* Rename a set */ 1078 struct ip_set *set, *s; ip_set_rename() local 1088 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); ip_set_rename() 1089 if (set == NULL) ip_set_rename() 1093 if (set->ref != 0) { ip_set_rename() 1106 strncpy(set->name, name2, IPSET_MAXNAMELEN); ip_set_rename() 1114 * References and set names are also swapped. 
1167 /* List/save set data */ 1182 pr_debug("release set %s\n", ip_set_dump_done() 1217 * [IPSET_CB_DUMP]: dump single set/all sets dump_init() 1218 * [IPSET_CB_INDEX]: set index dump_init() 1223 struct ip_set *set; dump_init() local 1225 set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]), dump_init() 1227 if (set == NULL) dump_init() 1249 struct ip_set *set = NULL; ip_set_dump_start() local 1280 set = ip_set(inst, index); ip_set_dump_start() 1281 if (set == NULL) { ip_set_dump_start() 1293 !!(set->type->features & IPSET_DUMP_LAST))) ip_set_dump_start() 1295 pr_debug("List set: %s\n", set->name); ip_set_dump_start() 1297 /* Start listing: make sure set won't be destroyed */ ip_set_dump_start() 1298 pr_debug("reference set\n"); ip_set_dump_start() 1299 __ip_set_get(set); ip_set_dump_start() 1309 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name)) ip_set_dump_start() 1317 set->type->name) || ip_set_dump_start() 1319 set->family) || ip_set_dump_start() 1321 set->revision)) ip_set_dump_start() 1323 ret = set->variant->head(set, skb); ip_set_dump_start() 1330 read_lock_bh(&set->lock); ip_set_dump_start() 1331 ret = set->variant->list(set, skb, cb); ip_set_dump_start() 1332 read_unlock_bh(&set->lock); ip_set_dump_start() 1356 /* If there was an error or set is done, release set */ ip_set_dump_start() 1358 pr_debug("release set %s\n", ip_set(inst, index)->name); ip_set_dump_start() 1401 call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, call_ad() argument 1410 write_lock_bh(&set->lock); call_ad() 1411 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); call_ad() 1412 write_unlock_bh(&set->lock); call_ad() 1415 set->variant->resize && call_ad() 1416 (ret = set->variant->resize(set, retried)) == 0); call_ad() 1464 struct ip_set *set; ip_set_uadd() local 1482 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); ip_set_uadd() 1483 if (set == NULL) ip_set_uadd() 1490 set->type->adt_policy)) ip_set_uadd() 1492 ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags, ip_set_uadd() 1502 set->type->adt_policy)) nla_for_each_nested() 1504 ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, nla_for_each_nested() 1519 struct ip_set *set; ip_set_udel() local 1537 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); ip_set_udel() 1538 if (set == NULL) ip_set_udel() 1545 set->type->adt_policy)) ip_set_udel() 1547 ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags, ip_set_udel() 1557 set->type->adt_policy)) nla_for_each_nested() 1559 ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, nla_for_each_nested() 1574 struct ip_set *set; ip_set_utest() local 1584 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); ip_set_utest() 1585 if (set == NULL) ip_set_utest() 1589 set->type->adt_policy)) ip_set_utest() 1592 read_lock_bh(&set->lock); ip_set_utest() 1593 ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); ip_set_utest() 1594 read_unlock_bh(&set->lock); ip_set_utest() 1602 /* Get headed data of a set */ 1610 const struct ip_set *set; ip_set_header() local 1619 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME])); ip_set_header() 1620 if (set == NULL) ip_set_header() 1632 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) || ip_set_header() 1633 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) || ip_set_header() 1634 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) || ip_set_header() 1635 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision)) ip_set_header() 1900 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; ip_set_sockfn_get() 1902 find_set_and_id(inst, 
req_get->set.name, &id); ip_set_sockfn_get() 1903 req_get->set.index = id; ip_set_sockfn_get() 1915 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0'; ip_set_sockfn_get() 1917 find_set_and_id(inst, req_get->set.name, &id); ip_set_sockfn_get() 1918 req_get->set.index = id; ip_set_sockfn_get() 1926 struct ip_set *set; ip_set_sockfn_get() local 1929 req_get->set.index >= inst->ip_set_max) { ip_set_sockfn_get() 1934 set = ip_set(inst, req_get->set.index); ip_set_sockfn_get() 1935 strncpy(req_get->set.name, set ? set->name : "", ip_set_sockfn_get() 1986 struct ip_set *set = NULL; ip_set_net_exit() local 1992 set = ip_set(inst, i); ip_set_net_exit() 1993 if (set != NULL) ip_set_net_exit() 778 find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index, struct ip_set **set) find_free_id() argument
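ip_set_core.c manages set lifetime with a plain counter protected by ip_set_ref_lock: __ip_set_get() increments it, __ip_set_put() treats underflow as a bug and decrements, and destroy/rename bail out while set->ref is non-zero. A user-space sketch of that discipline follows, with a pthread mutex standing in for the kernel rwlock and invented names.

	#include <assert.h>
	#include <pthread.h>

	/* Illustrative reference counting in the style of __ip_set_get() and
	 * __ip_set_put(): a lock protects the counter, underflow is a bug. */
	struct toy_set {
		pthread_mutex_t lock;
		unsigned int ref;
	};

	static void toy_set_get(struct toy_set *set)
	{
		pthread_mutex_lock(&set->lock);
		set->ref++;
		pthread_mutex_unlock(&set->lock);
	}

	static void toy_set_put(struct toy_set *set)
	{
		pthread_mutex_lock(&set->lock);
		assert(set->ref != 0);	/* mirrors BUG_ON(set->ref == 0) */
		set->ref--;
		pthread_mutex_unlock(&set->lock);
	}

	/* Destroy or rename is only legal when nothing holds a reference. */
	static int toy_set_may_destroy(struct toy_set *set)
	{
		int busy;

		pthread_mutex_lock(&set->lock);
		busy = set->ref != 0;
		pthread_mutex_unlock(&set->lock);
		return !busy;
	}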
|
H A D | ip_set_bitmap_ip.c | 10 /* Kernel module implementing an IP set type: the bitmap:ip type */ 42 void *members; /* the set members */ 46 u32 elements; /* number of max elements in the set */ 110 bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_ip_kadt() argument 114 struct bitmap_ip *map = set->data; bitmap_ip_kadt() 115 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_ip_kadt() 117 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); bitmap_ip_kadt() 126 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); bitmap_ip_kadt() 130 bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_ip_uadt() argument 133 struct bitmap_ip *map = set->data; bitmap_ip_uadt() 134 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_ip_uadt() 137 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); bitmap_ip_uadt() 153 ip_set_get_extensions(set, tb, &ext); bitmap_ip_uadt() 162 return adtfn(set, &e, &ext, &ext, flags); bitmap_ip_uadt() 188 ret = adtfn(set, &e, &ext, &ext, flags); bitmap_ip_uadt() 221 init_map_ip(struct ip_set *set, struct bitmap_ip *map, init_map_ip() argument 228 if (set->dsize) { init_map_ip() 229 map->extensions = ip_set_alloc(set->dsize * elements); init_map_ip() 240 set->timeout = IPSET_NO_TIMEOUT; init_map_ip() 242 set->data = map; init_map_ip() 243 set->family = NFPROTO_IPV4; init_map_ip() 249 bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_ip_create() argument 324 set->variant = &bitmap_ip; bitmap_ip_create() 325 set->dsize = ip_set_elem_len(set, tb, 0); bitmap_ip_create() 326 if (!init_map_ip(set, map, first_ip, last_ip, bitmap_ip_create() 332 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_ip_create() 333 bitmap_ip_gc_init(set, bitmap_ip_gc); bitmap_ip_create()
|
H A D | ip_set_bitmap_port.c | 8 /* Kernel module implementing an IP set type: the bitmap:port type */ 37 void *members; /* the set members */ 41 u32 elements; /* number of max elements in the set */ 102 bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_port_kadt() argument 106 struct bitmap_port *map = set->data; bitmap_port_kadt() 107 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_port_kadt() 109 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); bitmap_port_kadt() 124 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); bitmap_port_kadt() 128 bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_port_uadt() argument 131 struct bitmap_port *map = set->data; bitmap_port_uadt() 132 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_port_uadt() 134 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); bitmap_port_uadt() 155 ret = ip_set_get_extensions(set, tb, &ext); bitmap_port_uadt() 161 return adtfn(set, &e, &ext, &ext, flags); bitmap_port_uadt() 179 ret = adtfn(set, &e, &ext, &ext, flags); bitmap_port_uadt() 211 init_map_port(struct ip_set *set, struct bitmap_port *map, init_map_port() argument 217 if (set->dsize) { init_map_port() 218 map->extensions = ip_set_alloc(set->dsize * map->elements); init_map_port() 226 set->timeout = IPSET_NO_TIMEOUT; init_map_port() 228 set->data = map; init_map_port() 229 set->family = NFPROTO_UNSPEC; init_map_port() 235 bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_port_create() argument 262 set->variant = &bitmap_port; bitmap_port_create() 263 set->dsize = ip_set_elem_len(set, tb, 0); bitmap_port_create() 264 if (!init_map_port(set, map, first_port, last_port)) { bitmap_port_create() 269 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_port_create() 270 bitmap_port_gc_init(set, bitmap_port_gc); bitmap_port_create()
|
H A D | ip_set_hash_ipmark.c | 9 /* Kernel module implementing an IP set type: the hash:ip,mark type */ 86 hash_ipmark4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipmark4_kadt() argument 90 const struct hash_ipmark *h = set->data; hash_ipmark4_kadt() 91 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipmark4_kadt() 93 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipmark4_kadt() 99 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipmark4_kadt() 103 hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipmark4_uadt() argument 106 const struct hash_ipmark *h = set->data; hash_ipmark4_uadt() 107 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipmark4_uadt() 109 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipmark4_uadt() 127 ip_set_get_extensions(set, tb, &ext); hash_ipmark4_uadt() 136 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipmark4_uadt() 159 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipmark4_uadt() 220 hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipmark6_kadt() argument 224 const struct hash_ipmark *h = set->data; hash_ipmark6_kadt() 225 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipmark6_kadt() 227 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipmark6_kadt() 233 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipmark6_kadt() 237 hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipmark6_uadt() argument 240 const struct hash_ipmark *h = set->data; hash_ipmark6_uadt() 241 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipmark6_uadt() 243 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipmark6_uadt() 262 ip_set_get_extensions(set, tb, &ext); hash_ipmark6_uadt() 270 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipmark6_uadt() 274 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipmark6_uadt()
|
H A D | ip_set_hash_ip.c | 8 /* Kernel module implementing an IP set type: the hash:ip type */ 82 hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ip4_kadt() argument 86 const struct hash_ip *h = set->data; hash_ip4_kadt() 87 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ip4_kadt() 89 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ip4_kadt() 98 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ip4_kadt() 102 hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip4_uadt() argument 105 const struct hash_ip *h = set->data; hash_ip4_uadt() 106 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ip4_uadt() 108 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ip4_uadt() 125 ip_set_get_extensions(set, tb, &ext); hash_ip4_uadt() 135 return adtfn(set, &e, &ext, &ext, flags); hash_ip4_uadt() 161 ret = adtfn(set, &e, &ext, &ext, flags); hash_ip4_uadt() 223 hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ip6_kadt() argument 227 const struct hash_ip *h = set->data; hash_ip6_kadt() 228 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ip6_kadt() 230 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ip6_kadt() 237 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ip6_kadt() 241 hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip6_uadt() argument 244 const struct hash_ip *h = set->data; hash_ip6_uadt() 245 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ip6_uadt() 247 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ip6_uadt() 265 ip_set_get_extensions(set, tb, &ext); hash_ip6_uadt() 273 ret = adtfn(set, &e, &ext, &ext, flags); hash_ip6_uadt()
|
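The kadt/uadt pairs above all funnel into the same dispatch: the set's variant table supplies one handler per add/del/test operation, and the front end only builds the element and forwards it. A simplified model of that pattern with stand-in types, not the kernel's definitions:

enum ipset_demo_op { DEMO_OP_ADD, DEMO_OP_DEL, DEMO_OP_TEST, DEMO_OP_MAX };

struct ipset_demo;
typedef int (*ipset_demo_adtfn)(struct ipset_demo *set, const void *elem);

struct ipset_demo_variant {
	ipset_demo_adtfn adt[DEMO_OP_MAX];	/* one handler per operation */
};

struct ipset_demo {
	const struct ipset_demo_variant *variant;
	void *data;		/* type-specific storage, e.g. the bitmap */
};

static int ipset_demo_do(struct ipset_demo *set, enum ipset_demo_op op,
			 const void *elem)
{
	return set->variant->adt[op](set, elem);
}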
H A D | ip_set_bitmap_ipmac.c | 11 /* Kernel module implementing an IP set type: the bitmap:ip,mac type */ 42 MAC_UNSET, /* element is set, without MAC */ 43 MAC_FILLED, /* element is set with MAC */ 48 void *members; /* the set members */ 52 u32 elements; /* number of max elements in the set */ 119 const struct ip_set_ext *ext, struct ip_set *set, bitmap_ipmac_add_timeout() 125 if (t == set->timeout) bitmap_ipmac_add_timeout() 201 bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, bitmap_ipmac_kadt() argument 205 struct bitmap_ipmac *map = set->data; bitmap_ipmac_kadt() 206 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_ipmac_kadt() 208 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); bitmap_ipmac_kadt() 227 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); bitmap_ipmac_kadt() 231 bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_ipmac_uadt() argument 234 const struct bitmap_ipmac *map = set->data; bitmap_ipmac_uadt() 235 ipset_adtfn adtfn = set->variant->adt[adt]; bitmap_ipmac_uadt() 237 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); bitmap_ipmac_uadt() 254 ip_set_get_extensions(set, tb, &ext); bitmap_ipmac_uadt() 267 ret = adtfn(set, &e, &ext, &ext, flags); bitmap_ipmac_uadt() 291 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, init_map_ipmac() argument 297 if (set->dsize) { init_map_ipmac() 298 map->extensions = ip_set_alloc(set->dsize * elements); init_map_ipmac() 307 set->timeout = IPSET_NO_TIMEOUT; init_map_ipmac() 309 set->data = map; init_map_ipmac() 310 set->family = NFPROTO_IPV4; init_map_ipmac() 316 bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_ipmac_create() argument 362 set->variant = &bitmap_ipmac; bitmap_ipmac_create() 363 set->dsize = ip_set_elem_len(set, tb, bitmap_ipmac_create() 365 if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { bitmap_ipmac_create() 370 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_ipmac_create() 371 bitmap_ipmac_gc_init(set, bitmap_ipmac_gc); bitmap_ipmac_create() 117 bitmap_ipmac_add_timeout(unsigned long *timeout, const struct bitmap_ipmac_adt_elem *e, const struct ip_set_ext *ext, struct ip_set *set, struct bitmap_ipmac *map, int mode) bitmap_ipmac_add_timeout() argument
|
H A D | ip_set_hash_net.c | 8 /* Kernel module implementing an IP set type: the hash:net type */ 117 hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_net4_kadt() argument 121 const struct hash_net *h = set->data; hash_net4_kadt() 122 ipset_adtfn adtfn = set->variant->adt[adt]; hash_net4_kadt() 126 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_net4_kadt() 136 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_net4_kadt() 140 hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], hash_net4_uadt() argument 143 const struct hash_net *h = set->data; hash_net4_uadt() 144 ipset_adtfn adtfn = set->variant->adt[adt]; hash_net4_uadt() 146 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_net4_uadt() 164 ip_set_get_extensions(set, tb, &ext); hash_net4_uadt() 182 ret = adtfn(set, &e, &ext, &ext, flags); hash_net4_uadt() 183 return ip_set_enomatch(ret, flags, adt, set) ? -ret: hash_net4_uadt() 202 ret = adtfn(set, &e, &ext, &ext, flags); hash_net4_uadt() 290 hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_net6_kadt() argument 294 const struct hash_net *h = set->data; hash_net6_kadt() 295 ipset_adtfn adtfn = set->variant->adt[adt]; hash_net6_kadt() 299 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_net6_kadt() 309 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_net6_kadt() 313 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], hash_net6_uadt() argument 316 ipset_adtfn adtfn = set->variant->adt[adt]; hash_net6_uadt() 318 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_net6_uadt() 337 ip_set_get_extensions(set, tb, &ext); hash_net6_uadt() 355 ret = adtfn(set, &e, &ext, &ext, flags); hash_net6_uadt() 357 return ip_set_enomatch(ret, flags, adt, set) ? -ret : hash_net6_uadt()
|
H A D | ip_set_hash_netnet.c | 9 /* Kernel module implementing an IP set type: the hash:net type */ 136 hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netnet4_kadt() argument 140 const struct hash_netnet *h = set->data; hash_netnet4_kadt() 141 ipset_adtfn adtfn = set->variant->adt[adt]; hash_netnet4_kadt() 143 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_netnet4_kadt() 155 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_netnet4_kadt() 159 hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netnet4_uadt() argument 162 const struct hash_netnet *h = set->data; hash_netnet4_uadt() 163 ipset_adtfn adtfn = set->variant->adt[adt]; hash_netnet4_uadt() 165 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_netnet4_uadt() 187 ip_set_get_extensions(set, tb, &ext); hash_netnet4_uadt() 215 ret = adtfn(set, &e, &ext, &ext, flags); hash_netnet4_uadt() 216 return ip_set_enomatch(ret, flags, adt, set) ? -ret : hash_netnet4_uadt() 258 ret = adtfn(set, &e, &ext, &ext, flags); hash_netnet4_uadt() 367 hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_netnet6_kadt() argument 371 const struct hash_netnet *h = set->data; hash_netnet6_kadt() 372 ipset_adtfn adtfn = set->variant->adt[adt]; hash_netnet6_kadt() 374 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_netnet6_kadt() 386 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_netnet6_kadt() 390 hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netnet6_uadt() argument 393 ipset_adtfn adtfn = set->variant->adt[adt]; hash_netnet6_uadt() 395 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_netnet6_uadt() 416 ip_set_get_extensions(set, tb, &ext); hash_netnet6_uadt() 439 ret = adtfn(set, &e, &ext, &ext, flags); hash_netnet6_uadt() 441 return ip_set_enomatch(ret, flags, adt, set) ? -ret : hash_netnet6_uadt()
|
H A D | ip_set_hash_ipport.c | 8 /* Kernel module implementing an IP set type: the hash:ip,port type */ 93 hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipport4_kadt() argument 97 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipport4_kadt() 99 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipport4_kadt() 106 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipport4_kadt() 110 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport4_uadt() argument 113 const struct hash_ipport *h = set->data; hash_ipport4_uadt() 114 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipport4_uadt() 116 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipport4_uadt() 136 ip_set_get_extensions(set, tb, &ext); hash_ipport4_uadt() 160 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipport4_uadt() 194 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipport4_uadt() 260 hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipport6_kadt() argument 264 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipport6_kadt() 266 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipport6_kadt() 273 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipport6_kadt() 277 hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport6_uadt() argument 280 const struct hash_ipport *h = set->data; hash_ipport6_uadt() 281 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipport6_uadt() 283 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipport6_uadt() 305 ip_set_get_extensions(set, tb, &ext); hash_ipport6_uadt() 327 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipport6_uadt() 340 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipport6_uadt()
|
H A D | ip_set_hash_ipportip.c | 8 /* Kernel module implementing an IP set type: the hash:ip,port,ip type */ 94 hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportip4_kadt() argument 98 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipportip4_kadt() 100 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipportip4_kadt() 108 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipportip4_kadt() 112 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip4_uadt() argument 115 const struct hash_ipportip *h = set->data; hash_ipportip4_uadt() 116 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipportip4_uadt() 118 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipportip4_uadt() 138 ip_set_get_extensions(set, tb, &ext); hash_ipportip4_uadt() 166 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipportip4_uadt() 200 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipportip4_uadt() 267 hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, hash_ipportip6_kadt() argument 271 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipportip6_kadt() 273 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_ipportip6_kadt() 281 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_ipportip6_kadt() 285 hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip6_uadt() argument 288 const struct hash_ipportip *h = set->data; hash_ipportip6_uadt() 289 ipset_adtfn adtfn = set->variant->adt[adt]; hash_ipportip6_uadt() 291 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_ipportip6_uadt() 313 ip_set_get_extensions(set, tb, &ext); hash_ipportip6_uadt() 339 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipportip6_uadt() 352 ret = adtfn(set, &e, &ext, &ext, flags); hash_ipportip6_uadt()
|
H A D | ip_set_hash_mac.c | 8 /* Kernel module implementing an IP set type: the hash:mac type */ 75 hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb, hash_mac4_kadt() argument 79 ipset_adtfn adtfn = set->variant->adt[adt]; hash_mac4_kadt() 81 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); hash_mac4_kadt() 94 return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); hash_mac4_kadt() 98 hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[], hash_mac4_uadt() argument 101 ipset_adtfn adtfn = set->variant->adt[adt]; hash_mac4_uadt() 103 struct ip_set_ext ext = IP_SET_INIT_UEXT(set); hash_mac4_uadt() 118 ret = ip_set_get_extensions(set, tb, &ext); hash_mac4_uadt() 125 return adtfn(set, &e, &ext, &ext, flags); hash_mac4_uadt()
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | signal.h | 43 #define sigaddset(set,sig) \ 45 ? __const_sigaddset((set), (sig)) \ 46 : __gen_sigaddset((set), (sig))) 48 static inline void __gen_sigaddset(sigset_t *set, int _sig) __gen_sigaddset() argument 50 asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); __gen_sigaddset() 53 static inline void __const_sigaddset(sigset_t *set, int _sig) __const_sigaddset() argument 56 set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); __const_sigaddset() 59 #define sigdelset(set, sig) \ 61 ? __const_sigdelset((set), (sig)) \ 62 : __gen_sigdelset((set), (sig))) 65 static inline void __gen_sigdelset(sigset_t *set, int _sig) __gen_sigdelset() argument 67 asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); __gen_sigdelset() 70 static inline void __const_sigdelset(sigset_t *set, int _sig) __const_sigdelset() argument 73 set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); __const_sigdelset() 76 static inline int __const_sigismember(sigset_t *set, int _sig) __const_sigismember() argument 79 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); __const_sigismember() 82 static inline int __gen_sigismember(sigset_t *set, int _sig) __gen_sigismember() argument 86 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); __gen_sigismember() 90 #define sigismember(set, sig) \ 92 ? __const_sigismember((set), (sig)) \ 93 : __gen_sigismember((set), (sig)))
|
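The sigaddset()/sigdelset() macros above select the __const_* variants when the signal number is known at build time and fall back to the btsl/btrl versions otherwise; the selecting condition itself is not visible in this hit list. The word/bit split used by the constant variants, redone as a free-standing program with mocked-up names (DEMO_NSIG and friends mirror _NSIG, _NSIG_BPW and _NSIG_WORDS):

#include <signal.h>
#include <stdio.h>

#define DEMO_NSIG	64
#define DEMO_NSIG_BPW	(8 * sizeof(unsigned long))
#define DEMO_NSIG_WORDS	(DEMO_NSIG / DEMO_NSIG_BPW)

typedef struct { unsigned long sig[DEMO_NSIG_WORDS]; } demo_sigset_t;

static void demo_sigaddset(demo_sigset_t *set, int _sig)
{
	unsigned int sig = _sig - 1;	/* signal numbers are 1-based */

	set->sig[sig / DEMO_NSIG_BPW] |= 1UL << (sig % DEMO_NSIG_BPW);
}

int main(void)
{
	demo_sigset_t set = { { 0 } };

	demo_sigaddset(&set, SIGUSR1);
	printf("word 0 = %#lx\n", set.sig[0]);	/* bit SIGUSR1-1 is set */
	return 0;
}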
/linux-4.1.27/include/uapi/linux/netfilter/ipset/ |
H A D | ip_set_list.h | 8 /* list:set type is not permitted to add */ 10 /* Missing reference set */ 12 /* Reference set does not exist */ 16 /* Reference set is not added to the set */
|
H A D | ip_set_bitmap.h | 6 /* The element is out of the range of the set */ 8 /* The range exceeds the size limit of the set type */
|
H A D | ip_set.h | 21 /* The max length of strings including NUL: set and type identifiers */ 28 IPSET_CMD_CREATE, /* 2: Create a new (empty) set */ 29 IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */ 30 IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */ 31 IPSET_CMD_RENAME, /* 5: Rename a set */ 35 IPSET_CMD_ADD, /* 9: Add an element to a set */ 36 IPSET_CMD_DEL, /* 10: Delete an element from a set */ 37 IPSET_CMD_TEST, /* 11: Test an element in a set */ 38 IPSET_CMD_HEADER, /* 12: Get set header data only */ 39 IPSET_CMD_TYPE, /* 13: Get set type */ 57 IPSET_ATTR_SETNAME, /* 2: Name of the set */ 238 /* Backward compatibility: set match revision 2 */ 259 /* Backward compatibility for set match v3 */ 279 #define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ 283 union ip_set_name_index set; member in struct:ip_set_req_get_set 286 #define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */ 289 #define IP_SET_OP_GET_FNAME 0x00000008 /* Get set index and family */ 294 union ip_set_name_index set; member in struct:ip_set_req_get_set_family
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | signal.h | 26 static inline void sigaddset(sigset_t *set, int _sig) sigaddset() argument 29 : "+o" (*set) sigaddset() 34 static inline void sigdelset(sigset_t *set, int _sig) sigdelset() argument 37 : "+o" (*set) sigdelset() 42 static inline int __const_sigismember(sigset_t *set, int _sig) __const_sigismember() argument 45 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); __const_sigismember() 48 static inline int __gen_sigismember(sigset_t *set, int _sig) __gen_sigismember() argument 53 : "o" (*set), "id" ((_sig-1) ^ 31) __gen_sigismember() 58 #define sigismember(set,sig) \ 60 __const_sigismember(set,sig) : \ 61 __gen_sigismember(set,sig))
|
H A D | cacheflush_mm.h | 63 unsigned long set; flush_cf_icache() local 65 for (set = start; set <= end; set += (0x10 - 3)) { flush_cf_icache() 74 : "=a" (set) flush_cf_icache() 75 : "a" (set)); flush_cf_icache() 81 unsigned long set; flush_cf_dcache() local 83 for (set = start; set <= end; set += (0x10 - 3)) { flush_cf_dcache() 92 : "=a" (set) flush_cf_dcache() 93 : "a" (set)); flush_cf_dcache() 99 unsigned long set; flush_cf_bcache() local 101 for (set = start; set <= end; set += (0x10 - 3)) { flush_cf_bcache() 110 : "=a" (set) flush_cf_bcache() 111 : "a" (set)); flush_cf_bcache()
|
/linux-4.1.27/include/linux/platform_data/ |
H A D | ad7791.h | 6 * @buffered: If set to true configure the device for buffered input mode. 7 * @burnout_current: If set to true the 100mA burnout current is enabled. 8 * @unipolar: If set to true sample in unipolar mode, if set to false sample in
|
H A D | adau1977.h | 15 * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V 16 * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V 17 * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V 18 * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V 19 * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V 20 * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V 21 * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V 22 * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V 23 * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
|
H A D | max6697.h | 21 bool smbus_timeout_disable; /* set to disable SMBus timeouts */ 22 bool extended_range_enable; /* set to enable extended temp range */ 23 bool beta_compensation; /* set to enable beta compensation */ 24 u8 alert_mask; /* set bit to 1 to disable alert */ 25 u8 over_temperature_mask; /* set bit to 1 to disable */ 26 u8 resistance_cancellation; /* set bit to 0 to disable 30 u8 ideality_mask; /* set bit to 0 to disable */
|
H A D | ad7887.h | 13 * @en_dual: Whether to use dual channel mode. If set to true AIN1 becomes the 14 * second input channel, and Vref is internally connected to Vdd. If set to 17 * @use_onchip_ref: Whether to use the onchip reference. If set to true the 18 * internal 2.5V reference is used. If set to false a external reference is
|
H A D | ad7266.h | 15 * (RANGE pin set to low) 17 * (RANGE pin set to high) 27 * (SGL/DIFF pin set to low, AD0 pin set to low) 29 * (SGL/DIFF pin set to low, AD0 pin set to high) 31 * (SGL/DIFF pin set to high) 45 * fixed_addr is set to false.
|
H A D | mmc-s3cmci.h | 18 * that a card is inserted. If @detect_invert is set, then the value from 22 * protected if @no_wprotect is not set. A 0 returned from gpio_get_value() 26 * Card power is set by @ocr_availa, using MCC_VDD_ constants if it is set 33 unsigned int detect_invert:1; /* set => detect active high */ 44 * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device
|
H A D | ad7303.h | 14 * @use_external_ref: If set to true use an external voltage reference connected
|
/linux-4.1.27/arch/sh/boards/mach-se/7724/ |
H A D | irq.c | 47 struct fpga_irq set; get_fpga_irq() local 51 set.sraddr = IRQ0_SR; get_fpga_irq() 52 set.mraddr = IRQ0_MR; get_fpga_irq() 53 set.mask = IRQ0_MASK; get_fpga_irq() 54 set.base = IRQ0_BASE; get_fpga_irq() 57 set.sraddr = IRQ1_SR; get_fpga_irq() 58 set.mraddr = IRQ1_MR; get_fpga_irq() 59 set.mask = IRQ1_MASK; get_fpga_irq() 60 set.base = IRQ1_BASE; get_fpga_irq() 63 set.sraddr = IRQ2_SR; get_fpga_irq() 64 set.mraddr = IRQ2_MR; get_fpga_irq() 65 set.mask = IRQ2_MASK; get_fpga_irq() 66 set.base = IRQ2_BASE; get_fpga_irq() 70 return set; get_fpga_irq() 76 struct fpga_irq set = get_fpga_irq(fpga2irq(irq)); disable_se7724_irq() local 77 unsigned int bit = irq - set.base; disable_se7724_irq() 78 __raw_writew(__raw_readw(set.mraddr) | 0x0001 << bit, set.mraddr); disable_se7724_irq() 84 struct fpga_irq set = get_fpga_irq(fpga2irq(irq)); enable_se7724_irq() local 85 unsigned int bit = irq - set.base; enable_se7724_irq() 86 __raw_writew(__raw_readw(set.mraddr) & ~(0x0001 << bit), set.mraddr); enable_se7724_irq() 97 struct fpga_irq set = get_fpga_irq(irq); se7724_irq_demux() local 98 unsigned short intv = __raw_readw(set.sraddr); se7724_irq_demux() 99 unsigned int ext_irq = set.base; se7724_irq_demux() 101 intv &= set.mask; se7724_irq_demux() 124 __raw_writew(0x002a, IRQ_MODE); /* set irq type */ init_se7724_IRQ()
|
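The enable/disable handlers above are plain read-modify-write operations on the FPGA's mask register: a set bit blocks the source, a cleared bit lets it through. The same idea with generic MMIO-style pointers; the function names are local to this sketch:

#include <stdint.h>

static void fpga_irq_mask(volatile uint16_t *mraddr, unsigned int bit)
{
	*mraddr |= (uint16_t)(1U << bit);	/* block the interrupt source */
}

static void fpga_irq_unmask(volatile uint16_t *mraddr, unsigned int bit)
{
	*mraddr &= (uint16_t)~(1U << bit);	/* allow it through again */
}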
/linux-4.1.27/arch/mips/include/asm/ |
H A D | futex.h | 25 " .set push \n" \ 26 " .set noat \n" \ 27 " .set arch=r4000 \n" \ 29 " .set mips0 \n" \ 31 " .set arch=r4000 \n" \ 37 " .set pop \n" \ 38 " .set mips0 \n" \ 54 " .set push \n" \ 55 " .set noat \n" \ 56 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 58 " .set mips0 \n" \ 60 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 66 " .set pop \n" \ 67 " .set mips0 \n" \ 155 " .set push \n" futex_atomic_cmpxchg_inatomic() 156 " .set noat \n" futex_atomic_cmpxchg_inatomic() 157 " .set arch=r4000 \n" futex_atomic_cmpxchg_inatomic() 160 " .set mips0 \n" futex_atomic_cmpxchg_inatomic() 162 " .set arch=r4000 \n" futex_atomic_cmpxchg_inatomic() 168 " .set pop \n" futex_atomic_cmpxchg_inatomic() 184 " .set push \n" futex_atomic_cmpxchg_inatomic() 185 " .set noat \n" futex_atomic_cmpxchg_inatomic() 186 " .set "MIPS_ISA_ARCH_LEVEL" \n" futex_atomic_cmpxchg_inatomic() 189 " .set mips0 \n" futex_atomic_cmpxchg_inatomic() 191 " .set "MIPS_ISA_ARCH_LEVEL" \n" futex_atomic_cmpxchg_inatomic() 197 " .set pop \n" futex_atomic_cmpxchg_inatomic()
|
H A D | asmmacro.h | 60 .set push 80 .set pop 84 .set push 85 .set mips64r2 103 .set pop 118 .set push 141 .set push 142 .set mips64r2 160 .set pop 215 .set push 216 .set mips32r2 217 .set msa 219 .set pop 223 .set push 224 .set mips32r2 225 .set msa 227 .set pop 231 .set push 232 .set mips32r2 233 .set msa 235 .set pop 239 .set push 240 .set mips32r2 241 .set msa 243 .set pop 247 .set push 248 .set mips32r2 249 .set msa 251 .set pop 255 .set push 256 .set mips64r2 257 .set msa 259 .set pop 263 .set push 264 .set mips32r2 265 .set msa 267 .set pop 271 .set push 272 .set mips64r2 273 .set msa 275 .set pop 303 .set push 304 .set noat 309 .set pop 313 .set push 314 .set noat 318 .set pop 322 .set push 323 .set noat 327 .set pop 331 .set push 332 .set noat 336 .set pop 340 .set push 341 .set noat 345 .set pop 349 .set push 350 .set noat 354 .set pop 358 .set push 359 .set noat 362 .set pop 366 .set push 367 .set noat 370 .set pop 407 .set push 408 .set noat 412 .set pop 416 .set push 417 .set noat 421 .set pop 466 .set push 467 .set noat 502 .set pop
|
H A D | irqflags.h | 26 " .set push \n" arch_local_irq_disable() 27 " .set noat \n" arch_local_irq_disable() 30 " .set pop \n" arch_local_irq_disable() 41 " .set push \n" arch_local_irq_save() 42 " .set reorder \n" arch_local_irq_save() 43 " .set noat \n" arch_local_irq_save() 47 " .set pop \n" arch_local_irq_save() 60 " .set push \n" arch_local_irq_restore() 61 " .set noreorder \n" arch_local_irq_restore() 62 " .set noat \n" arch_local_irq_restore() 81 " .set pop \n" arch_local_irq_restore() 90 " .set push \n" __arch_local_irq_restore() 91 " .set noreorder \n" __arch_local_irq_restore() 92 " .set noat \n" __arch_local_irq_restore() 111 " .set pop \n" __arch_local_irq_restore() 127 " .set push \n" arch_local_irq_enable() 128 " .set reorder \n" arch_local_irq_enable() 129 " .set noat \n" arch_local_irq_enable() 139 " .set pop \n" arch_local_irq_enable() 150 " .set push \n" arch_local_save_flags() 151 " .set reorder \n" arch_local_save_flags() 153 " .set pop \n" arch_local_save_flags()
|
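Callers use the save/restore pair above in the usual way: capture the old Status word, run with interrupts off, then put the previous interrupt-enable state back. A kernel-context sketch, not buildable stand-alone; the counter is only an example:

static unsigned long demo_counter;

static void demo_bump_counter(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* old flags, interrupts now off */
	demo_counter++;			/* safe against local interrupts */
	arch_local_irq_restore(flags);	/* restore the previous state */
}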
H A D | string.h | 28 ".set\tnoreorder\n\t" strcpy() 29 ".set\tnoat\n" strcpy() 35 ".set\tat\n\t" strcpy() 36 ".set\treorder" strcpy() 53 ".set\tnoreorder\n\t" strncpy() 54 ".set\tnoat\n" strncpy() 63 ".set\tat\n\t" strncpy() 64 ".set\treorder" strncpy() 78 ".set\tnoreorder\n\t" strcmp() 79 ".set\tnoat\n\t" strcmp() 92 "3:\t.set\tat\n\t" strcmp() 93 ".set\treorder" strcmp() 109 ".set\tnoreorder\n\t" strncmp() 110 ".set\tnoat\n" strncmp() 125 ".set\tat\n\t" strncmp() 126 ".set\treorder" strncmp()
|
H A D | cmpxchg.h | 26 " .set arch=r4000 \n" __xchg_u32() 28 " .set mips0 \n" __xchg_u32() 30 " .set arch=r4000 \n" __xchg_u32() 33 " .set mips0 \n" __xchg_u32() 42 " .set "MIPS_ISA_ARCH_LEVEL" \n" __xchg_u32() 44 " .set mips0 \n" __xchg_u32() 46 " .set "MIPS_ISA_ARCH_LEVEL" \n" __xchg_u32() 48 " .set mips0 \n" __xchg_u32() 79 " .set arch=r4000 \n" __xchg_u64() 84 " .set mips0 \n" __xchg_u64() 93 " .set "MIPS_ISA_ARCH_LEVEL" \n" __xchg_u64() 97 " .set mips0 \n" __xchg_u64() 149 " .set push \n" \ 150 " .set noat \n" \ 151 " .set arch=r4000 \n" \ 154 " .set mips0 \n" \ 156 " .set arch=r4000 \n" \ 160 " .set pop \n" \ 166 " .set push \n" \ 167 " .set noat \n" \ 168 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 171 " .set mips0 \n" \ 173 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 176 " .set pop \n" \
|
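For reference, the LL/SC loops above implement the usual compare-and-exchange contract: return the value found at the location, which equals the expected value exactly when the store happened. A portable C11 rendering of the 32-bit case, with names local to this sketch:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t demo_cmpxchg_u32(_Atomic uint32_t *ptr, uint32_t old,
				 uint32_t new)
{
	uint32_t expected = old;

	/* on failure, 'expected' is overwritten with the current value */
	atomic_compare_exchange_strong(ptr, &expected, new);
	return expected;
}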
H A D | mipsmtregs.h | 197 " .set push \n" dvpe() 198 " .set noreorder \n" dvpe() 199 " .set noat \n" dvpe() 200 " .set mips32r2 \n" dvpe() 204 " .set pop \n" dvpe() 215 " .set push \n" __raw_evpe() 216 " .set noreorder \n" __raw_evpe() 217 " .set noat \n" __raw_evpe() 218 " .set mips32r2 \n" __raw_evpe() 221 " .set pop \n"); __raw_evpe() 240 " .set push \n" dmt() 241 " .set mips32r2 \n" dmt() 242 " .set noat \n" dmt() 246 " .set pop \n" dmt() 257 " .set noreorder \n" __raw_emt() 258 " .set mips32r2 \n" __raw_emt() 261 " .set mips0 \n" __raw_emt() 262 " .set reorder"); __raw_emt() 279 " .set mips32r2 \n" ehb() 281 " .set mips0 \n"); ehb() 289 " .set push \n" \ 290 " .set mips32r2 \n" \ 291 " .set noat \n" \ 295 " .set pop \n" \ 306 " .set push \n" \ 307 " .set noat \n" \ 308 " .set mips32r2 \n" \ 312 " .set pop \n" \ 332 " .set push \n" \ 333 " .set mips32r2 \n" \ 334 " .set noat \n" \ 338 " .set pop \n" \ 345 " .set push \n" \ 346 " .set mips32r2 \n" \ 347 " .set noat \n" \ 351 " .set pop \n" \ 372 /* you *must* set the target tc (settc) before trying to use these */
|
H A D | barrier.h | 19 ".set push\n\t" \ 20 ".set noreorder\n\t" \ 21 ".set mips2\n\t" \ 23 ".set pop" \ 33 ".set push\n\t" \ 34 ".set noreorder\n\t" \ 37 ".set pop" \ 42 # define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n" 56 ".set push\n\t" \ 57 ".set noreorder\n\t" \ 61 ".set pop" \ 123 #define nudge_writes() __asm__ __volatile__(".set push\n\t" \ 124 ".set arch=octeon\n\t" \ 126 ".set pop" : : : "memory")
|
H A D | mipsregs.h | 846 ".set push\n\t" tlbinvf() 847 ".set noreorder\n\t" tlbinvf() 849 ".set pop"); tlbinvf() 911 ".set\tmips32\n\t" \ 913 ".set\tmips0\n\t" \ 924 ".set\tmips3\n\t" \ 926 ".set\tmips0" \ 930 ".set\tmips64\n\t" \ 932 ".set\tmips0" \ 945 ".set\tmips32\n\t" \ 947 ".set\tmips0" \ 957 ".set\tmips3\n\t" \ 959 ".set\tmips0" \ 963 ".set\tmips64\n\t" \ 965 ".set\tmips0" \ 983 * On RM7000/RM9000 these are uses to access cop0 set 1 registers 1012 ".set\tmips64\n\t" \ 1017 ".set\tmips0" \ 1021 ".set\tmips64\n\t" \ 1026 ".set\tmips0" \ 1040 ".set\tmips64\n\t" \ 1046 ".set\tmips0" \ 1050 ".set\tmips64\n\t" \ 1056 ".set\tmips0" \ 1066 " .set push \n" \ 1067 " .set noat \n" \ 1068 " .set mips32r2 \n" \ 1073 " .set pop \n" \ 1082 " .set push \n" \ 1083 " .set noat \n" \ 1084 " .set mips32r2 \n" \ 1089 " .set pop \n" \ 1442 " .set push \n" \ 1443 " .set reorder \n" \ 1446 " .set mips1 \n" \ 1449 " .set pop \n" \ 1457 " .set push \n" \ 1458 " .set reorder \n" \ 1461 " .set pop \n" \ 1467 _read_32bit_cp1_register(source, .set hardfloat) 1469 _write_32bit_cp1_register(dest, val, .set hardfloat) 1483 " .set push \n" \ 1484 " .set dsp \n" \ 1486 " .set pop \n" \ 1495 " .set push \n" \ 1496 " .set dsp \n" \ 1498 " .set pop \n" \ 1507 " .set push \n" \ 1508 " .set dsp \n" \ 1510 " .set pop \n" \ 1519 " .set push \n" \ 1520 " .set dsp \n" \ 1522 " .set pop \n" \ 1531 " .set push \n" \ 1532 " .set dsp \n" \ 1534 " .set pop \n" \ 1543 " .set push \n" \ 1544 " .set dsp \n" \ 1546 " .set pop \n" \ 1555 " .set push \n" \ 1556 " .set dsp \n" \ 1558 " .set pop \n" \ 1567 " .set push \n" \ 1568 " .set dsp \n" \ 1570 " .set pop \n" \ 1579 " .set push \n" \ 1580 " .set dsp \n" \ 1582 " .set pop \n" \ 1591 " .set push \n" \ 1592 " .set dsp \n" \ 1594 " .set pop \n" \ 1603 " .set push \n" \ 1604 " .set dsp \n" \ 1606 " .set pop \n" \ 1614 " .set push \n" \ 1615 " .set dsp \n" \ 1617 " .set pop \n" \ 1625 " .set push \n" \ 1626 " .set dsp \n" \ 1628 " .set pop \n" \ 1636 " .set push \n" \ 1637 " .set dsp \n" \ 1639 " .set pop \n" \ 1647 " .set push \n" \ 1648 " .set dsp \n" \ 1650 " .set pop \n" \ 1658 " .set push \n" \ 1659 " .set dsp \n" \ 1661 " .set pop \n" \ 1669 " .set push \n" \ 1670 " .set dsp \n" \ 1672 " .set pop \n" \ 1680 " .set push \n" \ 1681 " .set dsp \n" \ 1683 " .set pop \n" \ 1696 " .set push \n" \ 1697 " .set noat \n" \ 1702 " .set pop \n" \ 1711 " .set push \n" \ 1712 " .set noat \n" \ 1717 " .set pop \n" \ 1727 " .set push \n" \ 1728 " .set noat \n" \ 1732 " .set pop \n" \ 1741 " .set push \n" \ 1742 " .set noat \n" \ 1746 " .set pop \n" \ 1783 " .set push \n" \ 1784 " .set noat \n" \ 1788 " .set pop \n" \ 1797 " .set push \n" \ 1798 " .set noat \n" \ 1802 " .set pop \n" \ 1812 " .set push \n" \ 1813 " .set noat \n" \ 1816 " .set pop \n" \ 1825 " .set push \n" \ 1826 " .set noat \n" \ 1829 " .set pop \n" \ 1871 ".set noreorder\n\t" tlb_probe() 1873 ".set reorder"); tlb_probe() 1882 " .set push \n" tlb_read() 1883 " .set noreorder \n" tlb_read() 1884 " .set noat \n" tlb_read() 1885 " .set mips32r2 \n" tlb_read() 1889 " .set pop \n" tlb_read() 1896 ".set noreorder\n\t" tlb_read() 1898 ".set reorder"); tlb_read() 1903 " .set push \n" tlb_read() 1904 " .set noreorder \n" tlb_read() 1905 " .set noat \n" tlb_read() 1906 " .set mips32r2 \n" tlb_read() 1909 " .set pop \n"); tlb_read() 1916 ".set noreorder\n\t" tlb_write_indexed() 1918 ".set reorder"); tlb_write_indexed() 1924 ".set noreorder\n\t" tlb_write_random() 1926 ".set reorder"); tlb_write_random() 1934 set_c0_##name(unsigned int set) \ 
1939 new = res | set; \
|
H A D | atomic.h | 36 * atomic_set - set atomic variable 51 " .set arch=r4000 \n" \ 56 " .set mips0 \n" \ 64 " .set "MIPS_ISA_LEVEL" \n" \ 68 " .set mips0 \n" \ 92 " .set arch=r4000 \n" \ 98 " .set mips0 \n" \ 107 " .set "MIPS_ISA_LEVEL" \n" \ 111 " .set mips0 \n" \ 162 " .set arch=r4000 \n" atomic_sub_if_positive() 167 " .set noreorder \n" atomic_sub_if_positive() 170 " .set reorder \n" atomic_sub_if_positive() 172 " .set mips0 \n" atomic_sub_if_positive() 181 " .set "MIPS_ISA_LEVEL" \n" atomic_sub_if_positive() 186 " .set noreorder \n" atomic_sub_if_positive() 189 " .set reorder \n" atomic_sub_if_positive() 191 " .set mips0 \n" atomic_sub_if_positive() 317 * atomic64_set - set atomic variable 330 " .set arch=r4000 \n" \ 335 " .set mips0 \n" \ 343 " .set "MIPS_ISA_LEVEL" \n" \ 347 " .set mips0 \n" \ 371 " .set arch=r4000 \n" \ 377 " .set mips0 \n" \ 386 " .set "MIPS_ISA_LEVEL" \n" \ 390 " .set mips0 \n" \ 443 " .set arch=r4000 \n" atomic64_sub_if_positive() 448 " .set noreorder \n" atomic64_sub_if_positive() 451 " .set reorder \n" atomic64_sub_if_positive() 453 " .set mips0 \n" atomic64_sub_if_positive() 462 " .set "MIPS_ISA_LEVEL" \n" atomic64_sub_if_positive() 467 " .set noreorder \n" atomic64_sub_if_positive() 470 " .set reorder \n" atomic64_sub_if_positive() 472 " .set mips0 \n" atomic64_sub_if_positive()
|
H A D | r4kcache.h | 34 * set by the R4000. To keep unpleasant surprises from happening we pick 43 " .set push \n" \ 44 " .set noreorder \n" \ 45 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 47 " .set pop \n" \ 151 " .set push \n" \ 152 " .set noreorder \n" \ 153 " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 155 "2: .set pop \n" \ 164 " .set push \n" \ 165 " .set noreorder \n" \ 166 " .set mips0 \n" \ 167 " .set eva \n" \ 169 "2: .set pop \n" \ 227 " .set push \n" \ 228 " .set noreorder \n" \ 229 " .set mips3 \n" \ 246 " .set pop \n" \ 253 " .set push \n" \ 254 " .set noreorder \n" \ 255 " .set mips3 \n" \ 272 " .set pop \n" \ 279 " .set push \n" \ 280 " .set noreorder \n" \ 281 " .set mips3 \n" \ 298 " .set pop \n" \ 305 " .set push \n" \ 306 " .set noreorder \n" \ 307 " .set mips3 \n" \ 324 " .set pop \n" \ 337 " .set push\n" \ 338 " .set noreorder\n" \ 339 " .set mips64r6\n" \ 340 " .set noat\n" \ 358 " .set pop\n" \ 365 " .set push\n" \ 366 " .set noreorder\n" \ 367 " .set mips64r6\n" \ 368 " .set noat\n" \ 388 " .set pop\n" \ 395 " .set push\n" \ 396 " .set noreorder\n" \ 397 " .set mips64r6\n" \ 398 " .set noat\n" \ 422 " .set pop\n" \ 429 " .set push\n" \ 430 " .set noreorder\n" \ 431 " .set mips64r6\n" \ 432 " .set noat\n" \ 466 " .set pop\n" \ 478 " .set push \n" \ 479 " .set noreorder \n" \ 480 " .set mips0 \n" \ 481 " .set eva \n" \ 498 " .set pop \n" \ 505 " .set push \n" \ 506 " .set noreorder \n" \ 507 " .set mips0 \n" \ 508 " .set eva \n" \ 525 " .set pop \n" \ 532 " .set push \n" \ 533 " .set noreorder \n" \ 534 " .set mips0 \n" \ 535 " .set eva \n" \ 552 " .set pop \n" \
|
H A D | div64.h | 32 " .set push \n" \ 33 " .set noat \n" \ 34 " .set noreorder \n" \ 56 " .set pop" \
|
H A D | edac.h | 23 " .set mips2 \n" atomic_scrub() 28 " .set mips0 \n" atomic_scrub()
|
H A D | bitops.h | 59 * set_bit - Atomically set a bit in memory 60 * @nr: the bit to set 76 " .set arch=r4000 \n" set_bit() 81 " .set mips0 \n" set_bit() 98 " .set "MIPS_ISA_ARCH_LEVEL" \n" set_bit() 102 " .set mips0 \n" set_bit() 128 " .set arch=r4000 \n" clear_bit() 133 " .set mips0 \n" clear_bit() 150 " .set "MIPS_ISA_ARCH_LEVEL" \n" clear_bit() 154 " .set mips0 \n" clear_bit() 194 " .set arch=r4000 \n" change_bit() 199 " .set mips0 \n" change_bit() 208 " .set "MIPS_ISA_ARCH_LEVEL" \n" change_bit() 212 " .set mips0 \n" change_bit() 222 * @nr: Bit to set 241 " .set arch=r4000 \n" test_and_set_bit() 247 " .set mips0 \n" test_and_set_bit() 257 " .set "MIPS_ISA_ARCH_LEVEL" \n" test_and_set_bit() 261 " .set mips0 \n" test_and_set_bit() 278 * @nr: Bit to set 295 " .set arch=r4000 \n" test_and_set_bit_lock() 301 " .set mips0 \n" test_and_set_bit_lock() 311 " .set "MIPS_ISA_ARCH_LEVEL" \n" test_and_set_bit_lock() 315 " .set mips0 \n" test_and_set_bit_lock() 350 " .set arch=r4000 \n" test_and_clear_bit() 357 " .set mips0 \n" test_and_clear_bit() 383 " .set "MIPS_ISA_ARCH_LEVEL" \n" test_and_clear_bit() 388 " .set mips0 \n" test_and_clear_bit() 424 " .set arch=r4000 \n" test_and_change_bit() 430 " .set mips0 \n" test_and_change_bit() 440 " .set "MIPS_ISA_ARCH_LEVEL" \n" test_and_change_bit() 444 " .set mips0 \n" test_and_change_bit() 487 " .set push \n" __fls() 488 " .set "MIPS_ISA_LEVEL" \n" __fls() 490 " .set pop \n" __fls() 500 " .set push \n" __fls() 501 " .set "MIPS_ISA_LEVEL" \n" __fls() 503 " .set pop \n" __fls() 552 * fls - find last bit set. 565 " .set push \n" fls() 566 " .set "MIPS_ISA_LEVEL" \n" fls() 568 " .set pop \n" fls() 604 * ffs - find first bit set.
|
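The LL/SC sequences above give test_and_set_bit() and friends their atomic read-modify-write behaviour. The same contract expressed with C11 atomics, shown for comparison only; the macro and function names are local to this sketch:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))

static bool demo_test_and_set_bit(unsigned long nr,
				  _Atomic unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % DEMO_BITS_PER_LONG);
	unsigned long old;

	old = atomic_fetch_or(&addr[nr / DEMO_BITS_PER_LONG], mask);
	return old & mask;	/* true if the bit was already set */
}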
H A D | asm.h | 99 .set push; \ 100 .set reorder; \ 104 .set pop; \ 112 .set push; \ 113 .set reorder; \ 116 .set pop; \ 140 * Use with .set noreorder only! 148 .set push; \ 149 .set arch=r5000; \ 151 .set pop 154 .set push; \ 155 .set mips0; \ 156 .set eva; \ 158 .set pop 161 .set push; \ 162 .set arch=r5000; \ 164 .set pop 179 .set push; \ 180 .set reorder; \ 183 .set pop; \ 186 .set push; \ 187 .set reorder; \ 190 .set pop; \ 195 .set push; \ 196 .set noreorder; \ 199 .set pop; \ 202 .set push; \ 203 .set noreorder; \ 206 .set pop; \
|
H A D | spinlock.h | 65 " .set push # arch_spin_lock \n" arch_spin_lock() 66 " .set noreorder \n" arch_spin_lock() 91 " .set pop \n" arch_spin_lock() 99 " .set push # arch_spin_lock \n" arch_spin_lock() 100 " .set noreorder \n" arch_spin_lock() 124 " .set pop \n" arch_spin_lock() 150 " .set push # arch_spin_trylock \n" arch_spin_trylock() 151 " .set noreorder \n" arch_spin_trylock() 166 " .set pop \n" arch_spin_trylock() 174 " .set push # arch_spin_trylock \n" arch_spin_trylock() 175 " .set noreorder \n" arch_spin_trylock() 190 " .set pop \n" arch_spin_trylock() 230 " .set noreorder # arch_read_lock \n" arch_read_lock() 237 " .set reorder \n" arch_read_lock() 291 " .set noreorder # arch_write_lock \n" arch_write_lock() 298 " .set reorder \n" arch_write_lock() 337 " .set noreorder # arch_read_trylock \n" arch_read_trylock() 343 " .set reorder \n" arch_read_trylock() 354 " .set noreorder # arch_read_trylock \n" arch_read_trylock() 362 " .set reorder \n" arch_read_trylock() 381 " .set noreorder # arch_write_trylock \n" arch_write_trylock() 391 " .set reorder \n" arch_write_trylock()
|
H A D | stackframe.h | 29 .set push 30 .set noat 32 .set pop 147 .set push 148 .set noat 149 .set reorder 152 .set noreorder 180 .set reorder 187 .set at=k0 189 .set noat 222 .set mips64 225 .set pop 236 .set push 237 .set noat 239 .set pop 288 .set push 289 .set reorder 290 .set noat 311 .set pop 315 .set push 316 .set noreorder 321 .set pop 326 .set push 327 .set reorder 328 .set noat 355 .set pop 360 .set arch=r4000 362 .set mips0
|
H A D | msa.h | 90 " .set push\n" \ 91 " .set msa\n" \ 93 " .set pop\n" \ 101 " .set push\n" \ 102 " .set msa\n" \ 104 " .set pop\n" \ 128 " .set push\n" \ 129 " .set noat\n" \ 133 " .set pop\n" \ 141 " .set push\n" \ 142 " .set noat\n" \ 146 " .set pop\n" \
|
H A D | stacktrace.h | 33 ".set push\n\t" prepare_frametrace() 34 ".set noat\n\t" prepare_frametrace() 46 ".set pop\n\t" prepare_frametrace()
|
H A D | abi.h | 17 struct pt_regs *regs, sigset_t *set); 20 struct pt_regs *regs, sigset_t *set);
|
H A D | local.h | 37 " .set arch=r4000 \n" local_add_return() 43 " .set mips0 \n" local_add_return() 51 " .set "MIPS_ISA_ARCH_LEVEL" \n" local_add_return() 57 " .set mips0 \n" local_add_return() 82 " .set arch=r4000 \n" local_sub_return() 88 " .set mips0 \n" local_sub_return() 96 " .set "MIPS_ISA_ARCH_LEVEL" \n" local_sub_return() 102 " .set mips0 \n" local_sub_return()
|
/linux-4.1.27/net/netfilter/ |
H A D | nft_lookup.c | 22 struct nft_set *set; member in struct:nft_lookup 33 const struct nft_set *set = priv->set; nft_lookup_eval() local 36 if (set->ops->lookup(set, ®s->data[priv->sreg], &ext)) { nft_lookup_eval() 37 if (set->flags & NFT_SET_MAP) nft_lookup_eval() 39 nft_set_ext_data(ext), set->dlen); nft_lookup_eval() 57 struct nft_set *set; nft_lookup_init() local 64 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]); nft_lookup_init() 65 if (IS_ERR(set)) { nft_lookup_init() 67 set = nf_tables_set_lookup_byid(ctx->net, nft_lookup_init() 70 if (IS_ERR(set)) nft_lookup_init() 71 return PTR_ERR(set); nft_lookup_init() 74 if (set->flags & NFT_SET_EVAL) nft_lookup_init() 78 err = nft_validate_register_load(priv->sreg, set->klen); nft_lookup_init() 83 if (!(set->flags & NFT_SET_MAP)) nft_lookup_init() 88 set->dtype, set->dlen); nft_lookup_init() 91 } else if (set->flags & NFT_SET_MAP) nft_lookup_init() 94 priv->binding.flags = set->flags & NFT_SET_MAP; nft_lookup_init() 96 err = nf_tables_bind_set(ctx, set, &priv->binding); nft_lookup_init() 100 priv->set = set; nft_lookup_init() 109 nf_tables_unbind_set(ctx, priv->set, &priv->binding); nft_lookup_destroy() 116 if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name)) nft_lookup_dump() 120 if (priv->set->flags & NFT_SET_MAP) nft_lookup_dump()
|
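nft_lookup_eval() above reduces to: ask the set backend for the key and, when the set is a map, copy the element's bound data into the destination register before letting the rule continue. A simplified model of that flow; every type and name here is a stand-in:

#include <stdbool.h>
#include <string.h>

struct nft_demo_ext {
	const void *data;
	size_t dlen;
};

struct nft_demo_set {
	bool is_map;
	bool (*lookup)(const struct nft_demo_set *set, const void *key,
		       const struct nft_demo_ext **ext);
};

static bool nft_demo_lookup_eval(const struct nft_demo_set *set,
				 const void *key, void *dreg)
{
	const struct nft_demo_ext *ext;

	if (!set->lookup(set, key, &ext))
		return false;			/* no match: break the rule */
	if (set->is_map)
		memcpy(dreg, ext->data, ext->dlen);
	return true;
}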
H A D | nft_hash.c | 38 const struct nft_set *set; member in struct:nft_hash_cmp_arg 65 if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen)) nft_hash_cmp() 74 static bool nft_hash_lookup(const struct nft_set *set, const u32 *key, nft_hash_lookup() argument 77 struct nft_hash *priv = nft_set_priv(set); nft_hash_lookup() 80 .genmask = nft_genmask_cur(read_pnet(&set->pnet)), nft_hash_lookup() 81 .set = set, nft_hash_lookup() 92 static bool nft_hash_update(struct nft_set *set, const u32 *key, nft_hash_update() argument 100 struct nft_hash *priv = nft_set_priv(set); nft_hash_update() 104 .set = set, nft_hash_update() 112 he = new(set, expr, regs); nft_hash_update() 123 nft_set_elem_destroy(set, he); nft_hash_update() 128 static int nft_hash_insert(const struct nft_set *set, nft_hash_insert() argument 131 struct nft_hash *priv = nft_set_priv(set); nft_hash_insert() 134 .genmask = nft_genmask_next(read_pnet(&set->pnet)), nft_hash_insert() 135 .set = set, nft_hash_insert() 143 static void nft_hash_activate(const struct nft_set *set, nft_hash_activate() argument 148 nft_set_elem_change_active(set, &he->ext); nft_hash_activate() 152 static void *nft_hash_deactivate(const struct nft_set *set, nft_hash_deactivate() argument 155 struct nft_hash *priv = nft_set_priv(set); nft_hash_deactivate() 158 .genmask = nft_genmask_next(read_pnet(&set->pnet)), nft_hash_deactivate() 159 .set = set, nft_hash_deactivate() 167 nft_set_elem_change_active(set, &he->ext); nft_hash_deactivate() 176 static void nft_hash_remove(const struct nft_set *set, nft_hash_remove() argument 179 struct nft_hash *priv = nft_set_priv(set); nft_hash_remove() 185 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, nft_hash_walk() argument 188 struct nft_hash *priv = nft_set_priv(set); nft_hash_walk() 192 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); nft_hash_walk() 226 iter->err = iter->fn(ctx, set, iter, &elem); nft_hash_walk() 241 struct nft_set *set; nft_hash_gc() local 249 set = nft_set_container_of(priv); nft_hash_gc() 271 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); nft_hash_gc() 275 atomic_dec(&set->nelems); nft_hash_gc() 285 nft_set_gc_interval(set)); nft_hash_gc() 301 static int nft_hash_init(const struct nft_set *set, nft_hash_init() argument 305 struct nft_hash *priv = nft_set_priv(set); nft_hash_init() 310 params.key_len = set->klen; nft_hash_init() 317 if (set->flags & NFT_SET_TIMEOUT) nft_hash_init() 319 nft_set_gc_interval(set)); nft_hash_init() 328 static void nft_hash_destroy(const struct nft_set *set) nft_hash_destroy() argument 330 struct nft_hash *priv = nft_set_priv(set); nft_hash_destroy() 334 (void *)set); nft_hash_destroy()
|
H A D | nft_dynset.c | 20 struct nft_set *set; member in struct:nft_dynset 30 static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, nft_dynset_new() argument 38 if (set->size && !atomic_add_unless(&set->nelems, 1, set->size)) nft_dynset_new() 41 timeout = priv->timeout ? : set->timeout; nft_dynset_new() 42 elem = nft_set_elem_init(set, &priv->tmpl, nft_dynset_new() 47 if (set->size) nft_dynset_new() 48 atomic_dec(&set->nelems); nft_dynset_new() 52 ext = nft_set_elem_ext(set, elem); nft_dynset_new() 64 struct nft_set *set = priv->set; nft_dynset_eval() local 69 if (set->ops->update(set, ®s->data[priv->sreg_key], nft_dynset_new, nft_dynset_eval() 77 timeout = priv->timeout ? : set->timeout; nft_dynset_eval() 105 struct nft_set *set; nft_dynset_init() local 114 set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]); nft_dynset_init() 115 if (IS_ERR(set)) { nft_dynset_init() 117 set = nf_tables_set_lookup_byid(ctx->net, nft_dynset_init() 119 if (IS_ERR(set)) nft_dynset_init() 120 return PTR_ERR(set); nft_dynset_init() 123 if (set->flags & NFT_SET_CONSTANT) nft_dynset_init() 131 if (!(set->flags & NFT_SET_TIMEOUT)) nft_dynset_init() 140 if (!(set->flags & NFT_SET_TIMEOUT)) nft_dynset_init() 146 err = nft_validate_register_load(priv->sreg_key, set->klen);; nft_dynset_init() 151 if (!(set->flags & NFT_SET_MAP)) nft_dynset_init() 153 if (set->dtype == NFT_DATA_VERDICT) nft_dynset_init() 157 err = nft_validate_register_load(priv->sreg_data, set->dlen); nft_dynset_init() 160 } else if (set->flags & NFT_SET_MAP) nft_dynset_init() 164 if (!(set->flags & NFT_SET_EVAL)) nft_dynset_init() 166 if (!(set->flags & NFT_SET_ANONYMOUS)) nft_dynset_init() 176 } else if (set->flags & NFT_SET_EVAL) nft_dynset_init() 180 nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_KEY, set->klen); nft_dynset_init() 181 if (set->flags & NFT_SET_MAP) nft_dynset_init() 182 nft_set_ext_add_length(&priv->tmpl, NFT_SET_EXT_DATA, set->dlen); nft_dynset_init() 186 if (set->flags & NFT_SET_TIMEOUT) { nft_dynset_init() 187 if (timeout || set->timeout) nft_dynset_init() 193 err = nf_tables_bind_set(ctx, set, &priv->binding); nft_dynset_init() 197 priv->set = set; nft_dynset_init() 211 nf_tables_unbind_set(ctx, priv->set, &priv->binding); nft_dynset_destroy() 222 if (priv->set->flags & NFT_SET_MAP && nft_dynset_dump() 227 if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name)) nft_dynset_dump()
|
H A D | nft_rbtree.c | 33 static bool nft_rbtree_lookup(const struct nft_set *set, const u32 *key, nft_rbtree_lookup() argument 36 const struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_lookup() 39 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); nft_rbtree_lookup() 47 d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen); nft_rbtree_lookup() 70 if (set->flags & NFT_SET_INTERVAL && interval != NULL) { nft_rbtree_lookup() 79 static int __nft_rbtree_insert(const struct nft_set *set, __nft_rbtree_insert() argument 82 struct nft_rbtree *priv = nft_set_priv(set); __nft_rbtree_insert() 85 u8 genmask = nft_genmask_next(read_pnet(&set->pnet)); __nft_rbtree_insert() 95 set->klen); __nft_rbtree_insert() 111 static int nft_rbtree_insert(const struct nft_set *set, nft_rbtree_insert() argument 118 err = __nft_rbtree_insert(set, rbe); nft_rbtree_insert() 124 static void nft_rbtree_remove(const struct nft_set *set, nft_rbtree_remove() argument 127 struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_remove() 135 static void nft_rbtree_activate(const struct nft_set *set, nft_rbtree_activate() argument 140 nft_set_elem_change_active(set, &rbe->ext); nft_rbtree_activate() 143 static void *nft_rbtree_deactivate(const struct nft_set *set, nft_rbtree_deactivate() argument 146 const struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_deactivate() 149 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); nft_rbtree_deactivate() 156 set->klen); nft_rbtree_deactivate() 166 nft_set_elem_change_active(set, &rbe->ext); nft_rbtree_deactivate() 174 const struct nft_set *set, nft_rbtree_walk() 177 const struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_walk() 181 u8 genmask = nft_genmask_cur(read_pnet(&set->pnet)); nft_rbtree_walk() 194 iter->err = iter->fn(ctx, set, iter, &elem); nft_rbtree_walk() 210 static int nft_rbtree_init(const struct nft_set *set, nft_rbtree_init() argument 214 struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_init() 220 static void nft_rbtree_destroy(const struct nft_set *set) nft_rbtree_destroy() argument 222 struct nft_rbtree *priv = nft_set_priv(set); nft_rbtree_destroy() 229 nft_set_elem_destroy(set, rbe); nft_rbtree_destroy() 173 nft_rbtree_walk(const struct nft_ctx *ctx, const struct nft_set *set, struct nft_set_iter *iter) nft_rbtree_walk() argument
|
/linux-4.1.27/arch/mips/include/uapi/asm/ |
H A D | swab.h | 22 " .set push \n" __arch_swab16() 23 " .set arch=mips32r2 \n" __arch_swab16() 25 " .set pop \n" __arch_swab16() 36 " .set push \n" __arch_swab32() 37 " .set arch=mips32r2 \n" __arch_swab32() 40 " .set pop \n" __arch_swab32() 56 " .set push \n" __arch_swab64() 57 " .set arch=mips64r2 \n" __arch_swab64() 60 " .set pop \n" __arch_swab64()
|
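The __arch_swab{16,32,64}() helpers above are byte swaps built on the MIPS R2 byte-swap instructions; a plain-C equivalent of the 32-bit case for readers without those instructions at hand (the function name is local to this sketch):

#include <stdint.h>

static uint32_t demo_swab32(uint32_t x)
{
	return  (x >> 24) |
		((x >> 8) & 0x0000ff00u) |
		((x << 8) & 0x00ff0000u) |
		(x << 24);
}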
H A D | sysmips.h | 19 #define SETNAME 1 /* set hostname */ 23 #define MIPS_ATOMIC_SET 2001 /* atomically set variable */
|
H A D | ioctls.h | 28 #define TIOCEXCL 0x740d /* set exclusive use of tty */ 35 #define TIOCMSET 0x741a /* set all modem bits */ 36 #define TIOCPKT 0x5470 /* pty: set/clear packet mode */ 45 #define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */ 58 #define TIOCSLTC 0x7475 /* set special local chars */ 59 #define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */ 70 /* #define TIOCSETA _IOW('t', 20, struct termios) set termios struct */ 71 /* #define TIOCSETAW _IOW('t', 21, struct termios) drain output, set */ 72 /* #define TIOCSETAF _IOW('t', 22, struct termios) drn out, fls in, set */ 74 /* #define TIOCSETD _IOW('t', 27, int) set line discipline */
|
/linux-4.1.27/drivers/staging/lustre/lustre/lov/ |
H A D | lov_request.c | 45 static void lov_init_set(struct lov_request_set *set) lov_init_set() argument 47 set->set_count = 0; lov_init_set() 48 atomic_set(&set->set_completes, 0); lov_init_set() 49 atomic_set(&set->set_success, 0); lov_init_set() 50 atomic_set(&set->set_finish_checked, 0); lov_init_set() 51 set->set_cookies = NULL; lov_init_set() 52 INIT_LIST_HEAD(&set->set_list); lov_init_set() 53 atomic_set(&set->set_refcount, 1); lov_init_set() 54 init_waitqueue_head(&set->set_waitq); lov_init_set() 55 spin_lock_init(&set->set_lock); lov_init_set() 58 void lov_finish_set(struct lov_request_set *set) lov_finish_set() argument 62 LASSERT(set); lov_finish_set() 63 list_for_each_safe(pos, n, &set->set_list) { lov_finish_set() 79 if (set->set_pga) { lov_finish_set() 80 int len = set->set_oabufs * sizeof(*set->set_pga); lov_finish_set() 81 OBD_FREE_LARGE(set->set_pga, len); lov_finish_set() 83 if (set->set_lockh) lov_finish_set() 84 lov_llh_put(set->set_lockh); lov_finish_set() 86 OBD_FREE(set, sizeof(*set)); lov_finish_set() 89 int lov_set_finished(struct lov_request_set *set, int idempotent) lov_set_finished() argument 91 int completes = atomic_read(&set->set_completes); lov_set_finished() 93 CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count); lov_set_finished() 95 if (completes == set->set_count) { lov_set_finished() 98 if (atomic_inc_return(&set->set_finish_checked) == 1) lov_set_finished() 104 void lov_update_set(struct lov_request_set *set, lov_update_set() argument 110 atomic_inc(&set->set_completes); lov_update_set() 112 atomic_inc(&set->set_success); lov_update_set() 114 wake_up(&set->set_waitq); lov_update_set() 117 int lov_update_common_set(struct lov_request_set *set, lov_update_common_set() argument 120 struct lov_obd *lov = &set->set_exp->exp_obd->u.lov; lov_update_common_set() 122 lov_update_set(set, req, rc); lov_update_common_set() 133 void lov_set_add_req(struct lov_request *req, struct lov_request_set *set) lov_set_add_req() argument 135 list_add_tail(&req->rq_link, &set->set_list); lov_set_add_req() 136 set->set_count++; lov_set_add_req() 137 req->rq_rqset = set; lov_set_add_req() 202 static int common_attr_done(struct lov_request_set *set) common_attr_done() argument 209 LASSERT(set->set_oi != NULL); common_attr_done() 211 if (set->set_oi->oi_oa == NULL) common_attr_done() 214 if (!atomic_read(&set->set_success)) common_attr_done() 223 list_for_each(pos, &set->set_list) { common_attr_done() 232 set->set_oi->oi_md, req->rq_stripe, &attrset); common_attr_done() 238 if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && common_attr_done() 239 (set->set_oi->oi_md->lsm_stripe_count != attrset)) { common_attr_done() 247 tmp_oa->o_oi = set->set_oi->oi_oa->o_oi; common_attr_done() 248 memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa)); common_attr_done() 256 int lov_fini_getattr_set(struct lov_request_set *set) lov_fini_getattr_set() argument 260 if (set == NULL) lov_fini_getattr_set() 262 LASSERT(set->set_exp); lov_fini_getattr_set() 263 if (atomic_read(&set->set_completes)) lov_fini_getattr_set() 264 rc = common_attr_done(set); lov_fini_getattr_set() 266 lov_put_reqset(set); lov_fini_getattr_set() 285 struct lov_request_set *set; lov_prep_getattr_set() local 289 OBD_ALLOC(set, sizeof(*set)); lov_prep_getattr_set() 290 if (set == NULL) lov_prep_getattr_set() 292 lov_init_set(set); lov_prep_getattr_set() 294 set->set_exp = exp; lov_prep_getattr_set() 295 set->set_oi = oinfo; lov_prep_getattr_set() 336 lov_set_add_req(req, set); lov_prep_getattr_set() 338 
if (!set->set_count) { lov_prep_getattr_set() 342 *reqset = set; lov_prep_getattr_set() 345 lov_fini_getattr_set(set); lov_prep_getattr_set() 349 int lov_fini_destroy_set(struct lov_request_set *set) lov_fini_destroy_set() argument 351 if (set == NULL) lov_fini_destroy_set() 353 LASSERT(set->set_exp); lov_fini_destroy_set() 354 if (atomic_read(&set->set_completes)) { lov_fini_destroy_set() 358 lov_put_reqset(set); lov_fini_destroy_set() 368 struct lov_request_set *set; lov_prep_destroy_set() local 372 OBD_ALLOC(set, sizeof(*set)); lov_prep_destroy_set() 373 if (set == NULL) lov_prep_destroy_set() 375 lov_init_set(set); lov_prep_destroy_set() 377 set->set_exp = exp; lov_prep_destroy_set() 378 set->set_oi = oinfo; lov_prep_destroy_set() 379 set->set_oi->oi_md = lsm; lov_prep_destroy_set() 380 set->set_oi->oi_oa = src_oa; lov_prep_destroy_set() 381 set->set_oti = oti; lov_prep_destroy_set() 383 set->set_cookies = oti->oti_logcookies; lov_prep_destroy_set() 415 lov_set_add_req(req, set); lov_prep_destroy_set() 417 if (!set->set_count) { lov_prep_destroy_set() 421 *reqset = set; lov_prep_destroy_set() 424 lov_fini_destroy_set(set); lov_prep_destroy_set() 428 int lov_fini_setattr_set(struct lov_request_set *set) lov_fini_setattr_set() argument 432 if (set == NULL) lov_fini_setattr_set() 434 LASSERT(set->set_exp); lov_fini_setattr_set() 435 if (atomic_read(&set->set_completes)) { lov_fini_setattr_set() 436 rc = common_attr_done(set); lov_fini_setattr_set() 440 lov_put_reqset(set); lov_fini_setattr_set() 444 int lov_update_setattr_set(struct lov_request_set *set, lov_update_setattr_set() argument 450 lov_update_set(set, req, rc); lov_update_setattr_set() 487 struct lov_request_set *set; lov_prep_setattr_set() local 491 OBD_ALLOC(set, sizeof(*set)); lov_prep_setattr_set() 492 if (set == NULL) lov_prep_setattr_set() 494 lov_init_set(set); lov_prep_setattr_set() 496 set->set_exp = exp; lov_prep_setattr_set() 497 set->set_oti = oti; lov_prep_setattr_set() 498 set->set_oi = oinfo; lov_prep_setattr_set() 500 set->set_cookies = oti->oti_logcookies; lov_prep_setattr_set() 547 lov_set_add_req(req, set); lov_prep_setattr_set() 549 if (!set->set_count) { lov_prep_setattr_set() 553 *reqset = set; lov_prep_setattr_set() 556 lov_fini_setattr_set(set); lov_prep_setattr_set() 590 int lov_fini_statfs_set(struct lov_request_set *set) lov_fini_statfs_set() argument 594 if (set == NULL) lov_fini_statfs_set() 597 if (atomic_read(&set->set_completes)) { lov_fini_statfs_set() 598 rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs, lov_fini_statfs_set() 599 atomic_read(&set->set_success)); lov_fini_statfs_set() 601 lov_put_reqset(set); lov_fini_statfs_set() 666 struct lov_request_set *set; cb_statfs_update() local 674 set = lovreq->rq_rqset; cb_statfs_update() 675 lovobd = set->set_obd; cb_statfs_update() 677 osfs = set->set_oi->oi_osfs; cb_statfs_update() 679 success = atomic_read(&set->set_success); cb_statfs_update() 682 lov_update_set(set, lovreq, rc); cb_statfs_update() 703 if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD && cb_statfs_update() 704 lov_set_finished(set, 0)) { cb_statfs_update() 705 lov_statfs_interpret(NULL, set, set->set_count != cb_statfs_update() 706 atomic_read(&set->set_success)); cb_statfs_update() 715 struct lov_request_set *set; lov_prep_statfs_set() local 719 OBD_ALLOC(set, sizeof(*set)); lov_prep_statfs_set() 720 if (set == NULL) lov_prep_statfs_set() 722 lov_init_set(set); lov_prep_statfs_set() 724 set->set_obd = obd; lov_prep_statfs_set() 725 set->set_oi = oinfo; lov_prep_statfs_set() 
762 lov_set_add_req(req, set); lov_prep_statfs_set() 764 if (!set->set_count) { lov_prep_statfs_set() 768 *reqset = set; lov_prep_statfs_set() 771 lov_fini_statfs_set(set); lov_prep_statfs_set()
|
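The request-set life cycle above is a plain reference count: lov_init_set() starts it at one and lov_put_reqset() (see lov_internal.h below) frees the set when the last reference drops. The same pattern in miniature, using C11 atomics purely for illustration; the demo_ names are not Lustre symbols:

#include <stdatomic.h>
#include <stdlib.h>

struct demo_reqset {
	atomic_int refcount;
	/* ... request list, completion counters ... */
};

static struct demo_reqset *demo_reqset_new(void)
{
	struct demo_reqset *set = calloc(1, sizeof(*set));

	if (set)
		atomic_init(&set->refcount, 1);	/* creator holds one reference */
	return set;
}

static void demo_reqset_put(struct demo_reqset *set)
{
	if (atomic_fetch_sub(&set->refcount, 1) == 1)
		free(set);			/* last reference gone */
}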
H A D | lov_internal.h | 124 void lov_finish_set(struct lov_request_set *set); 126 static inline void lov_get_reqset(struct lov_request_set *set) lov_get_reqset() argument 128 LASSERT(set != NULL); lov_get_reqset() 129 LASSERT(atomic_read(&set->set_refcount) > 0); lov_get_reqset() 130 atomic_inc(&set->set_refcount); lov_get_reqset() 133 static inline void lov_put_reqset(struct lov_request_set *set) lov_put_reqset() argument 135 if (atomic_dec_and_test(&set->set_refcount)) lov_put_reqset() 136 lov_finish_set(set); lov_put_reqset() 170 struct lov_stripe_md *lsm, int stripeno, int *set); 193 void qos_shrink_lsm(struct lov_request_set *set); 194 int qos_prep_create(struct obd_export *exp, struct lov_request_set *set); 198 int qos_remedy_create(struct lov_request_set *set, struct lov_request *req); 201 void lov_set_add_req(struct lov_request *req, struct lov_request_set *set); 202 int lov_set_finished(struct lov_request_set *set, int idempotent); 203 void lov_update_set(struct lov_request_set *set, 205 int lov_update_common_set(struct lov_request_set *set, 210 int lov_fini_getattr_set(struct lov_request_set *set); 215 int lov_fini_destroy_set(struct lov_request_set *set); 219 int lov_update_setattr_set(struct lov_request_set *set, 221 int lov_fini_setattr_set(struct lov_request_set *set); 228 int lov_fini_statfs_set(struct lov_request_set *set);
|
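The lov_internal.h excerpt above shows the reference-counting idiom behind lov_request_set: lov_get_reqset() takes a reference, lov_put_reqset() drops one, and the final put calls lov_finish_set(). Below is a minimal userspace sketch of the same get/put pattern using C11 atomics; the reqset_* names are illustrative stand-ins, not Lustre functions.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct reqset {
        atomic_int refcount;     /* starts at 1, owned by the creator */
        int        count;        /* payload, stands in for set_count  */
    };

    static struct reqset *reqset_new(void)
    {
        struct reqset *set = calloc(1, sizeof(*set));

        if (set)
            atomic_init(&set->refcount, 1);
        return set;
    }

    static void reqset_get(struct reqset *set)
    {
        assert(atomic_load(&set->refcount) > 0);  /* mirrors the LASSERTs above */
        atomic_fetch_add(&set->refcount, 1);
    }

    static void reqset_put(struct reqset *set)
    {
        /* the last put runs the destructor, like lov_finish_set() */
        if (atomic_fetch_sub(&set->refcount, 1) == 1) {
            printf("freeing set with %d requests\n", set->count);
            free(set);
        }
    }

    int main(void)
    {
        struct reqset *set = reqset_new();

        reqset_get(set);   /* hand a second reference to another user */
        reqset_put(set);   /* that user is done                       */
        reqset_put(set);   /* creator drops the last reference: freed */
        return 0;
    }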
/linux-4.1.27/arch/mips/lib/ |
H A D | mips-atomic.c | 43 " .set push \n" arch_local_irq_disable() 44 " .set noat \n" arch_local_irq_disable() 48 " .set noreorder \n" arch_local_irq_disable() 51 " .set pop \n" arch_local_irq_disable() 68 " .set push \n" arch_local_irq_save() 69 " .set reorder \n" arch_local_irq_save() 70 " .set noat \n" arch_local_irq_save() 74 " .set noreorder \n" arch_local_irq_save() 77 " .set pop \n" arch_local_irq_save() 95 " .set push \n" arch_local_irq_restore() 96 " .set noreorder \n" arch_local_irq_restore() 97 " .set noat \n" arch_local_irq_restore() 105 " .set pop \n" arch_local_irq_restore() 122 " .set push \n" __arch_local_irq_restore() 123 " .set noreorder \n" __arch_local_irq_restore() 124 " .set noat \n" __arch_local_irq_restore() 132 " .set pop \n" __arch_local_irq_restore()
|
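The mips-atomic.c excerpt above wraps every hand-written sequence in ".set push" / ".set pop" so that options such as noreorder and noat apply only inside the block and the assembler's previous state is restored afterwards. A small illustration of that bracketing in GCC inline assembly; it targets MIPS only (needs a MIPS toolchain), and read_status_sketch is an illustrative name rather than a kernel helper.

    #include <stdint.h>

    /* Read the CP0 Status register ($12) without letting the assembler
     * reorder the sequence or silently use the $at register. */
    static inline uint32_t read_status_sketch(void)
    {
        uint32_t val;

        __asm__ __volatile__(
            "   .set    push        \n"   /* save current assembler options  */
            "   .set    noreorder   \n"   /* do not reschedule instructions  */
            "   .set    noat        \n"   /* do not clobber $at behind us    */
            "   mfc0    %0, $12     \n"   /* CP0 Status lives in register 12 */
            "   .set    pop         \n"   /* restore the saved options       */
            : "=r" (val));

        return val;
    }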
H A D | strnlen_user.S | 39 .set noat 48 .set noreorder 55 .set at 57 .set reorder 71 .set __strnlen_user_asm, __strnlen_kernel_asm 72 .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm 79 .set push 80 .set eva 82 .set pop
|
H A D | strlen_user.S | 50 .set __strlen_user_asm, __strlen_kernel_asm 57 .set push 58 .set eva 60 .set pop
|
H A D | strncpy_user.S | 73 .set __strncpy_from_user_asm, __strncpy_from_kernel_asm 74 .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm 80 .set push 81 .set eva 83 .set pop
|
/linux-4.1.27/include/linux/ |
H A D | uts.h | 12 #define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */ 16 #define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
|
H A D | signal.h | 55 static inline void sigaddset(sigset_t *set, int _sig) sigaddset() argument 59 set->sig[0] |= 1UL << sig; sigaddset() 61 set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW); sigaddset() 64 static inline void sigdelset(sigset_t *set, int _sig) sigdelset() argument 68 set->sig[0] &= ~(1UL << sig); sigdelset() 70 set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW)); sigdelset() 73 static inline int sigismember(sigset_t *set, int _sig) sigismember() argument 77 return 1 & (set->sig[0] >> sig); sigismember() 79 return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); sigismember() 84 static inline int sigisemptyset(sigset_t *set) sigisemptyset() argument 88 return (set->sig[3] | set->sig[2] | sigisemptyset() 89 set->sig[1] | set->sig[0]) == 0; sigisemptyset() 91 return (set->sig[1] | set->sig[0]) == 0; sigisemptyset() 93 return set->sig[0] == 0; sigisemptyset() 143 static inline void name(sigset_t *set) \ 146 case 4: set->sig[3] = op(set->sig[3]); \ 147 set->sig[2] = op(set->sig[2]); \ 148 case 2: set->sig[1] = op(set->sig[1]); \ 149 case 1: set->sig[0] = op(set->sig[0]); \ 162 static inline void sigemptyset(sigset_t *set) sigemptyset() argument 166 memset(set, 0, sizeof(sigset_t)); sigemptyset() 168 case 2: set->sig[1] = 0; sigemptyset() 169 case 1: set->sig[0] = 0; sigemptyset() 174 static inline void sigfillset(sigset_t *set) sigfillset() argument 178 memset(set, -1, sizeof(sigset_t)); sigfillset() 180 case 2: set->sig[1] = -1; sigfillset() 181 case 1: set->sig[0] = -1; sigfillset() 188 static inline void sigaddsetmask(sigset_t *set, unsigned long mask) sigaddsetmask() argument 190 set->sig[0] |= mask; sigaddsetmask() 193 static inline void sigdelsetmask(sigset_t *set, unsigned long mask) sigdelsetmask() argument 195 set->sig[0] &= ~mask; sigdelsetmask() 198 static inline int sigtestsetmask(sigset_t *set, unsigned long mask) sigtestsetmask() argument 200 return (set->sig[0] & mask) != 0; sigtestsetmask() 203 static inline void siginitset(sigset_t *set, unsigned long mask) siginitset() argument 205 set->sig[0] = mask; siginitset() 208 memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1)); siginitset() 210 case 2: set->sig[1] = 0; siginitset() 215 static inline void siginitsetinv(sigset_t *set, unsigned long mask) siginitsetinv() argument 217 set->sig[0] = ~mask; siginitsetinv() 220 memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1)); siginitsetinv() 222 case 2: set->sig[1] = -1; siginitsetinv() 328 * The possible effects an unblocked signal set to SIG_DFL can have are: 338 * Other signals when not blocked and set to SIG_DFL behaves as follows.
|
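The signal.h excerpt above is the kernel-side implementation of the sigset helpers; userspace reaches the same operations through the POSIX <signal.h> API. A small runnable example that builds a set, tests membership, and blocks it for the calling thread:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t set;

        sigemptyset(&set);           /* start with an empty set          */
        sigaddset(&set, SIGINT);     /* add SIGINT ...                   */
        sigaddset(&set, SIGTERM);    /* ... and SIGTERM                  */

        if (sigismember(&set, SIGINT))
            printf("SIGINT is in the set\n");

        if (sigprocmask(SIG_BLOCK, &set, NULL) != 0)   /* block the set */
            perror("sigprocmask");

        sigdelset(&set, SIGTERM);    /* drop SIGTERM from the local copy */
        return 0;
    }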
H A D | mISDNdsp.h | 27 int hfc_dtmf; /* set if HFCmulti card supports dtmf */ 28 int hfc_conf; /* set if HFCmulti card supports conferences */ 29 int hfc_loops; /* set if card supports tone loops */ 30 int hfc_echocanhw; /* set if card supports echocancelation*/
|
H A D | jz4740-adc.h | 10 * @mask: Mask for the config value to be set 11 * @val: Value to be set
|
H A D | kconfig.h | 26 * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 33 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 42 * built-in code when CONFIG_FOO is set to 'm'. 48 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
|
H A D | led-class-flash.h | 38 /* set flash brightness */ 44 /* set flash strobe state */ 48 /* set flash timeout */ 114 * @fled_cdev: the flash LED to set strobe on 146 * led_set_flash_brightness - set flash LED brightness 147 * @fled_cdev: the flash LED to set 148 * @brightness: the brightness to set it to 169 * led_set_flash_timeout - set flash LED timeout 170 * @fled_cdev: the flash LED to set 171 * @timeout: the flash timeout to set it to
|
/linux-4.1.27/arch/m32r/include/uapi/asm/ |
H A D | setup.h | 5 * This is set up by the setup-routine at boot-time
|
/linux-4.1.27/arch/x86/crypto/ |
H A D | sha1_avx2_x86_64_asm.S | 59 * SHA-1 implementation with Intel(R) AVX2 instruction set extensions. 100 .set A, REG_A 101 .set B, REG_B 102 .set C, REG_C 103 .set D, REG_D 104 .set E, REG_E 105 .set TB, REG_TB 106 .set TA, REG_TA 108 .set RA, REG_RA 109 .set RB, REG_RB 110 .set RC, REG_RC 111 .set RD, REG_RD 112 .set RE, REG_RE 114 .set RTA, REG_RTA 115 .set RTB, REG_RTB 117 .set T1, REG_T1 161 .set WY_00, WY0 162 .set WY_04, WY4 163 .set WY_08, WY08 164 .set WY_12, WY12 165 .set WY_16, WY16 166 .set WY_20, WY20 167 .set WY_24, WY24 168 .set WY_28, WY28 169 .set WY_32, WY_00 174 .set WY_32, WY_28 175 .set WY_28, WY_24 176 .set WY_24, WY_20 177 .set WY_20, WY_16 178 .set WY_16, WY_12 179 .set WY_12, WY_08 180 .set WY_08, WY_04 181 .set WY_04, WY_00 182 .set WY_00, WY_32 185 .set WY, WY_00 186 .set WY_minus_04, WY_04 187 .set WY_minus_08, WY_08 188 .set WY_minus_12, WY_12 189 .set WY_minus_16, WY_16 190 .set WY_minus_20, WY_20 191 .set WY_minus_24, WY_24 192 .set WY_minus_28, WY_28 193 .set WY_minus_32, WY 302 .set i, \r 305 .set K_XMM, 32*0 307 .set K_XMM, 32*1 309 .set K_XMM, 32*2 311 .set K_XMM, 32*3 324 .set T_REG, E 325 .set E, D 326 .set D, C 327 .set C, B 328 .set B, TB 329 .set TB, A 330 .set A, T_REG 332 .set T_REG, RE 333 .set RE, RD 334 .set RD, RC 335 .set RC, RB 336 .set RB, RTB 337 .set RTB, RA 338 .set RA, T_REG 354 .set round_id, (\r % 80) 357 .set ROUND_FUNC, RND_F1 370 .set ROUND_FUNC, RND_F2 372 .set ROUND_FUNC, RND_F3 374 .set ROUND_FUNC, RND_F2 377 .set round_id, ( (\r+1) % 80) 467 .set i, 0 470 .set i, i + 1 480 * it is set below by: cmovae BUFFER_PTR, K_BASE 493 .set j, 0 496 .set j, j+2 512 .set j, j+2 526 .set j, j+2 547 .set j, 0 550 .set j, j+2 562 .set j, j+2 575 .set j, j+2 593 .set j, j+2
|
H A D | sha1_ssse3_asm.S | 3 * SSE3 instruction set extensions introduced in Intel Core Microarchitecture 124 .set i, 0 127 .set i, (i+1) 196 .set A, REG_A 197 .set B, REG_B 198 .set C, REG_C 199 .set D, REG_D 200 .set E, REG_E 201 .set T1, REG_T1 202 .set T2, REG_T2 214 .set _T, \a 215 .set \a, \b 216 .set \b, _T 290 .set i, \r 293 .set K_XMM, 0 295 .set K_XMM, 16 297 .set K_XMM, 32 299 .set K_XMM, 48 303 .set i, ((\r) % 80) # pre-compute for the next iteration 316 .set W, W0 317 .set W_minus_04, W4 318 .set W_minus_08, W8 319 .set W_minus_12, W12 320 .set W_minus_16, W16 321 .set W_minus_20, W20 322 .set W_minus_24, W24 323 .set W_minus_28, W28 324 .set W_minus_32, W 328 .set W_minus_32, W_minus_28 329 .set W_minus_28, W_minus_24 330 .set W_minus_24, W_minus_20 331 .set W_minus_20, W_minus_16 332 .set W_minus_16, W_minus_12 333 .set W_minus_12, W_minus_08 334 .set W_minus_08, W_minus_04 335 .set W_minus_04, W 336 .set W, W_minus_32
|
H A D | aes_ctrby8_avx-x86_64.S | 159 .set by, \b 160 .set load_keys, \k 161 .set klen, \key_len 169 .set i, 1 180 .set i, (i +1) 193 .set i, 1 197 .set i, (i +1) 202 .set i, 0 206 .set i, (i +1) 217 .set i, 0 221 .set i, (i +1) 234 .set i, 0 243 .set i, (i +1) 248 .set i, 0 257 .set i, (i +1) 268 .set i, 0 272 .set i, (i +1) 277 .set i, 0 286 .set i, (i +1) 297 .set i, 0 301 .set i, (i +1) 312 .set i, 0 321 .set i, (i +1) 326 .set i, 0 335 .set i, (i +1) 342 .set i, 0 351 .set i, (i +1) 359 .set i, 0 363 .set i, (i +1) 370 .set i, 0 379 .set i, (i +1) 385 .set i, 0 390 .set i, (i +1) 393 .set i, 0 398 .set i, (i +1) 403 .set i, 0 405 .set j, (i+1) 412 .set i, (i+2) 421 .set i, 0 425 .set i, (i+1)
|
/linux-4.1.27/tools/testing/selftests/timers/ |
H A D | Makefile | 8 TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ 13 leapcrash set-tai set-2038 32 ./set-tai 33 ./set-2038
|
/linux-4.1.27/arch/arm/plat-samsung/ |
H A D | platformdata.c | 42 struct s3c_sdhci_platdata *set) s3c_sdhci_set_platdata() 44 set->cd_type = pd->cd_type; s3c_sdhci_set_platdata() 45 set->ext_cd_init = pd->ext_cd_init; s3c_sdhci_set_platdata() 46 set->ext_cd_cleanup = pd->ext_cd_cleanup; s3c_sdhci_set_platdata() 47 set->ext_cd_gpio = pd->ext_cd_gpio; s3c_sdhci_set_platdata() 48 set->ext_cd_gpio_invert = pd->ext_cd_gpio_invert; s3c_sdhci_set_platdata() 51 set->max_width = pd->max_width; s3c_sdhci_set_platdata() 53 set->cfg_gpio = pd->cfg_gpio; s3c_sdhci_set_platdata() 55 set->host_caps |= pd->host_caps; s3c_sdhci_set_platdata() 57 set->host_caps2 |= pd->host_caps2; s3c_sdhci_set_platdata() 59 set->pm_caps |= pd->pm_caps; s3c_sdhci_set_platdata() 41 s3c_sdhci_set_platdata(struct s3c_sdhci_platdata *pd, struct s3c_sdhci_platdata *set) s3c_sdhci_set_platdata() argument
|
/linux-4.1.27/arch/mips/include/asm/netlogic/ |
H A D | mips-extns.h | 52 ".set\tmips64\n\t" \ 58 ".set\tmips0" \ 74 ".set push\n\t" ack_c0_eirr() 75 ".set mips64\n\t" ack_c0_eirr() 76 ".set noat\n\t" ack_c0_eirr() 80 ".set pop" ack_c0_eirr() 87 ".set push\n\t" set_c0_eimr() 88 ".set mips64\n\t" set_c0_eimr() 89 ".set noat\n\t" set_c0_eimr() 95 ".set pop" set_c0_eimr() 102 ".set push\n\t" clear_c0_eimr() 103 ".set mips64\n\t" clear_c0_eimr() 104 ".set noat\n\t" clear_c0_eimr() 111 ".set pop" clear_c0_eimr() 127 ".set push\n\t" read_c0_eirr_and_eimr() 128 ".set mips64\n\t" read_c0_eirr_and_eimr() 129 ".set noat\n\t" read_c0_eirr_and_eimr() 136 ".set pop" read_c0_eirr_and_eimr() 182 ".set\tmips64\n\t" \ 187 ".set\tmips0\n\t" \ 191 ".set\tmips64\n\t" \ 196 ".set\tmips0\n\t" \ 210 ".set\tmips64\n\t" \ 216 ".set\tmips0\n\t" \ 220 ".set\tmips64\n\t" \ 226 ".set\tmips0\n\t" \ 235 ".set\tmips32\n\t" \ 237 ".set\tmips0\n\t" \ 241 ".set\tmips32\n\t" \ 243 ".set\tmips0\n\t" \ 254 ".set\tmips64\n\t" \ 256 ".set\tmips0\n\t" \ 260 ".set\tmips64\n\t" \ 262 ".set\tmips0\n\t" \ 273 ".set\tmips64\n\t" \ 275 ".set\tmips0\n\t" \ 279 ".set\tmips64\n\t" \ 281 ".set\tmips0\n\t" \ 289 ".set\tmips32\n\t" \ 291 ".set\tmips0\n\t" \ 295 ".set\tmips32\n\t" \ 297 ".set\tmips0\n\t" \
|
/linux-4.1.27/arch/mips/include/asm/mach-loongson/ |
H A D | kernel-entry-init.h | 19 .set push 20 .set mips64 29 .set pop 38 .set push 39 .set mips64 48 .set pop
|
/linux-4.1.27/tools/include/asm-generic/bitops/ |
H A D | find.h | 6 * find_next_bit - find the next set bit in a memory region 11 * Returns the bit number for the next set bit 12 * If no bits are set, returns @size. 21 * find_first_bit - find the first set bit in a memory region 25 * Returns the bit number of the first set bit. 26 * If no bits are set, returns @size.
|
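find_first_bit()/find_next_bit() documented above return the index of the first/next set bit in an unsigned-long bitmap, or @size when no bit is set. A userspace approximation of find_first_bit() built on a GCC builtin; first_set_bit is an illustrative name and the sketch assumes @size is a multiple of the word width.

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Index of the first set bit, or size if the bitmap is empty. */
    static unsigned long first_set_bit(const unsigned long *map, unsigned long size)
    {
        unsigned long i;

        for (i = 0; i * BITS_PER_LONG < size; i++)
            if (map[i])
                return i * BITS_PER_LONG + __builtin_ctzl(map[i]);
        return size;
    }

    int main(void)
    {
        unsigned long map[2]  = { 0, 1UL << 3 };  /* one bit set in word 1 */
        unsigned long none[2] = { 0, 0 };

        /* on a 64-bit host this prints 67 (word 1, bit 3), then 128 */
        printf("%lu\n", first_set_bit(map, 2 * BITS_PER_LONG));
        printf("%lu\n", first_set_bit(none, 2 * BITS_PER_LONG));
        return 0;
    }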
/linux-4.1.27/arch/alpha/lib/ |
H A D | strcat.S | 17 mov $16, $0 # set up return value 26 cmpbge $31, $1, $2 # bits set iff byte == 0 34 $found: negq $2, $3 # clear all but least set bit 37 and $2, 0xf0, $3 # binary search for that set bit
|
H A D | strchr.S | 11 .set noreorder 12 .set noat 28 cmpbge zero, t0, t2 # .. e1 : bits set iff byte == zero 32 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage 35 cmpbge zero, t1, t3 # e0 : bits set iff byte == c 36 or t2, t3, t0 # e1 : bits set iff char match or zero match 44 cmpbge zero, t0, t2 # e0 : bits set iff byte == 0 45 cmpbge zero, t1, t3 # .. e1 : bits set iff byte == c 49 $found: negq t0, t1 # e0 : clear all but least set bit 52 and t0, t3, t1 # e0 : bit set iff byte was the char 55 and t0, 0xf0, t2 # e0 : binary search for that set bit
|
H A D | strcpy.S | 18 mov $16, $0 # set up return value 19 mov $26, $23 # set up return address
|
H A D | ev67-strchr.S | 21 .set noreorder 22 .set noat 48 cmpbge zero, t0, t2 # E : bits set iff byte == zero 49 cmpbge zero, t4, t4 # E : bits set iff byte is garbage 54 cmpbge zero, t1, t3 # E : bits set iff byte == c 55 or t2, t3, t0 # E : bits set iff char match or zero match 71 cmpbge zero, t0, t2 # E : bits set iff byte == 0 73 cmpbge zero, t1, t3 # E : bits set iff byte == c 78 $found: negq t0, t1 # E : clear all but least set bit 80 and t0, t3, t1 # E : bit set iff byte was the char
|
H A D | strrchr.S | 11 .set noreorder 12 .set noat 32 cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero 36 cmpbge zero, t4, t4 # .. e1 : bits set iff byte is garbage 37 cmpbge zero, t2, t3 # e0 : bits set iff byte == c 49 cmpbge zero, t0, t1 # .. e1 : bits set iff byte == zero 50 cmpbge zero, t2, t3 # e0 : bits set iff byte == c 70 and t8, 0xf0, t2 # e0 : binary search for the high bit set
|
H A D | dbg_current.S | 11 .set noat
|
H A D | dbg_stackcheck.S | 11 .set noat
|
H A D | dbg_stackkill.S | 12 .set noat
|
H A D | strncat.S | 22 mov $16, $0 # set up return value 32 cmpbge $31, $1, $2 # bits set iff byte == 0 40 $found: negq $2, $3 # clear all but least set bit 43 and $2, 0xf0, $3 # binary search for that set bit
|
/linux-4.1.27/tools/perf/util/include/asm/ |
H A D | dwarf2.h | 5 /* dwarf2.h ... dummy header file for including arch/x86/lib/mem{cpy,set}_64.S */
|
/linux-4.1.27/include/uapi/linux/ |
H A D | sched.h | 8 #define CLONE_VM 0x00000100 /* set if VM shared between processes */ 9 #define CLONE_FS 0x00000200 /* set if fs info shared between processes */ 10 #define CLONE_FILES 0x00000400 /* set if open files shared between processes */ 11 #define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ 12 #define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ 13 #define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ 14 #define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ 19 #define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ 22 #define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */ 23 #define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ 48 * For the sched_{set,get}attr() calls
|
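The CLONE_* flags above select what a clone(2) child shares with its parent. A minimal runnable Linux example that shares the address space so the child's write is visible to the parent; child_fn and the 1 MiB stack size are illustrative choices, and the stack pointer is passed as the top of the buffer because the stack grows downwards on the usual architectures.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int shared_value;              /* visible to the child via CLONE_VM */

    static int child_fn(void *arg)
    {
        shared_value = 42;                /* writes the parent's memory */
        return 0;
    }

    int main(void)
    {
        const size_t stack_size = 1024 * 1024;
        char *stack = malloc(stack_size);
        pid_t pid;

        if (!stack)
            return 1;

        /* CLONE_VM: share the address space; CLONE_FS/CLONE_FILES: share fs
         * info and the open-file table; SIGCHLD lets waitpid() reap the child. */
        pid = clone(child_fn, stack + stack_size,
                    CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, NULL);
        if (pid < 0) {
            perror("clone");
            return 1;
        }

        waitpid(pid, NULL, 0);
        printf("shared_value = %d\n", shared_value);   /* prints 42 */
        free(stack);
        return 0;
    }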
H A D | oom.h | 5 * /proc/<pid>/oom_score_adj set to OOM_SCORE_ADJ_MIN disables oom killing for 12 * /proc/<pid>/oom_adj set to -17 protects from the oom killer for legacy
|
H A D | prctl.h | 11 /* Get/set current->mm->dumpable */ 15 /* Get/set unaligned access control bits (if meaningful) */ 21 /* Get/set whether or not to drop capabilities on setuid() away from 26 /* Get/set floating-point emulation control bits (if meaningful) */ 32 /* Get/set floating-point exception mode (if meaningful) */ 46 /* Get/set whether we use statistical process timing or accurate timestamp 58 /* Get/set process endian */ 65 /* Get/set process seccomp mode */ 69 /* Get/set the capability bounding set (as per security/commoncap.c) */ 73 /* Get/set the process' ability to use the timestamp counter instruction */ 79 /* Get/set securebits (as per security/commoncap.c) */ 84 * Get/set the timerslack as used by poll/select/nanosleep 161 * If no_new_privs is set, then operations that grant new privileges (i.e.
|
H A D | securebits.h | 12 /* When set UID 0 has no special privileges. When unset, we support 24 /* When set, setuid to/from uid 0 does not trigger capability-"fixup". 26 set*uid to gain/lose privilege, transitions to/from uid 0 cause 35 /* When set, a process can retain its capabilities even after 36 transitioning to a non-root user (the set-uid fixup suppressed by
|
H A D | sockios.h | 34 #define SIOCSIFLINK 0x8911 /* set iface channel */ 37 #define SIOCSIFFLAGS 0x8914 /* set flags */ 39 #define SIOCSIFADDR 0x8916 /* set PA address */ 41 #define SIOCSIFDSTADDR 0x8918 /* set remote PA address */ 43 #define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */ 45 #define SIOCSIFNETMASK 0x891c /* set network PA mask */ 47 #define SIOCSIFMETRIC 0x891e /* set metric */ 49 #define SIOCSIFMEM 0x8920 /* set memory address (BSD) */ 51 #define SIOCSIFMTU 0x8922 /* set MTU size */ 52 #define SIOCSIFNAME 0x8923 /* set interface name */ 53 #define SIOCSIFHWADDR 0x8924 /* set hardware address */ 54 #define SIOCGIFENCAP 0x8925 /* get/set encapsulations */ 63 #define SIOCSIFPFLAGS 0x8934 /* set/get extended flags set */ 66 #define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */ 84 #define SIOCWANDEV 0x894A /* get/set netdev parameters */ 92 #define SIOCSARP 0x8955 /* set ARP table entry */ 97 #define SIOCSRARP 0x8962 /* set RARP table entry */ 116 #define SIOCBONDSETHWADDR 0x8992 /* set the hw addr of the bond */ 128 #define SIOCSHWTSTAMP 0x89b0 /* set and get config */
|
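The sockios.h excerpt above lists the classic get/set interface ioctls. A runnable example that reads the loopback interface's flags with SIOCGIFFLAGS; setting them back with SIOCSIFFLAGS follows the same pattern but needs CAP_NET_ADMIN, so it is only shown in a comment.

    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "lo", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {      /* get the flags */
            perror("SIOCGIFFLAGS");
            close(fd);
            return 1;
        }
        printf("lo flags: 0x%x (%s)\n", (unsigned short)ifr.ifr_flags,
               (ifr.ifr_flags & IFF_UP) ? "up" : "down");

        /* To change them (privileged):
         *   ifr.ifr_flags |= IFF_UP;
         *   ioctl(fd, SIOCSIFFLAGS, &ifr);
         */
        close(fd);
        return 0;
    }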
H A D | if_cablemodem.h | 18 #define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */ 20 #define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
|
H A D | atm_eni.h | 21 /* set buffer multipliers */
|
H A D | nvram.h | 7 #define NVRAM_INIT _IO('p', 0x40) /* initialize NVRAM and set checksum */
|
H A D | ppp-ioctl.h | 86 #define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */ 88 #define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */ 91 #define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */ 93 #define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */ 94 #define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */ 96 #define PPPIOCSXASYNCMAP _IOW('t', 79, ext_accm) /* set extended ACCM */ 100 #define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */ 101 #define PPPIOCSPASS _IOW('t', 71, struct sock_fprog) /* set pass filter */ 102 #define PPPIOCSACTIVE _IOW('t', 70, struct sock_fprog) /* set active filt */ 109 #define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */
|
/linux-4.1.27/include/linux/netfilter/ipset/ |
H A D | ip_set.h | 62 /* Mark set with an extension which needs to call destroy */ 133 typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 152 int (*kadt)(struct ip_set *set, const struct sk_buff *skb, 160 int (*uadt)(struct ip_set *set, struct nlattr *tb[], 166 /* When adding entries and set is full, try to resize the set */ 167 int (*resize)(struct ip_set *set, bool retried); 168 /* Destroy the set */ 169 void (*destroy)(struct ip_set *set); 171 void (*flush)(struct ip_set *set); 173 void (*expire)(struct ip_set *set); 174 /* List set header data */ 175 int (*head)(struct ip_set *set, struct sk_buff *skb); 177 int (*list)(const struct ip_set *set, struct sk_buff *skb, 180 /* Return true if "b" set is the same as "a" 181 * according to the create set parameters */ 185 /* The core set type structure */ 205 /* Create set */ 206 int (*create)(struct net *net, struct ip_set *set, 217 /* register and unregister set type */ 221 /* A generic IP set */ 223 /* The name of the set */ 225 /* Lock protecting the set data */ 227 /* References to the set */ 229 /* The core set type */ 233 /* The actual INET family of the set */ 252 ip_set_ext_destroy(struct ip_set *set, void *data) ip_set_ext_destroy() argument 254 /* Check that the extension is enabled for the set and ip_set_ext_destroy() 257 if (SET_WITH_COMMENT(set)) ip_set_ext_destroy() 259 ext_comment(data, set)); ip_set_ext_destroy() 263 ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) ip_set_put_flags() argument 267 if (SET_WITH_TIMEOUT(set)) ip_set_put_flags() 269 htonl(set->timeout)))) ip_set_put_flags() 271 if (SET_WITH_COUNTER(set)) ip_set_put_flags() 273 if (SET_WITH_COMMENT(set)) ip_set_put_flags() 275 if (SET_WITH_SKBINFO(set)) ip_set_put_flags() 277 if (SET_WITH_FORCEADD(set)) ip_set_put_flags() 391 /* register and unregister set references */ 393 const char *name, struct ip_set **set); 399 /* API for iptables set match, and SET target */ 416 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], 418 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], 442 ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set) ip_set_enomatch() argument 445 (set->type->features & IPSET_TYPE_NOMATCH) && ip_set_enomatch() 537 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, ip_set_put_extensions() argument 540 if (SET_WITH_TIMEOUT(set)) { ip_set_put_extensions() 541 unsigned long *timeout = ext_timeout(e, set); ip_set_put_extensions() 548 if (SET_WITH_COUNTER(set) && ip_set_put_extensions() 549 ip_set_put_counter(skb, ext_counter(e, set))) ip_set_put_extensions() 551 if (SET_WITH_COMMENT(set) && ip_set_put_extensions() 552 ip_set_put_comment(skb, ext_comment(e, set))) ip_set_put_extensions() 554 if (SET_WITH_SKBINFO(set) && ip_set_put_extensions() 555 ip_set_put_skbinfo(skb, ext_skbinfo(e, set))) ip_set_put_extensions() 560 #define IP_SET_INIT_KEXT(skb, opt, set) \ 562 .timeout = ip_set_adt_opt_timeout(opt, set) } 564 #define IP_SET_INIT_UEXT(set) \ 566 .timeout = (set)->timeout }
|
H A D | ip_set_timeout.h | 16 /* Timeout period depending on the timeout value of the given set */ 20 /* Entry is set with no timeout value */ 26 #define ip_set_adt_opt_timeout(opt, set) \ 27 ((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
|
/linux-4.1.27/arch/x86/um/ |
H A D | tls_64.c | 10 * If CLONE_SETTLS is set, we need to save the thread id arch_copy_tls() 11 * (which is argument 5, child_tid, of clone) so it can be set arch_copy_tls()
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | scratchpad.h | 6 #define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */ 7 #define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */
|
H A D | traps.h | 20 /* We set this to _start in system setup. */
|
/linux-4.1.27/arch/mips/lasat/image/ |
H A D | head.S | 5 .set noreorder 6 .set mips3
|
/linux-4.1.27/include/asm-generic/bitops/ |
H A D | builtin-__fls.h | 5 * __fls - find last (most-significant) set bit in a long word 8 * Undefined if no set bit exists, so code should check against 0 first.
|
H A D | fls64.h | 7 * fls64 - find last set bit in a 64-bit word 11 * ffsll, but returns the position of the most significant set bit. 14 * set bit if value is nonzero. The last (most significant) bit is
|
H A D | builtin-fls.h | 5 * fls - find last (most-significant) bit set
|
H A D | find.h | 6 * find_next_bit - find the next set bit in a memory region 11 * Returns the bit number for the next set bit 12 * If no bits are set, returns @size. 35 * find_first_bit - find the first set bit in a memory region 39 * Returns the bit number of the first set bit. 40 * If no bits are set, returns @size.
|
H A D | lock.h | 6 * @nr: Bit to set 16 * @nr: the bit to set 29 * @nr: the bit to set
|
H A D | __fls.h | 7 * __fls - find last (most-significant) set bit in a long word 10 * Undefined if no set bit exists, so code should check against 0 first.
|
H A D | builtin-ffs.h | 5 * ffs - find first bit set
|
H A D | count_zeros.h | 23 * If the MSB of @x is set, the result is 0. 24 * If only the LSB of @x is set, then the result is BITS_PER_LONG-1. 43 * If the LSB of @x is set, the result is 0. 44 * If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
|
H A D | ffs.h | 5 * ffs - find first bit set
|
H A D | fls.h | 5 * fls - find last (most-significant) bit set
|
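The bitops headers above fix the ffs()/fls() convention: both are 1-based and return 0 when no bit is set, ffs() reporting the least-significant and fls() the most-significant set bit. A userspace sketch of the same semantics; fls_sketch is an illustrative helper, not the kernel's fls().

    #include <stdio.h>

    /* 1-based position of the most-significant set bit; 0 if x == 0. */
    static int fls_sketch(unsigned int x)
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int x = 0x90;    /* bits 4 and 7 set */

        printf("ffs(0x%x) = %d\n", x, __builtin_ffs(x));  /* 5: bit 4, 1-based */
        printf("fls(0x%x) = %d\n", x, fls_sketch(x));     /* 8: bit 7, 1-based */
        printf("ffs(0) = %d, fls(0) = %d\n", __builtin_ffs(0), fls_sketch(0));
        return 0;
    }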
/linux-4.1.27/arch/mips/mm/ |
H A D | sc-ip22.c | 34 ".set\tpush\t\t\t# indy_sc_wipe\n\t" indy_sc_wipe() 35 ".set\tnoreorder\n\t" indy_sc_wipe() 36 ".set\tmips3\n\t" indy_sc_wipe() 37 ".set\tnoat\n\t" indy_sc_wipe() 45 ".set\tat\n\t" indy_sc_wipe() 53 ".set\tpop" indy_sc_wipe() 95 ".set\tpush\n\t" indy_sc_enable() 96 ".set\tnoreorder\n\t" indy_sc_enable() 97 ".set\tmips3\n\t" indy_sc_enable() 113 ".set\tpop" indy_sc_enable() 125 ".set\tpush\n\t" indy_sc_disable() 126 ".set\tnoreorder\n\t" indy_sc_disable() 127 ".set\tmips3\n\t" indy_sc_disable() 143 ".set\tpop" indy_sc_disable()
|
/linux-4.1.27/arch/sh/boards/mach-x3proto/ |
H A D | ilsel.c | 43 * directly for hooking up an ILSEL set and getting back an IRQ which can 64 static void __ilsel_enable(ilsel_source_t set, unsigned int bit) __ilsel_enable() argument 69 pr_notice("enabling ILSEL set %d\n", set); __ilsel_enable() 74 pr_debug("%s: bit#%d: addr - 0x%08lx (shift %d, set %d)\n", __ilsel_enable() 75 __func__, bit, addr, shift, set); __ilsel_enable() 79 tmp |= set << shift; __ilsel_enable() 84 * ilsel_enable - Enable an ILSEL set. 85 * @set: ILSEL source (see ilsel_source_t enum in include/asm-sh/ilsel.h). 95 int ilsel_enable(ilsel_source_t set) ilsel_enable() argument 99 if (unlikely(set > ILSEL_KEY)) { ilsel_enable() 108 __ilsel_enable(set, bit); ilsel_enable() 115 * ilsel_enable_fixed - Enable an ILSEL set at a fixed interrupt level 116 * @set: ILSEL source (see ilsel_source_t enum in include/asm-sh/ilsel.h). 125 int ilsel_enable_fixed(ilsel_source_t set, unsigned int level) ilsel_enable_fixed() argument 132 __ilsel_enable(set, bit); ilsel_enable_fixed() 139 * ilsel_disable - Disable an ILSEL set 140 * @irq: Bit position for ILSEL set value (retval from enable routines) 142 * Disable a previously enabled ILSEL set. 149 pr_notice("disabling ILSEL set %d\n", irq); ilsel_disable()
|
/linux-4.1.27/arch/mips/loongson/common/ |
H A D | setup.c | 24 asm(".set\tpush\n\t" wbflush_loongson() 25 ".set\tnoreorder\n\t" wbflush_loongson() 26 ".set mips3\n\t" wbflush_loongson() 29 ".set\tpop\n\t" wbflush_loongson() 30 ".set mips0\n\t"); wbflush_loongson()
|
/linux-4.1.27/drivers/uwb/ |
H A D | allocator.c | 88 * Find the best column set for a given availability, interval, num safe mas and 95 * set 1 -> { 8 } 97 * set 1 -> { 4 } 98 * set 2 -> { 12 } 100 * set 1 -> { 2 } 101 * set 2 -> { 6 } 102 * set 3 -> { 10 } 103 * set 4 -> { 14 } 105 * set 1 -> { 1 } 106 * set 2 -> { 3 } 107 * set 3 -> { 5 } 108 * set 4 -> { 7 } 109 * set 5 -> { 9 } 110 * set 6 -> { 11 } 111 * set 7 -> { 13 } 112 * set 8 -> { 15 } 116 * set 1 -> { 4 12 } 118 * set 1 -> { 2 10 } 119 * set 2 -> { 6 14 } 121 * set 1 -> { 1 9 } 122 * set 2 -> { 3 11 } 123 * set 3 -> { 5 13 } 124 * set 4 -> { 7 15 } 128 * set 1 -> { 2 6 10 14 } 130 * set 1 -> { 1 5 9 13 } 131 * set 2 -> { 3 7 11 15 } 135 * set 1 -> { 1 3 5 7 9 11 13 15 } 143 int deep, set, col, start_col_deep, col_start_set; uwb_rsv_find_best_column_set() local 157 for (set = 1; set <= (1 << deep); set++) { uwb_rsv_find_best_column_set()
|
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_bit.h | 26 * masks with n high/low bits set, 64-bit values 41 /* Get high bit set out of 32-bit argument, -1 if none set */ xfs_highbit32() 47 /* Get high bit set out of 64-bit argument, -1 if none set */ xfs_highbit64() 53 /* Get low bit set out of 32-bit argument, -1 if none set */ xfs_lowbit32() 59 /* Get low bit set out of 64-bit argument, -1 if none set */ xfs_lowbit64() 84 /* Find next set bit in map */
|
/linux-4.1.27/arch/arm/mm/ |
H A D | dump.c | 3 * so that we can see what the various memory ranges are set to. 50 const char *set; member in struct:prot_bits 58 .set = "USR", 63 .set = "ro", 68 .set = "NX", 73 .set = "SHD", 78 .set = "SO/UNCACHED", 82 .set = "MEM/BUFFERABLE/WC", 86 .set = "MEM/CACHED/WT", 90 .set = "MEM/CACHED/WBRA", 95 .set = "MEM/MINICACHE", 100 .set = "MEM/CACHED/WBWA", 104 .set = "DEV/SHARED", 109 .set = "DEV/NONSHARED", 114 .set = "DEV/WC", 118 .set = "DEV/CACHED", 127 .set = "USR", 131 .set = "ro", 137 .set = " ro", 141 .set = " RW", 145 .set = "USR ro", 149 .set = "USR RW", 155 .set = " ro", 159 .set = " RW", 163 .set = "USR ro", 167 .set = "USR RW", 172 .set = "NX", 177 .set = "SHD", 209 s = bits->set; dump_prot()
|
H A D | cache-xsc3l2.c | 56 int set, way; xsc3_l2_inv_all() local 60 for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { xsc3_l2_inv_all() 62 set_way = (way << 29) | (set << 5); xsc3_l2_inv_all() 161 * optimize L2 flush all operation by set/way format 166 int set, way; xsc3_l2_flush_all() local 170 for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) { xsc3_l2_flush_all() 172 set_way = (way << 29) | (set << 5); xsc3_l2_flush_all()
|
/linux-4.1.27/arch/mips/kernel/ |
H A D | genex.S | 33 .set push 34 .set noat 45 .set pop 55 .set push 56 .set arch=r4000 57 .set noat 61 .set push 62 .set noreorder 63 .set nomacro 70 .set pop 105 .set pop 112 .set push 113 .set noreorder 128 .set MIPS_ISA_ARCH_LEVEL_RAW 134 .set pop 139 .set push 140 .set noat 148 .set pop 165 .set push 166 .set noat 173 .set noreorder 183 .set pop 238 .set push 239 .set noreorder 246 .set pop 274 .set push 275 .set noat 293 .set mips32 295 .set pop 325 .set push 326 .set noat 341 .set pop 358 .set push 360 .set mips1 363 .set pop 407 .set noat 411 .set at 453 .set push 454 .set noat 455 .set noreorder 468 .set pop 474 .set push 475 .set noat 476 .set noreorder 501 .set reorder 508 .set noreorder 521 .set at=v1 523 .set noat 530 .set arch=r4000 532 .set mips0 534 .set pop
|
H A D | cpu-bugs64.c | 34 ".set push\n\t" align_mod() 35 ".set noreorder\n\t" align_mod() 40 ".set pop" align_mod() 83 ".set push\n\t" mult_sh_align_mod() 84 ".set noat\n\t" mult_sh_align_mod() 85 ".set noreorder\n\t" mult_sh_align_mod() 86 ".set nomacro\n\t" mult_sh_align_mod() 92 ".set pop" mult_sh_align_mod() 204 ".set push\n\t" check_daddi() 205 ".set noat\n\t" check_daddi() 206 ".set noreorder\n\t" check_daddi() 207 ".set nomacro\n\t" check_daddi() 211 ".set daddi\n\t" check_daddi() 214 ".set pop" check_daddi() 272 ".set push\n\t" check_daddiu() 273 ".set noat\n\t" check_daddiu() 274 ".set noreorder\n\t" check_daddiu() 275 ".set nomacro\n\t" check_daddiu() 279 ".set daddi\n\t" check_daddiu() 284 ".set pop" check_daddiu()
|
H A D | r4k_fpu.S | 22 /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ 26 .set push 28 .set nomacro 30 .set pop 36 .set noreorder 39 .set push 42 .set pop 46 .set push 49 .set mips32r2 50 .set fp=64 73 1: .set pop 76 .set push 98 .set pop 104 .set push 105 .set MIPS_ISA_ARCH_LEVEL_RAW 154 .set pop 171 .set push 174 .set mips32r2 175 .set fp=64 197 1: .set pop 199 .set push 218 .set pop 226 .set push 273 .set pop 277 .set reorder
|
/linux-4.1.27/include/trace/events/ |
H A D | thp.h | 48 TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set), 49 TP_ARGS(addr, pte, clr, set), 54 __field(unsigned long, set) 61 __entry->set = set; 65 TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
|
/linux-4.1.27/tools/power/cpupower/utils/idle_monitor/ |
H A D | cpupower-monitor.h | 75 cpu_set_t set; bind_cpu() local 77 if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { bind_cpu() 78 CPU_ZERO(&set); bind_cpu() 79 CPU_SET(cpu, &set); bind_cpu() 80 return sched_setaffinity(getpid(), sizeof(set), &set); bind_cpu()
|
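bind_cpu() in the cpupower-monitor.h excerpt above pins the calling process to one CPU with the glibc cpu_set_t API. The same calls work in any program; a runnable example that restricts itself to CPU 0 and reads the mask back:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        cpu_set_t set;

        CPU_ZERO(&set);                    /* start with an empty CPU set */
        CPU_SET(0, &set);                  /* allow only CPU 0            */

        if (sched_setaffinity(getpid(), sizeof(set), &set) != 0) {
            perror("sched_setaffinity");
            return 1;
        }

        if (sched_getaffinity(getpid(), sizeof(set), &set) == 0)
            printf("%d allowed CPU(s), CPU0 %s\n", CPU_COUNT(&set),
                   CPU_ISSET(0, &set) ? "included" : "excluded");
        return 0;
    }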
/linux-4.1.27/drivers/thunderbolt/ |
H A D | nhi_regs.h | 22 RING_DESC_COMPLETED = 0x2, /* set by NHI */ 23 RING_DESC_POSTED = 0x4, /* always set this */ 30 * For TX set length/eof/sof. 31 * For RX length/eof/sof are set by the NHI. 47 * 08: ring tail (set by NHI) 57 * 10: ring tail (set by NHI) 74 * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to 93 * enable/disable interrupts set/clear the corresponding bits.
|
/linux-4.1.27/arch/x86/boot/ |
H A D | regs.c | 12 * Simple helper function for initializing a register set. 14 * Note that this sets EFLAGS_CF in the input register set; this 16 * explicitly set CF.
|
/linux-4.1.27/include/uapi/linux/netfilter_ipv4/ |
H A D | ipt_ECN.h | 17 #define IPT_ECN_OP_SET_IP 0x01 /* set ECN bits of IPv4 header */ 18 #define IPT_ECN_OP_SET_ECE 0x10 /* set ECE bit of TCP header */ 19 #define IPT_ECN_OP_SET_CWR 0x20 /* set CWR bit of TCP header */
|
/linux-4.1.27/drivers/watchdog/ |
H A D | octeon-wdt-nmi.S | 14 .set push 15 .set noreorder 16 .set noat 63 .set pop
|
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/ |
H A D | reg.h | 30 #define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */ 31 #define SPRN_BESCRSU 801 /* Branch event status & control set upper */ 32 #define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
|
/linux-4.1.27/net/sched/ |
H A D | em_ipset.c | 25 struct xt_set_info *set = data; em_ipset_change() local 28 if (data_len != sizeof(*set)) em_ipset_change() 31 index = ip_set_nfnl_get_byindex(net, set->index); em_ipset_change() 35 em->datalen = sizeof(*set); em_ipset_change() 46 const struct xt_set_info *set = (const void *) em->data; em_ipset_destroy() local 47 if (set) { em_ipset_destroy() 48 ip_set_nfnl_put(em->net, set->index); em_ipset_destroy() 58 const struct xt_set_info *set = (const void *) em->data; em_ipset_match() local 83 opt.dim = set->dim; em_ipset_match() 84 opt.flags = set->flags; em_ipset_match() 101 ret = ip_set_test(set->index, skb, &acpar, &opt); em_ipset_match()
|
/linux-4.1.27/arch/mips/dec/ |
H A D | wbflush.c | 55 asm(".set\tpush\n\t" wbflush_kn01() 56 ".set\tnoreorder\n\t" wbflush_kn01() 59 ".set\tpop"); wbflush_kn01() 68 asm(".set\tpush\n\t" wbflush_kn210() 69 ".set\tnoreorder\n\t" wbflush_kn210() 79 ".set\tpop" wbflush_kn210()
|
/linux-4.1.27/arch/mips/include/asm/mach-pmcs-msp71xx/ |
H A D | msp_regops.h | 79 " .set push \n" set_value_reg32() 80 " .set arch=r4000 \n" set_value_reg32() 87 " .set pop \n" set_value_reg32() 101 " .set push \n" set_reg32() 102 " .set arch=r4000 \n" set_reg32() 108 " .set pop \n" set_reg32() 122 " .set push \n" clear_reg32() 123 " .set arch=r4000 \n" clear_reg32() 129 " .set pop \n" clear_reg32() 143 " .set push \n" toggle_reg32() 144 " .set arch=r4000 \n" toggle_reg32() 150 " .set pop \n" toggle_reg32() 164 " .set push \n" read_reg32() 165 " .set noreorder \n" read_reg32() 168 " .set pop \n" read_reg32() 187 " .set push \n" blocking_read_reg32() 188 " .set noreorder \n" blocking_read_reg32() 191 " .set pop \n" blocking_read_reg32() 219 " .set push \n" \ 220 " .set arch=r4000 \n" \ 222 " .set pop \n" \ 228 " .set push \n" \ 229 " .set arch=r4000 \n" \ 233 " .set pop \n" \
|
/linux-4.1.27/include/xen/interface/ |
H A D | features.h | 13 * If set, the guest does not need to write-protect its pagetables, and can 19 * If set, the guest does not need to write-protect its segment descriptor 25 * If set, translation between the guest's 'pseudo-physical' address space 32 /* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */ 36 * If set, the guest does not need to allocate x86 PAE page directories 45 * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
|
/linux-4.1.27/arch/mips/netlogic/xlp/ |
H A D | cop2-ex.c | 32 ".set push\n" nlm_cop2_save() 33 ".set noat\n" nlm_cop2_save() 50 ".set pop\n" nlm_cop2_save() 63 ".set push\n" nlm_cop2_restore() 64 ".set noat\n" nlm_cop2_restore() 81 ".set pop\n" nlm_cop2_restore()
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | opcodes.c | 23 0xF0F0, /* EQ == Z set */ 25 0xCCCC, /* CS == C set */ 27 0xFF00, /* MI == N set */ 29 0xAAAA, /* VS == V set */ 31 0x0C0C, /* HI == C set && Z clear */ 32 0xF3F3, /* LS == C clear || Z set */
|
H A D | io.c | 14 void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set) atomic_io_modify_relaxed() argument 21 value |= (set & mask); atomic_io_modify_relaxed() 27 void atomic_io_modify(void __iomem *reg, u32 mask, u32 set) atomic_io_modify() argument 34 value |= (set & mask); atomic_io_modify()
|
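atomic_io_modify() in the io.c excerpt above serializes a read-modify-write of an MMIO register: take a lock, read, clear the bits selected by the mask, OR in the new value, write back. A hedged userspace sketch of the same clear-then-set update (link with -pthread), with a pthread mutex standing in for the kernel spinlock and a plain variable for the register:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static volatile uint32_t fake_reg = 0xffff0000;   /* stand-in for the register */

    /* Update only the bits selected by mask, leaving the rest untouched. */
    static void reg_modify(volatile uint32_t *reg, uint32_t mask, uint32_t set)
    {
        uint32_t value;

        pthread_mutex_lock(&reg_lock);
        value = *reg;
        value &= ~mask;            /* clear the field ...             */
        value |= (set & mask);     /* ... then set the requested bits */
        *reg = value;
        pthread_mutex_unlock(&reg_lock);
    }

    int main(void)
    {
        reg_modify(&fake_reg, 0x000000ff, 0x0000005a);
        printf("reg = 0x%08x\n", (unsigned)fake_reg);   /* 0xffff005a */
        return 0;
    }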
/linux-4.1.27/kernel/trace/ |
H A D | trace_nop.c | 25 { } /* Always set a last empty entry */ 63 static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) nop_set_flag() argument 70 printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept." nop_set_flag() 72 set); nop_set_flag() 77 printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse." nop_set_flag() 79 set); nop_set_flag()
|
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_crtc_helper.c | 248 * drm_crtc_helper_set_mode - internal helper to set a mode 255 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance 256 * to fixup or reject the mode prior to trying to set it. This is an internal 265 * True if the mode was set successfully, false otherwise. 373 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", drm_crtc_helper_set_mode() 384 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ drm_crtc_helper_set_mode() 455 * drm_crtc_helper_set_config - set a new config from userspace 456 * @set: mode set configuration 459 * from userspace or internally e.g. from the fbdev support code) in @set, and 467 int drm_crtc_helper_set_config(struct drm_mode_set *set) drm_crtc_helper_set_config() argument 472 bool mode_changed = false; /* if true do a full mode set */ drm_crtc_helper_set_config() 483 BUG_ON(!set); drm_crtc_helper_set_config() 484 BUG_ON(!set->crtc); drm_crtc_helper_set_config() 485 BUG_ON(!set->crtc->helper_private); drm_crtc_helper_set_config() 488 BUG_ON(!set->mode && set->fb); drm_crtc_helper_set_config() 489 BUG_ON(set->fb && set->num_connectors == 0); drm_crtc_helper_set_config() 491 crtc_funcs = set->crtc->helper_private; drm_crtc_helper_set_config() 493 if (!set->mode) drm_crtc_helper_set_config() 494 set->fb = NULL; drm_crtc_helper_set_config() 496 if (set->fb) { drm_crtc_helper_set_config() 498 set->crtc->base.id, set->fb->base.id, drm_crtc_helper_set_config() 499 (int)set->num_connectors, set->x, set->y); drm_crtc_helper_set_config() 501 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); drm_crtc_helper_set_config() 502 drm_crtc_helper_disable(set->crtc); drm_crtc_helper_set_config() 506 dev = set->crtc->dev; drm_crtc_helper_set_config() 541 save_set.crtc = set->crtc; drm_crtc_helper_set_config() 542 save_set.mode = &set->crtc->mode; drm_crtc_helper_set_config() 543 save_set.x = set->crtc->x; drm_crtc_helper_set_config() 544 save_set.y = set->crtc->y; drm_crtc_helper_set_config() 545 save_set.fb = set->crtc->primary->fb; drm_crtc_helper_set_config() 549 if (set->crtc->primary->fb != set->fb) { drm_crtc_helper_set_config() 550 /* If we have no fb then treat it as a full mode set */ drm_crtc_helper_set_config() 551 if (set->crtc->primary->fb == NULL) { drm_crtc_helper_set_config() 552 DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); drm_crtc_helper_set_config() 554 } else if (set->fb == NULL) { drm_crtc_helper_set_config() 556 } else if (set->fb->pixel_format != drm_crtc_helper_set_config() 557 set->crtc->primary->fb->pixel_format) { drm_crtc_helper_set_config() 563 if (set->x != set->crtc->x || set->y != set->crtc->y) drm_crtc_helper_set_config() 566 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { drm_crtc_helper_set_config() 567 DRM_DEBUG_KMS("modes are different, full mode set\n"); drm_crtc_helper_set_config() 568 drm_mode_debug_printmodeline(&set->crtc->mode); drm_crtc_helper_set_config() 569 drm_mode_debug_printmodeline(set->mode); drm_crtc_helper_set_config() 579 for (ro = 0; ro < set->num_connectors; ro++) { drm_crtc_helper_set_config() 580 if (set->connectors[ro] == connector) { drm_crtc_helper_set_config() 601 * the appropriate crtc will be set later. 
drm_crtc_helper_set_config() 619 if (connector->encoder->crtc == set->crtc) drm_crtc_helper_set_config() 624 for (ro = 0; ro < set->num_connectors; ro++) { drm_crtc_helper_set_config() 625 if (set->connectors[ro] == connector) drm_crtc_helper_set_config() 626 new_crtc = set->crtc; drm_crtc_helper_set_config() 655 if (drm_helper_crtc_in_use(set->crtc)) { drm_crtc_helper_set_config() 656 DRM_DEBUG_KMS("attempting to set mode from" drm_crtc_helper_set_config() 658 drm_mode_debug_printmodeline(set->mode); drm_crtc_helper_set_config() 659 set->crtc->primary->fb = set->fb; drm_crtc_helper_set_config() 660 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, drm_crtc_helper_set_config() 661 set->x, set->y, drm_crtc_helper_set_config() 663 DRM_ERROR("failed to set mode on [CRTC:%d]\n", drm_crtc_helper_set_config() 664 set->crtc->base.id); drm_crtc_helper_set_config() 665 set->crtc->primary->fb = save_set.fb; drm_crtc_helper_set_config() 670 for (i = 0; i < set->num_connectors; i++) { drm_crtc_helper_set_config() 671 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, drm_crtc_helper_set_config() 672 set->connectors[i]->name); drm_crtc_helper_set_config() 673 set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); drm_crtc_helper_set_config() 678 set->crtc->x = set->x; drm_crtc_helper_set_config() 679 set->crtc->y = set->y; drm_crtc_helper_set_config() 680 set->crtc->primary->fb = set->fb; drm_crtc_helper_set_config() 681 ret = crtc_funcs->mode_set_base(set->crtc, drm_crtc_helper_set_config() 682 set->x, set->y, save_set.fb); drm_crtc_helper_set_config() 684 set->crtc->x = save_set.x; drm_crtc_helper_set_config() 685 set->crtc->y = save_set.y; drm_crtc_helper_set_config() 686 set->crtc->primary->fb = save_set.fb; drm_crtc_helper_set_config() 861 * has successfully set the restored configuration already. Hence this should 867 * configuration is restored in a different order than when userspace set it up) 889 DRM_ERROR("failed to set mode on crtc %p\n", crtc); drm_helper_resume_force_mode() 929 * to set up the crtc.
|
/linux-4.1.27/drivers/media/pci/bt8xx/ |
H A D | bttv-audio-hook.c | 55 void gvbctv3pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set) gvbctv3pci_audio() argument 59 if (set) { gvbctv3pci_audio() 76 void gvbctv5pci_audio(struct bttv *btv, struct v4l2_tuner *t, int set) gvbctv5pci_audio() argument 84 if (set) { gvbctv5pci_audio() 136 * input/output sound connection, so both must be set for output mode. 143 void avermedia_tvphone_audio(struct bttv *btv, struct v4l2_tuner *t, int set) avermedia_tvphone_audio() argument 147 if (set) { avermedia_tvphone_audio() 165 void avermedia_tv_stereo_audio(struct bttv *btv, struct v4l2_tuner *t, int set) avermedia_tv_stereo_audio() argument 169 if (set) { avermedia_tv_stereo_audio() 186 void lt9415_audio(struct bttv *btv, struct v4l2_tuner *t, int set) lt9415_audio() argument 195 if (set) { lt9415_audio() 215 void terratv_audio(struct bttv *btv, struct v4l2_tuner *t, int set) terratv_audio() argument 219 if (set) { terratv_audio() 235 void winfast2000_audio(struct bttv *btv, struct v4l2_tuner *t, int set) winfast2000_audio() argument 239 if (set) { winfast2000_audio() 268 void pvbt878p9b_audio(struct bttv *btv, struct v4l2_tuner *t, int set) pvbt878p9b_audio() argument 275 if (set) { pvbt878p9b_audio() 299 void fv2000s_audio(struct bttv *btv, struct v4l2_tuner *t, int set) fv2000s_audio() argument 306 if (set) { fv2000s_audio() 329 void windvr_audio(struct bttv *btv, struct v4l2_tuner *t, int set) windvr_audio() argument 333 if (set) { windvr_audio() 357 void adtvk503_audio(struct bttv *btv, struct v4l2_tuner *t, int set) adtvk503_audio() argument 363 if (set) { adtvk503_audio()
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/ |
H A D | fail.c | 50 int __cfs_fail_check_set(__u32 id, __u32 value, int set) __cfs_fail_check_set() argument 75 if (set == CFS_FAIL_LOC_VALUE) { __cfs_fail_check_set() 94 if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) && __cfs_fail_check_set() 97 /* Lost race to set CFS_FAILED_BIT. */ __cfs_fail_check_set() 105 switch (set) { __cfs_fail_check_set() 116 LASSERTF(0, "called with bad set %u\n", set); __cfs_fail_check_set() 124 int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) __cfs_fail_timeout_set() argument 128 ret = __cfs_fail_check_set(id, value, set); __cfs_fail_timeout_set()
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | trampoline_32.S | 45 set (PSR_PIL | PSR_S | PSR_PS), %g1 59 set current_set, %g5 74 set poke_srmmu, %g5 99 set (PSR_PIL | PSR_S | PSR_PS), %g1 109 set trapbase, %g1 114 set SUN4D_BOOTBUS_CPUID, %g3 121 set current_set, %g5 135 set poke_srmmu, %g5 151 set smp_penguin_ctable,%g1 154 set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ 158 set (PSR_PIL | PSR_S | PSR_PS), %g1 168 set trapbase, %g1 176 set current_set, %g5 191 set poke_srmmu, %g5
|
/linux-4.1.27/block/ |
H A D | blk-mq.c | 210 /* tag was already set */ blk_mq_rq_ctx_init() 430 * Ensure that ->deadline is visible before set the started blk_mq_start_request() 437 * set if requeue raced with timeout, which then marked it as blk_mq_start_request() 597 * We know that complete is set at this point. If STARTED isn't set blk_mq_rq_timed_out() 600 * Timeout first checks if STARTED is set, and if it is, assumes blk_mq_rq_timed_out() 842 * left in the list, set dptr to defer issue. __blk_mq_run_hw_queue() 1141 * If 'this_ctx' is set, we know we have entries to complete blk_mq_flush_plug_list() 1401 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, blk_mq_free_rq_map() argument 1406 if (tags->rqs && set->ops->exit_request) { blk_mq_free_rq_map() 1412 set->ops->exit_request(set->driver_data, tags->rqs[i], blk_mq_free_rq_map() 1434 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, blk_mq_init_rq_map() argument 1441 tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, blk_mq_init_rq_map() 1442 set->numa_node, blk_mq_init_rq_map() 1443 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); blk_mq_init_rq_map() 1449 tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *), blk_mq_init_rq_map() 1451 set->numa_node); blk_mq_init_rq_map() 1461 rq_size = round_up(sizeof(struct request) + set->cmd_size, blk_mq_init_rq_map() 1463 left = rq_size * set->queue_depth; blk_mq_init_rq_map() 1465 for (i = 0; i < set->queue_depth; ) { blk_mq_init_rq_map() 1475 page = alloc_pages_node(set->numa_node, blk_mq_init_rq_map() 1494 to_do = min(entries_per_page, set->queue_depth - i); blk_mq_init_rq_map() 1498 if (set->ops->init_request) { blk_mq_init_rq_map() 1499 if (set->ops->init_request(set->driver_data, blk_mq_init_rq_map() 1501 set->numa_node)) { blk_mq_init_rq_map() 1515 blk_mq_free_rq_map(set, tags, hctx_idx); blk_mq_init_rq_map() 1605 struct blk_mq_tag_set *set, blk_mq_exit_hctx() 1608 unsigned flush_start_tag = set->queue_depth; blk_mq_exit_hctx() 1612 if (set->ops->exit_request) blk_mq_exit_hctx() 1613 set->ops->exit_request(set->driver_data, blk_mq_exit_hctx() 1617 if (set->ops->exit_hctx) blk_mq_exit_hctx() 1618 set->ops->exit_hctx(hctx, hctx_idx); blk_mq_exit_hctx() 1626 struct blk_mq_tag_set *set, int nr_queue) blk_mq_exit_hw_queues() 1634 blk_mq_exit_hctx(q, set, hctx, i); queue_for_each_hw_ctx() 1639 struct blk_mq_tag_set *set) blk_mq_free_hw_queues() 1649 struct blk_mq_tag_set *set, blk_mq_init_hctx() 1653 unsigned flush_start_tag = set->queue_depth; blk_mq_init_hctx() 1657 node = hctx->numa_node = set->numa_node; blk_mq_init_hctx() 1665 hctx->flags = set->flags; blk_mq_init_hctx() 1671 hctx->tags = set->tags[hctx_idx]; blk_mq_init_hctx() 1687 if (set->ops->init_hctx && blk_mq_init_hctx() 1688 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) blk_mq_init_hctx() 1691 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); blk_mq_init_hctx() 1695 if (set->ops->init_request && blk_mq_init_hctx() 1696 set->ops->init_request(set->driver_data, blk_mq_init_hctx() 1706 if (set->ops->exit_hctx) blk_mq_init_hctx() 1707 set->ops->exit_hctx(hctx, hctx_idx); blk_mq_init_hctx() 1719 struct blk_mq_tag_set *set) blk_mq_init_hw_queues() 1728 if (blk_mq_init_hctx(q, set, hctx, i)) queue_for_each_hw_ctx() 1738 blk_mq_exit_hw_queues(q, set, i); 1778 struct blk_mq_tag_set *set = q->tag_set; blk_mq_map_swqueue() local 1807 if (set->tags[i]) { queue_for_each_hw_ctx() 1808 blk_mq_free_rq_map(set, set->tags[i], i); queue_for_each_hw_ctx() 1809 set->tags[i] = NULL; 
queue_for_each_hw_ctx() 1816 if (!set->tags[i]) queue_for_each_hw_ctx() 1817 set->tags[i] = blk_mq_init_rq_map(set, i); queue_for_each_hw_ctx() 1818 hctx->tags = set->tags[i]; queue_for_each_hw_ctx() 1836 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) blk_mq_update_tag_set_depth() argument 1843 if (set->tag_list.next == set->tag_list.prev) blk_mq_update_tag_set_depth() 1848 list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_update_tag_set_depth() 1863 struct blk_mq_tag_set *set = q->tag_set; blk_mq_del_queue_tag_set() local 1865 mutex_lock(&set->tag_list_lock); blk_mq_del_queue_tag_set() 1867 blk_mq_update_tag_set_depth(set); blk_mq_del_queue_tag_set() 1868 mutex_unlock(&set->tag_list_lock); blk_mq_del_queue_tag_set() 1871 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, blk_mq_add_queue_tag_set() argument 1874 q->tag_set = set; blk_mq_add_queue_tag_set() 1876 mutex_lock(&set->tag_list_lock); blk_mq_add_queue_tag_set() 1877 list_add_tail(&q->tag_set_list, &set->tag_list); blk_mq_add_queue_tag_set() 1878 blk_mq_update_tag_set_depth(set); blk_mq_add_queue_tag_set() 1879 mutex_unlock(&set->tag_list_lock); blk_mq_add_queue_tag_set() 1907 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) blk_mq_init_queue() argument 1911 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); blk_mq_init_queue() 1915 q = blk_mq_init_allocated_queue(set, uninit_q); blk_mq_init_queue() 1923 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, blk_mq_init_allocated_queue() argument 1935 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, blk_mq_init_allocated_queue() 1936 set->numa_node); blk_mq_init_allocated_queue() 1941 map = blk_mq_make_queue_map(set); blk_mq_init_allocated_queue() 1945 for (i = 0; i < set->nr_hw_queues; i++) { blk_mq_init_allocated_queue() 1971 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); blk_mq_init_allocated_queue() 1974 q->nr_hw_queues = set->nr_hw_queues; blk_mq_init_allocated_queue() 1980 q->mq_ops = set->ops; blk_mq_init_allocated_queue() 1983 if (!(set->flags & BLK_MQ_F_SG_MERGE)) blk_mq_init_allocated_queue() 2000 q->nr_requests = set->queue_depth; blk_mq_init_allocated_queue() 2002 if (set->ops->complete) blk_mq_init_allocated_queue() 2003 blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_allocated_queue() 2005 blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_init_allocated_queue() 2007 if (blk_mq_init_hw_queues(q, set)) blk_mq_init_allocated_queue() 2014 blk_mq_add_queue_tag_set(set, q); blk_mq_init_allocated_queue() 2022 for (i = 0; i < set->nr_hw_queues; i++) { blk_mq_init_allocated_queue() 2038 struct blk_mq_tag_set *set = q->tag_set; blk_mq_free_queue() local 2042 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_queue() 2043 blk_mq_free_hw_queues(q, set); blk_mq_free_queue() 2122 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) __blk_mq_alloc_rq_maps() argument 2126 for (i = 0; i < set->nr_hw_queues; i++) { __blk_mq_alloc_rq_maps() 2127 set->tags[i] = blk_mq_init_rq_map(set, i); __blk_mq_alloc_rq_maps() 2128 if (!set->tags[i]) __blk_mq_alloc_rq_maps() 2136 blk_mq_free_rq_map(set, set->tags[i], i); __blk_mq_alloc_rq_maps() 2143 * may reduce the depth asked for, if memory is tight. 
set->queue_depth 2146 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) blk_mq_alloc_rq_maps() argument 2151 depth = set->queue_depth; blk_mq_alloc_rq_maps() 2153 err = __blk_mq_alloc_rq_maps(set); blk_mq_alloc_rq_maps() 2157 set->queue_depth >>= 1; blk_mq_alloc_rq_maps() 2158 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { blk_mq_alloc_rq_maps() 2162 } while (set->queue_depth); blk_mq_alloc_rq_maps() 2164 if (!set->queue_depth || err) { blk_mq_alloc_rq_maps() 2169 if (depth != set->queue_depth) blk_mq_alloc_rq_maps() 2171 depth, set->queue_depth); blk_mq_alloc_rq_maps() 2177 * Alloc a tag set to be associated with one or more request queues. 2179 * requested depth down, if if it too large. In that case, the set 2180 * value will be stored in set->queue_depth. 2182 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) blk_mq_alloc_tag_set() argument 2186 if (!set->nr_hw_queues) blk_mq_alloc_tag_set() 2188 if (!set->queue_depth) blk_mq_alloc_tag_set() 2190 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) blk_mq_alloc_tag_set() 2193 if (!set->ops->queue_rq || !set->ops->map_queue) blk_mq_alloc_tag_set() 2196 if (set->queue_depth > BLK_MQ_MAX_DEPTH) { blk_mq_alloc_tag_set() 2199 set->queue_depth = BLK_MQ_MAX_DEPTH; blk_mq_alloc_tag_set() 2208 set->nr_hw_queues = 1; blk_mq_alloc_tag_set() 2209 set->queue_depth = min(64U, set->queue_depth); blk_mq_alloc_tag_set() 2212 set->tags = kmalloc_node(set->nr_hw_queues * blk_mq_alloc_tag_set() 2214 GFP_KERNEL, set->numa_node); blk_mq_alloc_tag_set() 2215 if (!set->tags) blk_mq_alloc_tag_set() 2218 if (blk_mq_alloc_rq_maps(set)) blk_mq_alloc_tag_set() 2221 mutex_init(&set->tag_list_lock); blk_mq_alloc_tag_set() 2222 INIT_LIST_HEAD(&set->tag_list); blk_mq_alloc_tag_set() 2226 kfree(set->tags); blk_mq_alloc_tag_set() 2227 set->tags = NULL; blk_mq_alloc_tag_set() 2232 void blk_mq_free_tag_set(struct blk_mq_tag_set *set) blk_mq_free_tag_set() argument 2236 for (i = 0; i < set->nr_hw_queues; i++) { blk_mq_free_tag_set() 2237 if (set->tags[i]) blk_mq_free_tag_set() 2238 blk_mq_free_rq_map(set, set->tags[i], i); blk_mq_free_tag_set() 2241 kfree(set->tags); blk_mq_free_tag_set() 2242 set->tags = NULL; blk_mq_free_tag_set() 2248 struct blk_mq_tag_set *set = q->tag_set; blk_mq_update_nr_requests() local 2252 if (!set || nr > set->queue_depth) blk_mq_update_nr_requests() 1604 blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) blk_mq_exit_hctx() argument 1625 blk_mq_exit_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set, int nr_queue) blk_mq_exit_hw_queues() argument 1638 blk_mq_free_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) blk_mq_free_hw_queues() argument 1648 blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) blk_mq_init_hctx() argument 1718 blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) blk_mq_init_hw_queues() argument
|
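The blk-mq.c excerpt above implements blk_mq_alloc_tag_set()/blk_mq_init_queue() and their teardown counterparts, and spells out the validation rules (queue_rq and map_queue are mandatory, queue_depth may be lowered under memory pressure). Below is a hedged sketch of the driver side of that 4.1-era API as a module skeleton; the sketch_* names are illustrative, no gendisk is registered, and a real driver would issue actual I/O in its queue_rq handler instead of completing requests immediately.

    #include <linux/module.h>
    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    static struct blk_mq_tag_set sketch_tag_set;
    static struct request_queue *sketch_queue;

    /* Complete every request straight away; a real driver issues I/O here. */
    static int sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                               const struct blk_mq_queue_data *bd)
    {
        blk_mq_start_request(bd->rq);
        blk_mq_end_request(bd->rq, 0);
        return BLK_MQ_RQ_QUEUE_OK;
    }

    static struct blk_mq_ops sketch_mq_ops = {
        .queue_rq  = sketch_queue_rq,
        .map_queue = blk_mq_map_queue,  /* required by blk_mq_alloc_tag_set() */
    };

    static int __init sketch_init(void)
    {
        int ret;

        sketch_tag_set.ops          = &sketch_mq_ops;
        sketch_tag_set.nr_hw_queues = 1;
        sketch_tag_set.queue_depth  = 64;   /* may be lowered if memory is tight */
        sketch_tag_set.numa_node    = NUMA_NO_NODE;
        sketch_tag_set.cmd_size     = 0;    /* per-request driver payload */
        sketch_tag_set.flags        = BLK_MQ_F_SHOULD_MERGE;

        ret = blk_mq_alloc_tag_set(&sketch_tag_set);
        if (ret)
            return ret;

        sketch_queue = blk_mq_init_queue(&sketch_tag_set);
        if (IS_ERR(sketch_queue)) {
            blk_mq_free_tag_set(&sketch_tag_set);
            return PTR_ERR(sketch_queue);
        }
        return 0;
    }

    static void __exit sketch_exit(void)
    {
        blk_cleanup_queue(sketch_queue);
        blk_mq_free_tag_set(&sketch_tag_set);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");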
/linux-4.1.27/drivers/staging/iio/adc/ |
H A D | ad7606.h | 21 * @gpio_reset: gpio connected to the RESET pin, if not used set to -1 22 * @gpio_range: gpio connected to the RANGE pin, if not used set to -1 23 * @gpio_os0: gpio connected to the OS0 pin, if not used set to -1 24 * @gpio_os1: gpio connected to the OS1 pin, if not used set to -1 25 * @gpio_os2: gpio connected to the OS2 pin, if not used set to -1 26 * @gpio_frstdata: gpio connected to the FRSTDAT pin, if not used set to -1 27 * @gpio_stby: gpio connected to the STBY pin, if not used set to -1
|
/linux-4.1.27/arch/mips/include/asm/octeon/ |
H A D | octeon.h | 78 /* If set, use uart1 for console */ 80 /* If set, use PCI console */ 132 /* If set, use uart1 for console */ 134 /* If set, use PCI console */ 200 /* R/W If set, marked write-buffer entries time out 204 /* R/W If set, a merged store does not clear the 211 /* R/W If set, SYNCWS and SYNCS only order marked 214 * DISSYNCWS is set. */ 216 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as 219 /* R/W If set, no stall happens on write buffer 222 /* R/W If set (and SX set), supervisor-level 226 /* R/W If set (and UX set), user-level loads/stores 229 /* R/W If set (and SX set), supervisor-level 233 /* R/W If set (and UX set), user-level loads/stores 236 /* R/W If set, all stores act as SYNCW (NOMERGE must 237 * be set when this is set) RW, reset to 0. */ 239 /* R/W If set, no stores merge, and all stores reach 248 /* R/W If set, the (mem) CSR clock never turns off. */ 250 /* R/W If set, mclk never turns off. */ 260 /* R/W If set, do not put Istream in the L2 cache. */ 266 /* R/W If set, CVMSEG is available for loads/stores in 269 /* R/W If set, CVMSEG is available for loads/stores in 272 /* R/W If set, CVMSEG is available for loads/stores in
|
/linux-4.1.27/arch/mn10300/include/asm/ |
H A D | bitops.h | 24 * set bit 92 * test and set bit 156 * __ffs - find first bit set 159 * - return 31..0 to indicate bit 31..0 most least significant bit set 160 * - if no bits are set in x, the result is undefined 183 * fls - find last bit set 187 * - return 32..1 to indicate bit 31..0 most significant bit set 188 * - return 0 to indicate no bits set 197 * __fls - find last (most-significant) set bit in a long word 200 * Undefined if no set bit exists, so code should check against 0 first. 208 * ffs - find first bit set 211 * - return 32..1 to indicate bit 31..0 most least significant bit set 212 * - return 0 to indicate no bits set
|
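The comments above fix the return conventions: ffs()/fls() number bits 1..32 and return 0 when no bit is set, while __ffs()/__fls() return a 0-based index and are undefined for a zero argument. The stand-alone C stand-ins below (my_ffs/my_fls are invented names) only illustrate those conventions; they are not the mn10300 implementations.

#include <stdio.h>

static int my_ffs(unsigned int x)       /* 1..32, or 0 if x == 0 */
{
        int bit;

        if (!x)
                return 0;
        for (bit = 1; !(x & 1); bit++)
                x >>= 1;
        return bit;
}

static int my_fls(unsigned int x)       /* 32..1, or 0 if x == 0 */
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

int main(void)
{
        printf("ffs(0x0)  = %d\n", my_ffs(0x0));   /* 0: no bits set */
        printf("ffs(0x10) = %d\n", my_ffs(0x10));  /* 5: bit 4 is the lowest set bit */
        printf("fls(0x10) = %d\n", my_fls(0x10));  /* 5: bit 4 is the highest set bit */
        return 0;
}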
/linux-4.1.27/drivers/media/platform/davinci/ |
H A D | ccdc_hw_device.h | 37 /* set ccdc base address */ 45 /* Pointer to function to set hw parameters */ 50 * Pointer to function to set parameters. Used 62 /* Pointer to function to set buffer type */ 66 /* Pointer to function to set frame format */ 72 /* Pointer to function to set buffer type */ 76 /* Pointer to function to set image window */ 78 /* Pointer to function to set image window */ 90 /* Pointer to function to set frame buffer address */
|
/linux-4.1.27/fs/btrfs/ |
H A D | sysfs.c | 38 enum btrfs_feature_set set) get_features() 41 if (set == FEAT_COMPAT) get_features() 43 else if (set == FEAT_COMPAT_RO) get_features() 50 enum btrfs_feature_set set, u64 features) set_features() 53 if (set == FEAT_COMPAT) set_features() 55 else if (set == FEAT_COMPAT_RO) set_features() 64 u64 set, clear; can_modify_feature() local 67 set = BTRFS_FEATURE_COMPAT_SAFE_SET; can_modify_feature() 71 set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET; can_modify_feature() 75 set = BTRFS_FEATURE_INCOMPAT_SAFE_SET; can_modify_feature() 79 printk(KERN_WARNING "btrfs: sysfs: unknown feature set %d\n", can_modify_feature() 84 if (set & fa->feature_bit) can_modify_feature() 114 u64 features, set, clear; btrfs_feature_attr_store() local 127 set = BTRFS_FEATURE_COMPAT_SAFE_SET; btrfs_feature_attr_store() 130 set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET; btrfs_feature_attr_store() 133 set = BTRFS_FEATURE_INCOMPAT_SAFE_SET; btrfs_feature_attr_store() 144 if ((val && !(set & fa->feature_bit)) || btrfs_feature_attr_store() 470 int set; addrm_unknown_feature_attrs() local 472 for (set = 0; set < FEAT_MAX; set++) { addrm_unknown_feature_attrs() 479 u64 features = get_features(fs_info, set); addrm_unknown_feature_attrs() 480 features &= ~supported_feature_masks[set]; addrm_unknown_feature_attrs() 492 fa = &btrfs_feature_attrs[set][i]; addrm_unknown_feature_attrs() 536 char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags) btrfs_printable_features() argument 547 for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) { btrfs_printable_features() 553 name = btrfs_feature_attrs[set][i].kobj_attr.attr.name; btrfs_printable_features() 564 int set, i; init_feature_attrs() local 586 for (set = 0; set < FEAT_MAX; set++) { init_feature_attrs() 587 for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) { init_feature_attrs() 588 char *name = btrfs_unknown_feature_names[set][i]; init_feature_attrs() 589 fa = &btrfs_feature_attrs[set][i]; init_feature_attrs() 595 btrfs_feature_set_names[set], i); init_feature_attrs() 599 fa->feature_set = set; init_feature_attrs() 37 get_features(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set) get_features() argument 49 set_features(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set, u64 features) set_features() argument
|
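can_modify_feature() and btrfs_feature_attr_store() above implement a simple policy: a feature bit may be turned on only if it lies in the per-set SAFE_SET mask, and turned off only if it lies in the SAFE_CLEAR mask. A reduced stand-alone sketch of that check follows; the mask values are made up, the real ones being the BTRFS_FEATURE_*_SAFE_SET/_SAFE_CLEAR defines for each feature set (COMPAT, COMPAT_RO, INCOMPAT).

#include <stdbool.h>
#include <stdint.h>

/* Illustrative masks only -- not the real btrfs values. */
#define SAFE_SET_MASK    UINT64_C(0x000000000000000c)
#define SAFE_CLEAR_MASK  UINT64_C(0x0000000000000004)

/* Enabling requires membership in SAFE_SET, disabling in SAFE_CLEAR. */
static bool feature_change_allowed(uint64_t feature_bit, bool enable)
{
        uint64_t mask = enable ? SAFE_SET_MASK : SAFE_CLEAR_MASK;

        return (mask & feature_bit) != 0;
}

Anything that fails this test is rejected before the on-disk feature word is rewritten via set_features().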
/linux-4.1.27/arch/sh/lib/ |
H A D | movmem.S | 45 .set __movstr, __movmem 83 .set __movstrSI64, __movmemSI64 89 .set __movstrSI60, __movmemSI60 95 .set __movstrSI56, __movmemSI56 101 .set __movstrSI52, __movmemSI52 107 .set __movstrSI48, __movmemSI48 113 .set __movstrSI44, __movmemSI44 119 .set __movstrSI40, __movmemSI40 125 .set __movstrSI36, __movmemSI36 131 .set __movstrSI32, __movmemSI32 137 .set __movstrSI28, __movmemSI28 143 .set __movstrSI24, __movmemSI24 149 .set __movstrSI20, __movmemSI20 155 .set __movstrSI16, __movmemSI16 161 .set __movstrSI12, __movmemSI12 167 .set __movstrSI8, __movmemSI8 173 .set __movstrSI4, __movmemSI4 181 .set __movstr_i4_even, __movmem_i4_even 185 .set __movstr_i4_odd, __movmem_i4_odd 189 .set __movstrSI12_i4, __movmemSI12_i4
|
/linux-4.1.27/include/net/caif/ |
H A D | caif_device.h | 24 * low-latency. This member is set by CAIF Link 29 * Is set by CAIF Link Layer in order to indicate if the 34 * Is set if the physical interface is 38 * Is set if the CAIF Link Layer expects 43 * CAIF Core layer must set the member flowctrl in order to supply
|
/linux-4.1.27/arch/m68k/sun3/ |
H A D | intersil.c | 21 /* bits to set for start/run of the intersil */ 32 /* get/set hwclock */ 34 int sun3_hwclk(int set, struct rtc_time *t) sun3_hwclk() argument 45 /* set or read the clock */ sun3_hwclk() 46 if(set) { sun3_hwclk()
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | intel_cacheinfo.c | 40 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 41 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 42 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ 43 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ 44 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ 45 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ 46 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ 47 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ 48 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 49 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ 50 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ 51 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ 52 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ 53 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ 54 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 55 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */ 56 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ 57 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 58 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ 59 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 60 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ 61 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ 62 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ 63 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ 64 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ 65 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ 66 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ 67 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ 68 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ 69 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ 70 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ 71 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ 72 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ 73 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ 74 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ 75 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 76 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 77 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 78 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ 79 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ 80 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ 81 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ 82 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ 83 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ 84 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 85 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 86 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ 87 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ 88 { 0x7d, LVL_2, MB(2) }, /* 8-way set 
assoc, 64 byte line size */ 89 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ 90 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ 91 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ 92 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ 93 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ 94 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ 95 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ 96 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ 97 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ 98 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ 99 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ 100 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ 101 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ 102 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ 103 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ 104 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ 105 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ 106 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ 107 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ 108 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ 109 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ 110 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ 111 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ 726 /* If bit 31 is set, this is an unknown format */ init_intel_cacheinfo() 790 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in init_intel_cacheinfo() 793 * that SMT shares all caches, we can unconditionally set cpu_llc_id to init_intel_cacheinfo()
|
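The table above maps CPUID leaf 2 descriptor bytes to a cache level and size, and init_intel_cacheinfo() walks it for every descriptor byte the CPU reports. Below is a stand-alone sketch of that lookup; the struct and function names are invented, and only a few rows are copied for illustration.

#include <stddef.h>
#include <stdio.h>

enum cache_level { LVL_1_INST, LVL_1_DATA, LVL_2, LVL_3, LVL_TRACE };

struct cache_desc {
        unsigned char descriptor;   /* byte reported by CPUID leaf 2 */
        enum cache_level level;
        unsigned int size_kb;
};

/* A few rows copied from the table above, for illustration only. */
static const struct cache_desc cache_table[] = {
        { 0x2c, LVL_1_DATA, 32   }, /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32   }, /* 8-way set assoc, 64 byte line size */
        { 0x45, LVL_2,      2048 }, /* 4-way set assoc, 32 byte line size */
};

static const struct cache_desc *lookup_descriptor(unsigned char desc)
{
        size_t i;

        for (i = 0; i < sizeof(cache_table) / sizeof(cache_table[0]); i++)
                if (cache_table[i].descriptor == desc)
                        return &cache_table[i];
        return NULL;    /* descriptor not in the table */
}

int main(void)
{
        const struct cache_desc *d = lookup_descriptor(0x45);

        if (d)
                printf("descriptor 0x45 -> level enum %d, %u KB\n",
                       d->level, d->size_kb);
        return 0;
}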
/linux-4.1.27/drivers/video/fbdev/mbx/ |
H A D | mbxfb.c | 334 static int mbxfb_setupOverlay(struct mbxfb_overlaySetup *set) mbxfb_setupOverlay() argument 341 if (set->scaled_width==0 || set->scaled_height==0) mbxfb_setupOverlay() 361 vsctrl |= Vsctrl_Width(set->width) | Vsctrl_Height(set->height) | mbxfb_setupOverlay() 368 switch (set->fmt) { mbxfb_setupOverlay() 372 set->Y_stride = ((set->width) + 0xf ) & ~0xf; mbxfb_setupOverlay() 377 set->Y_stride = ((set->width) + 0xf ) & ~0xf; mbxfb_setupOverlay() 383 set->Y_stride = (set->width*2 + 0xf ) & ~0xf; mbxfb_setupOverlay() 387 set->Y_stride = (set->width*2 + 0xf ) & ~0xf; mbxfb_setupOverlay() 391 set->Y_stride = (set->width*2 + 0xf ) & ~0xf; mbxfb_setupOverlay() 395 set->Y_stride = (set->width*2 + 0xf ) & ~0xf; mbxfb_setupOverlay() 404 * be zero if we would not set them here. (And then, mbxfb_setupOverlay() 410 set->UV_stride = ((set->width/2) + 0x7 ) & ~0x7; mbxfb_setupOverlay() 411 set->U_offset = set->height * set->Y_stride; mbxfb_setupOverlay() 412 set->V_offset = set->U_offset + mbxfb_setupOverlay() 413 set->height * set->UV_stride; mbxfb_setupOverlay() 415 (0x60000 + set->mem_offset + set->U_offset)>>3); mbxfb_setupOverlay() 417 (0x60000 + set->mem_offset + set->V_offset)>>3); mbxfb_setupOverlay() 420 vscadr |= Vscadr_Vbase_Adr((0x60000 + set->mem_offset)>>4); mbxfb_setupOverlay() 422 if (set->enable) mbxfb_setupOverlay() 426 vsadr = Vsadr_Srcstride((set->Y_stride)/16-1) | mbxfb_setupOverlay() 427 Vsadr_Xstart(set->x) | Vsadr_Ystart(set->y); mbxfb_setupOverlay() 430 sssize = Sssize_Sc_Width(set->scaled_width-1) | mbxfb_setupOverlay() 431 Sssize_Sc_Height(set->scaled_height-1); mbxfb_setupOverlay() 436 spoctrl |= Spoctrl_Vpitch((set->height<<11)/set->scaled_height); mbxfb_setupOverlay() 439 if (set->scaled_width == set->width) mbxfb_setupOverlay() 441 if (set->scaled_height == set->height) mbxfb_setupOverlay() 445 shctrl |= Shctrl_Hpitch((set->width<<11)/set->scaled_width); mbxfb_setupOverlay() 460 if (set->enable) mbxfb_setupOverlay()
|
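The stride and offset arithmetic above relies on the usual round-up-to-a-power-of-two idiom, (x + (align - 1)) & ~(align - 1): the Y stride is padded to 16 bytes, the chroma stride to 8, and the U/V plane offsets are derived from them. A tiny stand-alone illustration of that arithmetic (the input values are made up):

#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
#define ALIGN_UP(x, align)  (((x) + ((align) - 1)) & ~((align) - 1))

int main(void)
{
        unsigned int width = 150, height = 120;

        /* Y plane padded to 16 bytes, chroma planes to 8, as in the code above. */
        unsigned int y_stride  = ALIGN_UP(width, 16);       /* 160 */
        unsigned int uv_stride = ALIGN_UP(width / 2, 8);    /* 80  */
        unsigned int u_offset  = height * y_stride;         /* start of U plane */
        unsigned int v_offset  = u_offset + height * uv_stride;

        printf("Y stride %u, UV stride %u, U @ %u, V @ %u\n",
               y_stride, uv_stride, u_offset, v_offset);
        return 0;
}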
/linux-4.1.27/arch/score/include/asm/ |
H A D | checksum.h | 57 ".set volatile\n\t" csum_fold() 58 ".set\tr1\n\t" csum_fold() 69 ".set\tnor1\n\t" csum_fold() 70 ".set optimize\n\t" csum_fold() 89 ".set volatile\n\t" ip_fast_csum() 90 ".set\tnor1\n\t" ip_fast_csum() 121 ".set\tr1\n\t" ip_fast_csum() 122 ".set optimize\n\t" ip_fast_csum() 135 ".set volatile\n\t" csum_tcpudp_nofold() 151 ".set optimize\n\t" csum_tcpudp_nofold() 187 ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t" csum_ipv6_magic() 237 ".set\toptimize" csum_ipv6_magic()
|
/linux-4.1.27/arch/frv/lib/ |
H A D | atomic-ops.S | 30 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 33 orcr cc7,cc7,cc3 /* set CC3 to true */ 52 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 55 orcr cc7,cc7,cc3 /* set CC3 to true */ 74 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 77 orcr cc7,cc7,cc3 /* set CC3 to true */ 96 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 99 orcr cc7,cc7,cc3 /* set CC3 to true */ 118 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 121 orcr cc7,cc7,cc3 /* set CC3 to true */ 140 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 143 orcr cc7,cc7,cc3 /* set CC3 to true */
|
H A D | atomic64-ops.S | 31 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 34 orcr cc7,cc7,cc3 /* set CC3 to true */ 54 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 57 orcr cc7,cc7,cc3 /* set CC3 to true */ 78 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 81 orcr cc7,cc7,cc3 /* set CC3 to true */ 102 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 105 orcr cc7,cc7,cc3 /* set CC3 to true */ 126 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 129 orcr cc7,cc7,cc3 /* set CC3 to true */ 148 orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
|
/linux-4.1.27/scripts/ |
H A D | config | 18 --set-str option string 20 --set-val option value 103 name_re="^($name=|# $name is not set)" 104 before_re="^($before=|# $before is not set)" 107 txt_append "^# $before is not set" "$new" "$FN" 110 txt_subst "^# $name is not set" "$new" "$FN" 120 txt_delete "^# $name is not set" "$FN" 166 set_var "${CONFIG_}$ARG" "# ${CONFIG_}$ARG is not set" 173 --set-str) 179 --set-val) 188 if grep -q "# ${CONFIG_}$ARG is not set" $FN ; then 209 set_var "${CONFIG_}$B" "# ${CONFIG_}$B is not set" "${CONFIG_}$A"
|
H A D | headers.sh | 5 set -e
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | ptlrpcd.c | 41 * ptlrpcd is a special thread with its own set where other user might add 143 * Move all request from an existing request set to the ptlrpcd queue. 144 * All requests from the set must be in phase RQ_PHASE_NEW. 146 void ptlrpcd_add_rqset(struct ptlrpc_request_set *set) ptlrpcd_add_rqset() argument 156 list_for_each_safe(pos, tmp, &set->set_requests) { ptlrpcd_add_rqset() 167 list_splice_init(&set->set_requests, &new->set_new_requests); ptlrpcd_add_rqset() 168 i = atomic_read(&set->set_remaining); ptlrpcd_add_rqset() 170 atomic_set(&set->set_remaining, 0); ptlrpcd_add_rqset() 254 static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set) ptlrpc_reqset_get() argument 256 atomic_inc(&set->set_refcount); ptlrpc_reqset_get() 260 * Check if there is more work to do on ptlrpcd set. 267 struct ptlrpc_request_set *set = pc->pc_set; ptlrpcd_check() local 271 if (atomic_read(&set->set_new_count)) { ptlrpcd_check() 272 spin_lock(&set->set_new_req_lock); ptlrpcd_check() 273 if (likely(!list_empty(&set->set_new_requests))) { ptlrpcd_check() 274 list_splice_init(&set->set_new_requests, ptlrpcd_check() 275 &set->set_requests); ptlrpcd_check() 276 atomic_add(atomic_read(&set->set_new_count), ptlrpcd_check() 277 &set->set_remaining); ptlrpcd_check() 278 atomic_set(&set->set_new_count, 0); ptlrpcd_check() 284 spin_unlock(&set->set_new_req_lock); ptlrpcd_check() 295 * interpreters assume that env is set up), nor repeat ptlrpcd_check() 306 if (atomic_read(&set->set_remaining)) ptlrpcd_check() 307 rc |= ptlrpc_check_set(env, set); ptlrpcd_check() 311 list_for_each_safe(pos, tmp, &set->set_requests) { ptlrpcd_check() 325 rc = atomic_read(&set->set_new_count); ptlrpcd_check() 352 rc = ptlrpcd_steal_rqset(set, ps); ptlrpcd_check() 369 * thread which spins on a set which contains the rpcs and sends them. 375 struct ptlrpc_request_set *set = pc->pc_set; ptlrpcd() local 396 * the future, ptlrpcd thread (or a thread-set) has to given ptlrpcd() 408 * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when ptlrpcd() 409 * there are requests in the set. New requests come in on the set's ptlrpcd() 410 * new_req_list and ptlrpcd_check() moves them into the set. ptlrpcd() 416 timeout = ptlrpc_set_next_timeout(set); ptlrpcd() 418 ptlrpc_expired_set, set); ptlrpcd() 421 l_wait_event(set->set_waitq, ptlrpcd() 430 ptlrpc_abort_set(set); ptlrpcd() 436 * copied all raced new rpcs into the set so we can kill them. ptlrpcd() 443 if (!list_empty(&set->set_requests)) ptlrpcd() 444 ptlrpc_set_wait(set); ptlrpcd() 619 * ptlrpcd thread (or a thread-set) has to be given an argument, ptlrpcd_start() 649 struct ptlrpc_request_set *set = pc->pc_set; ptlrpcd_start() local 654 ptlrpc_set_destroy(set); ptlrpcd_start() 678 struct ptlrpc_request_set *set = pc->pc_set; ptlrpcd_free() local 691 ptlrpc_set_destroy(set); ptlrpcd_free() 757 * RPCs processed in the same set under some cases. The ptlrpcd may ptlrpcd_init()
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lustre_ver.h | 18 * by this amount (set in lustre/autoconf/lustre-version.ac). */ 23 * (set in lustre/autoconf/lustre-version.ac) */
|
/linux-4.1.27/drivers/pcmcia/ |
H A D | m32r_pcc.h | 52 #define PCMOD_CBSZ (1UL<<(31-23)) /* set for 8bit */ 54 #define PCMOD_DBEX (1UL<<(31-31)) /* set for exchange */
|
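The (1UL << (31 - n)) pattern above numbers register bits from the most significant end, which is how the m32r documentation counts them; datasheet bit 23 is therefore bit 8 from the LSB. A one-file sanity check of that convention (the macro name is invented for the illustration):

#include <assert.h>

/* Bit n counted from the MSB of a 32-bit register, as in the defines above. */
#define BIT_FROM_MSB(n)  (1UL << (31 - (n)))

int main(void)
{
        assert(BIT_FROM_MSB(23) == 0x00000100UL);   /* PCMOD_CBSZ */
        assert(BIT_FROM_MSB(31) == 0x00000001UL);   /* PCMOD_DBEX */
        return 0;
}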
/linux-4.1.27/arch/mips/bcm63xx/ |
H A D | clk.c | 20 void (*set)(struct clk *, int); member in struct:clk 31 if (clk->set && (clk->usage++) == 0) clk_enable_unlocked() 32 clk->set(clk, 1); clk_enable_unlocked() 37 if (clk->set && (--clk->usage) == 0) clk_disable_unlocked() 38 clk->set(clk, 0); clk_disable_unlocked() 73 .set = enet_misc_set, 100 .set = enetx_set, 105 .set = enetx_set, 119 .set = ephy_set, 149 .set = enetsw_set, 164 .set = pcm_set, 183 .set = usbh_set, 200 .set = usbd_set, 225 .set = spi_set, 246 .set = hsspi_set, 272 .set = xtm_set, 287 .set = ipsec_set, 303 .set = pcie_set,
|
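The enable/disable helpers above only toggle the hardware gate on the 0->1 and 1->0 usage transitions. Below is a minimal user-space sketch of that refcounting pattern; the struct and callback mirror the fields visible in the excerpt but are otherwise illustrative, and locking is omitted (the _unlocked suffix implies the caller holds a lock).

#include <stdio.h>

struct clk {
        const char *name;
        int usage;                          /* enable refcount */
        void (*set)(struct clk *, int);     /* gate callback */
};

static void demo_set(struct clk *clk, int enable)
{
        printf("%s: gate %s\n", clk->name, enable ? "on" : "off");
}

/* Only the first enable and the last disable touch the hardware. */
static void clk_enable_unlocked(struct clk *clk)
{
        if (clk->set && (clk->usage++) == 0)
                clk->set(clk, 1);
}

static void clk_disable_unlocked(struct clk *clk)
{
        if (clk->set && (--clk->usage) == 0)
                clk->set(clk, 0);
}

int main(void)
{
        struct clk spi = { .name = "spi", .set = demo_set };

        clk_enable_unlocked(&spi);      /* prints "gate on" */
        clk_enable_unlocked(&spi);      /* no output, already on */
        clk_disable_unlocked(&spi);     /* no output, still one user */
        clk_disable_unlocked(&spi);     /* prints "gate off" */
        return 0;
}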
/linux-4.1.27/drivers/media/dvb-frontends/ |
H A D | m88ds3103.h | 25 * Default: none, must set 32 * Default: none, must set 39 * Default: none, must set 95 * Default: none, must set 102 * 1: pin high set to VOLTAGE_13, pin low to set VOLTAGE_18. 103 * 0: pin high set to VOLTAGE_18, pin low to set VOLTAGE_13.
|
/linux-4.1.27/arch/sh/include/mach-x3proto/mach/ |
H A D | ilsel.h | 41 int ilsel_enable(ilsel_source_t set); 42 int ilsel_enable_fixed(ilsel_source_t set, unsigned int level);
|
/linux-4.1.27/arch/sh/kernel/cpu/sh3/ |
H A D | serial-sh770x.c | 13 /* We need to set SCPCR to enable RTS/CTS */ sh770x_sci_init_pins() 19 /* We need to set SCPCR to enable RTS/CTS */ sh770x_sci_init_pins()
|
/linux-4.1.27/arch/arm/include/uapi/asm/ |
H A D | hwcap.h | 21 #define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */ 26 #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
|
/linux-4.1.27/drivers/video/console/ |
H A D | newport_con.c | 75 npregs->set.wrmask = 0xffffffff; newport_render_background() 76 npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | newport_render_background() 79 npregs->set.colori = ci; newport_render_background() 80 npregs->set.xystarti = newport_render_background() 120 npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | newport_show_logo() 123 npregs->set.xystarti = ((newport_xsize - logo->width) << 16) | (0); newport_show_logo() 124 npregs->set.xyendi = ((newport_xsize - 1) << 16); newport_show_logo() 141 npregs->set.wrmask = 0xffffffff; newport_clear_screen() 142 npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | newport_clear_screen() 145 npregs->set.colori = ci; newport_clear_screen() 146 npregs->set.xystarti = (xstart << 16) | ystart; newport_clear_screen() 169 npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM | newport_reset() 174 npregs->set.dcbdata0.byshort.s1 = 0xff00; newport_reset() 176 npregs->set.dcbdata0.byshort.s1 = 0x0000; newport_reset() 182 npregs->set.dcbmode = (DCB_XMAP0 | R_DCB_XMAP9_PROTOCOL | newport_reset() 184 npregs->set.dcbdata0.bybytes.b3 &= ~XM9_PUPMODE; newport_reset() 185 npregs->set.dcbmode = (DCB_XMAP1 | R_DCB_XMAP9_PROTOCOL | newport_reset() 187 npregs->set.dcbdata0.bybytes.b3 &= ~XM9_PUPMODE; newport_reset() 209 npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM | newport_get_screensize() 213 linetable[i] = npregs->set.dcbdata0.byshort.s1; newport_get_screensize() 220 npregs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_RAM | newport_get_screensize() 224 treg = npregs->set.dcbdata0.byshort.s1; newport_get_screensize() 229 treg = npregs->set.dcbdata0.byshort.s1; newport_get_screensize() 254 npregs->set.dcbmode = (DCB_CMAP0 | NCMAP_PROTOCOL | newport_get_revisions() 256 tmp = npregs->set.dcbdata0.bybytes.b3; newport_get_revisions() 261 npregs->set.dcbmode = (DCB_CMAP1 | NCMAP_PROTOCOL | newport_get_revisions() 263 tmp = npregs->set.dcbdata0.bybytes.b3; newport_get_revisions() 269 npregs->set.dcbmode = (DCB_XMAP0 | R_DCB_XMAP9_PROTOCOL | newport_get_revisions() 271 xmap9_rev = npregs->set.dcbdata0.bybytes.b3 & 7; newport_get_revisions() 273 npregs->set.dcbmode = (DCB_BT445 | BT445_PROTOCOL | newport_get_revisions() 275 npregs->set.dcbdata0.bybytes.b3 = BT445_REVISION_REG; newport_get_revisions() 276 npregs->set.dcbmode = (DCB_BT445 | BT445_PROTOCOL | newport_get_revisions() 278 bt445_rev = (npregs->set.dcbdata0.bybytes.b3 >> 4) - 0x0a; newport_get_revisions() 310 npregs->set.xstarti = TESTVAL; newport_startup() 311 if (npregs->set._xstart.word != XSTI_TO_FXSTART(TESTVAL)) newport_startup() 386 npregs->set.colori = charattr & 0xf; newport_putc() 387 npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | newport_putc() 392 npregs->set.xystarti = (xpos << 16) | ((ypos + topscan) & 0x3ff); newport_putc() 393 npregs->set.xyendi = ((xpos + 7) << 16); newport_putc() 421 npregs->set.colori = charattr & 0xf; newport_putcs() 422 npregs->set.drawmode0 = (NPORT_DMODE0_DRAW | NPORT_DMODE0_BLOCK | newport_putcs() 432 npregs->set.xystarti = newport_putcs() 434 npregs->set.xyendi = ((xpos + 7) << 16); newport_putcs() 709 npregs->set.drawmode0 = (NPORT_DMODE0_S2S | NPORT_DMODE0_BLOCK | newport_bmove() 712 npregs->set.xystarti = (xs << 16) | ys; newport_bmove() 713 npregs->set.xyendi = (xe << 16) | ye; newport_bmove()
|
/linux-4.1.27/include/net/netfilter/ |
H A D | nf_tables.h | 59 * struct nft_regs - nf_tables register set 87 * struct nft_ctx - nf_tables rule/set context 156 * struct nft_set_elem - generic representation of set elements 175 const struct nft_set *set, 181 * struct nft_set_desc - description of set elements 185 * @size: number of set elements 222 * struct nft_set_ops - nf_tables set operations 224 * @lookup: look up an element within the set 225 * @insert: insert new element into set 228 * @remove: remove element from set 229 * @walk: iterate over all set elemeennts 230 * @privsize: function to return size of set private data 231 * @init: initialize private data of new set instance 232 * @destroy: destroy private data of set instance 239 bool (*lookup)(const struct nft_set *set, 242 bool (*update)(struct nft_set *set, 251 int (*insert)(const struct nft_set *set, 253 void (*activate)(const struct nft_set *set, 255 void * (*deactivate)(const struct nft_set *set, 257 void (*remove)(const struct nft_set *set, 260 const struct nft_set *set, 267 int (*init)(const struct nft_set *set, 270 void (*destroy)(const struct nft_set *set); 282 * struct nft_set - nf_tables set instance 284 * @list: table set list node 285 * @bindings: list of set bindings 286 * @name: name of the set 289 * @size: maximum set size 294 * @policy: set parameterization (see enum nft_set_policies) 295 * @ops: set ops 297 * @flags: set flags 300 * @data: private set data 324 static inline void *nft_set_priv(const struct nft_set *set) nft_set_priv() argument 326 return (void *)set->data; nft_set_priv() 339 static inline unsigned long nft_set_gc_interval(const struct nft_set *set) nft_set_gc_interval() argument 341 return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ; nft_set_gc_interval() 345 * struct nft_set_binding - nf_tables set binding 347 * @list: set bindings list node 348 * @chain: chain containing the rule bound to the set 349 * @flags: set action flags 351 * A set binding contains all information necessary for validation 352 * of new elements added to a bound set. 360 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, 362 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 366 * enum nft_set_extensions - set extension type IDs 389 * struct nft_set_ext_type - set extension type 402 * struct nft_set_ext_tmpl - set extension template 413 * struct nft_set_ext - set extensions 507 static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set, nft_set_elem_ext() argument 510 return elem + set->ops->elemsize; nft_set_elem_ext() 513 void *nft_set_elem_init(const struct nft_set *set, 517 void nft_set_elem_destroy(const struct nft_set *set, void *elem); 520 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch 523 * @set: set the elements belong to 528 const struct nft_set *set; member in struct:nft_set_gc_batch_head 537 * struct nft_set_gc_batch - nf_tables set garbage collection batch 547 struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, 558 nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb, nft_set_gc_batch_check() argument 566 return nft_set_gc_batch_alloc(set, gfp); nft_set_gc_batch_check() 882 MODULE_ALIAS("nft-set") 887 * they're active in. A set bit means they're inactive in the generation 893 * it is set inactive in the next generation. 
After committing the ruleset, 924 static inline void nft_set_elem_change_active(const struct nft_set *set, nft_set_elem_change_active() argument 927 ext->genmask ^= nft_genmask_next(read_pnet(&set->pnet)); nft_set_elem_change_active() 988 struct nft_set *set; member in struct:nft_trans_set 993 (((struct nft_trans_set *)trans->data)->set) 1024 struct nft_set *set; member in struct:nft_trans_elem 1029 (((struct nft_trans_elem *)trans->data)->set)
|
/linux-4.1.27/drivers/clk/ |
H A D | clk-gate.c | 35 * set2dis = 1 -> clear bit -> set = 0 36 * set2dis = 0 -> set bit -> set = 1 39 * set2dis = 1 -> set bit -> set = 1 40 * set2dis = 0 -> clear bit -> set = 0 47 int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0; clk_gate_endisable() local 51 set ^= enable; clk_gate_endisable() 58 if (set) clk_gate_endisable() 63 if (set) clk_gate_endisable() 94 /* if a set bit disables this clk, flip it before masking */ clk_gate_is_enabled()
|
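The comment block above spells out the four CLK_GATE_SET_TO_DISABLE cases; clk_gate_endisable() collapses them into a single XOR, set ^= enable. A stand-alone check that the XOR reproduces the table:

#include <stdio.h>

/* Returns 1 if the gate bit must be written as 1, 0 if it must be cleared. */
static int gate_bit_value(int set_to_disable, int enable)
{
        int set = set_to_disable ? 1 : 0;

        set ^= enable;  /* flip the polarity when enabling */
        return set;
}

int main(void)
{
        /* Reproduces the four cases listed in the comment above. */
        printf("set2dis=1 enable=1 -> %d\n", gate_bit_value(1, 1)); /* clear bit */
        printf("set2dis=1 enable=0 -> %d\n", gate_bit_value(1, 0)); /* set bit   */
        printf("set2dis=0 enable=1 -> %d\n", gate_bit_value(0, 1)); /* set bit   */
        printf("set2dis=0 enable=0 -> %d\n", gate_bit_value(0, 0)); /* clear bit */
        return 0;
}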
/linux-4.1.27/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_ldu.c | 84 * that the guest will set the same layout as the host. vmw_ldu_commit_list() 210 static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) vmw_ldu_crtc_set_config() argument 221 if (!set) vmw_ldu_crtc_set_config() 224 if (!set->crtc) vmw_ldu_crtc_set_config() 228 crtc = set->crtc; vmw_ldu_crtc_set_config() 230 vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL; vmw_ldu_crtc_set_config() 233 if (set->num_connectors > 1) { vmw_ldu_crtc_set_config() 238 if (set->num_connectors == 1 && vmw_ldu_crtc_set_config() 239 set->connectors[0] != &ldu->base.connector) { vmw_ldu_crtc_set_config() 241 set->connectors[0], &ldu->base.connector); vmw_ldu_crtc_set_config() 259 if (set->num_connectors == 0 || !set->mode || !set->fb) { vmw_ldu_crtc_set_config() 272 /* we now know we want to set a mode */ vmw_ldu_crtc_set_config() 273 mode = set->mode; vmw_ldu_crtc_set_config() 274 fb = set->fb; vmw_ldu_crtc_set_config() 276 if (set->x + mode->hdisplay > fb->width || vmw_ldu_crtc_set_config() 277 set->y + mode->vdisplay > fb->height) { vmw_ldu_crtc_set_config() 278 DRM_ERROR("set outside of framebuffer\n"); vmw_ldu_crtc_set_config() 287 crtc->x = set->x; vmw_ldu_crtc_set_config() 288 crtc->y = set->y; vmw_ldu_crtc_set_config()
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
H A D | rx_desc.h | 63 * and last_mpdu are set in the MSDU then this is a not an 65 * A-MPDU shall have both first_mpdu and last_mpdu bits set to 67 * is set. 71 * PPDU end status will only be valid when this bit is set. 74 * Multicast / broadcast indicator. Only set when the MAC 75 * address 1 bit 0 is set indicating mcast/bcast and the BSSID 76 * matches one of the 4 BSSID registers. Only set when 77 * first_msdu is set. 81 * count. Only set when first_msdu is set. 85 * timeout. Only set when first_msdu is set. 88 * Power management bit set in the 802.11 header. Only set 89 * when first_msdu is set. 92 * Set if packet is not a non-QoS data frame. Only set when 93 * first_msdu is set. 97 * data format. Only set when first_msdu is set. 100 * Set if packet is a management packet. Only set when 101 * first_msdu is set. 104 * Set if packet is a control packet. Only set when first_msdu 105 * is set. 108 * Set if more bit in frame control is set. Only set when 109 * first_msdu is set. 113 * control field is set. Only set when first_msdu is set. 121 * set when either the more_frag bit is set in the frame 122 * control or the fragment number is not zero. Only set when 123 * first_msdu is set. 126 * Set if the order bit in the frame control is set. Only set 127 * when first_msdu is set. 183 * fragmented. If set the FW should look at the rx_frag_info 201 * If set indicates that the RX packet data, RX header data, RX 277 * when first_msdu is set. 280 * Set if the from DS bit is set in the frame control. Only 281 * valid when first_msdu is set. 284 * Set if the to DS bit is set in the frame control. Only 285 * valid when first_msdu is set. 289 * first_msdu is set. 293 * first_msdu is set. 301 * first_msdu is set. 314 * Only valid when first_msdu_is set 328 * Only valid when first_msdu is set. 382 * set. 388 * ML-MIMO is used. Only valid when last_mpdu is set. 467 * ipv6_proto is set. 475 * Only valid if tcp_prot or udp_prot is set. The value 0 512 * Set if the ipv4_proto or ipv6_proto are set and the IP 516 * Set if the ipv4_proto or ipv6_proto are set and the IP 520 * Indicates that either the IP More frag bit is set or IP frag 521 * number is non-zero. If set indicates that this is a 525 * Set if only the TCP Ack bit is set in the TCP flags and if 564 * set. 587 * first_msdu is set. This field is taken directly from the 593 * last_msdu are set in the MSDU then this is a non-aggregated 595 * have both first_mpdu and last_mpdu bits set to 0. 599 * only valid when last_msdu is set. 606 * valid when first_mpdu and first_msdu are set. 779 * Legacy signal rate select. If set then l_sig_rate indicates 1016 * PPDU end status is only valid when ppdu_done bit is set.
|
/linux-4.1.27/drivers/media/platform/xilinx/ |
H A D | xilinx-vip.c | 147 * format are set to the given media bus format. The new format size is stored 161 * xvip_clr_or_set - Clear or set the register with a bitmask 164 * @mask: bitmask to be set or cleared 165 * @set: boolean flag indicating whether to set or clear 167 * Clear or set the register at address @addr with a bitmask @mask depending on 168 * the boolean flag @set. When the flag @set is true, the bitmask is set in 170 * when the flag @set is false. 172 * For example, this function can be used to set a control with a boolean value 173 * requested by users. If the caller knows whether to set or clear in the first 177 void xvip_clr_or_set(struct xvip_device *xvip, u32 addr, u32 mask, bool set) xvip_clr_or_set() argument 182 reg = set ? reg | mask : reg & ~mask; xvip_clr_or_set() 188 * xvip_clr_and_set - Clear and set the register with a bitmask 192 * @set: bitmask to be set 194 * Clear a bit(s) of mask @clr in the register at address @addr, then set 195 * a bit(s) of mask @set in the register after. 197 void xvip_clr_and_set(struct xvip_device *xvip, u32 addr, u32 clr, u32 set) xvip_clr_and_set() argument 203 reg |= set; xvip_clr_and_set()
|
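xvip_clr_or_set() and xvip_clr_and_set() above are small read-modify-write helpers around xvip_read()/xvip_write(). The user-space sketch below reproduces just the bit manipulation on a plain variable; the names are illustrative and there is no real register access.

#include <stdint.h>
#include <stdio.h>

/* Set or clear all bits in mask depending on set, as xvip_clr_or_set does. */
static void clr_or_set(uint32_t *reg, uint32_t mask, int set)
{
        uint32_t val = *reg;

        *reg = set ? (val | mask) : (val & ~mask);
}

/* Clear the bits in clr, then set the bits in set, as xvip_clr_and_set does. */
static void clr_and_set(uint32_t *reg, uint32_t clr, uint32_t set)
{
        uint32_t val = *reg;

        val &= ~clr;
        val |= set;
        *reg = val;
}

int main(void)
{
        uint32_t reg = 0x0000f00d;

        clr_or_set(&reg, 0x000000f0, 1);            /* reg becomes 0x0000f0fd */
        clr_and_set(&reg, 0x0000f000, 0x00010000);  /* reg becomes 0x000100fd */
        printf("reg = 0x%08x\n", reg);
        return 0;
}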
/linux-4.1.27/net/llc/ |
H A D | llc_pdu.c | 28 * @pdu_frame: input frame that p/f bit must be set into it. 85 * @p_bit: The P bit to set in the PDU 101 * @p_bit: The P bit to set in the PDU 114 pdu->ctrl_1 |= (ns << 1) & 0xFE; /* set N(S) in bits 2..8 */ llc_pdu_init_as_i_cmd() 115 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_i_cmd() 121 * @p_bit: The P bit to set in the PDU 135 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rej_cmd() 141 * @p_bit: The P bit to set in the PDU 155 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rnr_cmd() 161 * @p_bit: The P bit to set in the PDU 174 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rr_cmd() 180 * @p_bit: The P bit to set in the PDU 196 * @f_bit: The F bit to set in the PDU 213 * @f_bit: The F bit to set in the PDU 250 * @f_bit: The F bit to set in the PDU 264 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rr_rsp() 270 * @f_bit: The F bit to set in the PDU 284 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rej_rsp() 290 * @f_bit: The F bit to set in the PDU 304 pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */ llc_pdu_init_as_rnr_rsp() 310 * @f_bit: The F bit to set in the PDU
|
/linux-4.1.27/drivers/scsi/isci/ |
H A D | scu_task_context.h | 399 u32 control; /* /< must be set to 0 */ 506 * for the task context. This is set to 0 by the driver but can be read by 548 * This field must be set to true if this is an initiator generated request. 554 * This field must be set to one of the valid connection rates valid values 571 * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values 580 * This filed must be set to the TCi allocated for this task 585 * This field is reserved and must be set to 0x00 590 * For a normal task request this must be set to 0. If this is an abort of 591 * this task request it must be set to 1. 596 * This field must be set to true for the SCU hardware to process the task. 601 * This field must be set to SCU_TASK_CONTEXT_TYPE 634 * This field is set to true if the remote node should be suspended. 663 * This field is reserved and the must be set to 0x00 668 * This field is set to true when TLR is to be enabled 685 * This field is set to true when strict ordering is to be enabled 692 * set to 1. 697 * This field is reserved and the driver should set to 0x00 702 * This field is set to true when the SCU hardware task timeout control is to 708 * This field is reserved and the driver should set it to 0x00 713 * This field should be set to true when block guard is to be enabled 718 * This field is reserved and the driver should set to 0x00 741 * This field is reserved and the driver must set it to 0x00 746 * This field must be set to true if the mirrored request processing is to be 758 * This is the target TLR enable bit it must be set to 0 when creatning the 764 * This field is reserved and the driver must set it to 0x00 777 * This filed is set to the number of bytes to be transfered in the request. 782 * This field is reserved and the driver should set it to 0x00 820 * This field is set to the task phase of the SCU hardware. The driver must 821 * set this to 0x01 826 * This field is set to the transport layer task status. The driver must set 837 * This field is set the maximum number of retries for a STP non-data FIS 842 * This field is reserved and the driver must set it to 0x00 858 * This field is reserved and the driver must set it to 0x00 863 u32 write_data_length; /* read only set to 0 */ 866 struct transport_snapshot snapshot; /* read only set to 0 */ 873 u32 active_sgl_element:2; /* read only set to 0 */ 874 u32 sgl_exhausted:1; /* read only set to 0 */ 875 u32 payload_data_transfer_error:4; /* read only set to 0 */ 876 u32 frame_buffer_offset:11; /* read only set to 0 */ 893 u32 active_sgl_element_pair; /* read only set to 0 */
|
/linux-4.1.27/arch/x86/oprofile/ |
H A D | op_counter.h | 15 /* Per-perfctr configuration as set via
|
/linux-4.1.27/include/uapi/asm-generic/ |
H A D | swab.h | 8 * set __SWAB_64_THRU_32__. In user space, this is only
|
/linux-4.1.27/include/uapi/linux/netfilter/ |
H A D | nfnetlink_acct.h | 20 NFACCT_F_OVERQUOTA = (1 << 2), /* can't be set from userspace */
|
H A D | nf_conntrack_common.h | 34 /* It's an expected connection: bit 0 set. This bit never changed */ 38 /* We've seen packets both ways: bit 1 set. Can be set, not unset. */ 104 IPCT_HELPER, /* new helper has been set */ 105 IPCT_MARK, /* new mark has been set */ 108 IPCT_SECMARK, /* new security mark has been set */ 109 IPCT_LABEL, /* new connlabel has been set */
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | efi.h | 43 * start of kernel and may not cross a 2MiB boundary. We set alignment to 54 * On ARM systems, virtually remapped UEFI runtime services are set up in two 62 * into a private set of page tables. If this all succeeds, the Runtime 63 * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
|
H A D | signal32.h | 27 int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set, 29 int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, 36 sigset_t *set, struct pt_regs *regs) compat_setup_frame() 41 static inline int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set, compat_setup_rt_frame() argument 35 compat_setup_frame(int usid, struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) compat_setup_frame() argument
|
/linux-4.1.27/arch/blackfin/mach-bf609/include/mach/ |
H A D | cdefBF609.h | 10 /* include cdefBF60x_base.h for the set of #defines that are common to all ADSP-BF60x processors */
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | shmparam.h | 8 * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20
|
/linux-4.1.27/arch/m68k/sun3x/ |
H A D | time.h | 4 extern int sun3x_hwclk(int set, struct rtc_time *t);
|
/linux-4.1.27/arch/arm/plat-versatile/include/plat/ |
H A D | clock.h | 8 int (*set)(struct clk *, unsigned long); member in struct:clk_ops
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | hwcap.h | 9 * instruction set this cpu supports.
|
/linux-4.1.27/arch/arm/mach-omap2/ |
H A D | clock44xx.h | 13 * set, then the DPLL's lock frequency is multiplied by 4 (OMAP4430 TRM
|
/linux-4.1.27/tools/lib/util/ |
H A D | find_next_bit.c | 22 * Find the next set bit in a memory region. 57 if (tmp == 0UL) /* Are any bits set? */ find_next_bit() 66 * Find the first set bit in a memory region. 84 if (tmp == 0UL) /* Are any bits set? */ find_first_bit()
|
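find_first_bit() above scans a bitmap one unsigned long at a time, skipping zero words ("Are any bits set?") and then locating the lowest set bit in the first non-zero word. A simplified stand-alone sketch of that scan follows; it assumes the size is a multiple of BITS_PER_LONG and skips the partial-last-word handling the real code needs.

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))

/* Return the index of the first set bit, or size if none is set. */
static unsigned long my_find_first_bit(const unsigned long *addr,
                                       unsigned long size)
{
        unsigned long idx;

        for (idx = 0; idx < size; idx += BITS_PER_LONG) {
                unsigned long tmp = addr[idx / BITS_PER_LONG];
                unsigned long bit;

                if (tmp == 0UL)         /* are any bits set in this word? */
                        continue;
                for (bit = 0; !(tmp & 1UL); bit++)
                        tmp >>= 1;
                return idx + bit;
        }
        return size;    /* nothing set */
}

int main(void)
{
        unsigned long map[2] = { 0, 1UL << 5 };

        printf("first set bit: %lu\n",
               my_find_first_bit(map, 2 * BITS_PER_LONG)); /* BITS_PER_LONG + 5 */
        return 0;
}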
/linux-4.1.27/tools/perf/bench/ |
H A D | mem-memcpy-x86-64-asm.S | 8 * the ELF stack should not be restricted at all and set it RWX.
|
H A D | mem-memset-x86-64-asm.S | 9 * the ELF stack should not be restricted at all and set it RWX.
|
/linux-4.1.27/include/linux/amba/ |
H A D | pl061.h | 10 * If the IRQ functionality is not desired this must be set to 0.
|
/linux-4.1.27/include/linux/crush/ |
H A D | mapper.h | 6 * output set.
|
/linux-4.1.27/arch/metag/lib/ |
H A D | divsi3.S | 13 !! Since core is signed divide case, just set control variable 60 !! D0Re0 is used to form the result, already set to Zero 62 !! D0Ar6 is curbit which is set to 1 at the start and shifted up 75 ORN D1Ar5,D1Ar5,#31 ! if N bit set, set to 31 78 ORN D1Ar3,D1Ar3,#31 ! if N bit set, set to 31
|
/linux-4.1.27/arch/frv/kernel/ |
H A D | futex.c | 26 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ atomic_futex_op_xchg_set() 29 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ atomic_futex_op_xchg_set() 58 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ atomic_futex_op_xchg_add() 61 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ atomic_futex_op_xchg_add() 91 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ atomic_futex_op_xchg_or() 94 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ atomic_futex_op_xchg_or() 124 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ atomic_futex_op_xchg_and() 127 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ atomic_futex_op_xchg_and() 157 " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ atomic_futex_op_xchg_xor() 160 " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ atomic_futex_op_xchg_xor()
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/ |
H A D | brcms_trace_brcmsmac.h | 44 __field(uint, set) 49 __entry->set = t->set; 53 "ms=%u set=%u periodic=%u", 54 __entry->ms, __entry->set, __entry->periodic
|