Searched refs:tb (Results 1 - 200 of 338) sorted by relevance

/linux-4.1.27/fs/reiserfs/
do_balan.c
19 static inline void buffer_info_init_left(struct tree_balance *tb, buffer_info_init_left() argument
22 bi->tb = tb; buffer_info_init_left()
23 bi->bi_bh = tb->L[0]; buffer_info_init_left()
24 bi->bi_parent = tb->FL[0]; buffer_info_init_left()
25 bi->bi_position = get_left_neighbor_position(tb, 0); buffer_info_init_left()
28 static inline void buffer_info_init_right(struct tree_balance *tb, buffer_info_init_right() argument
31 bi->tb = tb; buffer_info_init_right()
32 bi->bi_bh = tb->R[0]; buffer_info_init_right()
33 bi->bi_parent = tb->FR[0]; buffer_info_init_right()
34 bi->bi_position = get_right_neighbor_position(tb, 0); buffer_info_init_right()
37 static inline void buffer_info_init_tbS0(struct tree_balance *tb, buffer_info_init_tbS0() argument
40 bi->tb = tb; buffer_info_init_tbS0()
41 bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path); buffer_info_init_tbS0()
42 bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0); buffer_info_init_tbS0()
43 bi->bi_position = PATH_H_POSITION(tb->tb_path, 1); buffer_info_init_tbS0()
46 static inline void buffer_info_init_bh(struct tree_balance *tb, buffer_info_init_bh() argument
50 bi->tb = tb; buffer_info_init_bh()
56 inline void do_balance_mark_leaf_dirty(struct tree_balance *tb, do_balance_mark_leaf_dirty() argument
59 journal_mark_dirty(tb->transaction_handle, bh); do_balance_mark_leaf_dirty()
67 * if deleting something ( tb->insert_size[0] < 0 )
77 static void balance_leaf_when_delete_del(struct tree_balance *tb) balance_leaf_when_delete_del() argument
79 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_when_delete_del()
80 int item_pos = PATH_LAST_POSITION(tb->tb_path); balance_leaf_when_delete_del()
86 RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0], balance_leaf_when_delete_del()
88 -tb->insert_size[0], ih); balance_leaf_when_delete_del()
90 buffer_info_init_tbS0(tb, &bi); balance_leaf_when_delete_del()
93 if (!item_pos && tb->CFL[0]) { balance_leaf_when_delete_del()
95 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); balance_leaf_when_delete_del()
97 if (!PATH_H_POSITION(tb->tb_path, 1)) balance_leaf_when_delete_del()
98 replace_key(tb, tb->CFL[0], tb->lkey[0], balance_leaf_when_delete_del()
99 PATH_H_PPARENT(tb->tb_path, 0), 0); balance_leaf_when_delete_del()
103 RFALSE(!item_pos && !tb->CFL[0], balance_leaf_when_delete_del()
104 "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0], balance_leaf_when_delete_del()
105 tb->L[0]); balance_leaf_when_delete_del()
109 static void balance_leaf_when_delete_cut(struct tree_balance *tb) balance_leaf_when_delete_cut() argument
111 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_when_delete_cut()
112 int item_pos = PATH_LAST_POSITION(tb->tb_path); balance_leaf_when_delete_cut()
114 int pos_in_item = tb->tb_path->pos_in_item; balance_leaf_when_delete_cut()
116 buffer_info_init_tbS0(tb, &bi); balance_leaf_when_delete_cut()
123 * when we cut a directory tb->insert_size[0] means balance_leaf_when_delete_cut()
126 tb->insert_size[0] = -1; balance_leaf_when_delete_cut()
128 -tb->insert_size[0]); balance_leaf_when_delete_cut()
130 RFALSE(!item_pos && !pos_in_item && !tb->CFL[0], balance_leaf_when_delete_cut()
132 tb->CFL[0]); balance_leaf_when_delete_cut()
134 if (!item_pos && !pos_in_item && tb->CFL[0]) balance_leaf_when_delete_cut()
135 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); balance_leaf_when_delete_cut()
138 -tb->insert_size[0]); balance_leaf_when_delete_cut()
146 static int balance_leaf_when_delete_left(struct tree_balance *tb) balance_leaf_when_delete_left() argument
148 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_when_delete_left()
152 if (tb->lnum[0] == -1) { balance_leaf_when_delete_left()
154 if (tb->rnum[0] == -1) { balance_leaf_when_delete_left()
155 if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) { balance_leaf_when_delete_left()
160 if (PATH_H_POSITION(tb->tb_path, 1) == 0 && balance_leaf_when_delete_left()
161 1 < B_NR_ITEMS(tb->FR[0])) balance_leaf_when_delete_left()
162 replace_key(tb, tb->CFL[0], balance_leaf_when_delete_left()
163 tb->lkey[0], tb->FR[0], 1); balance_leaf_when_delete_left()
165 leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1, balance_leaf_when_delete_left()
167 leaf_move_items(LEAF_FROM_R_TO_L, tb, balance_leaf_when_delete_left()
168 B_NR_ITEMS(tb->R[0]), -1, balance_leaf_when_delete_left()
171 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf_when_delete_left()
172 reiserfs_invalidate_buffer(tb, tb->R[0]); balance_leaf_when_delete_left()
178 leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL); balance_leaf_when_delete_left()
179 leaf_move_items(LEAF_FROM_L_TO_R, tb, balance_leaf_when_delete_left()
180 B_NR_ITEMS(tb->L[0]), -1, NULL); balance_leaf_when_delete_left()
183 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); balance_leaf_when_delete_left()
185 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf_when_delete_left()
186 reiserfs_invalidate_buffer(tb, tb->L[0]); balance_leaf_when_delete_left()
191 RFALSE(tb->rnum[0] != 0, balance_leaf_when_delete_left()
192 "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]); balance_leaf_when_delete_left()
194 leaf_shift_left(tb, n, -1); balance_leaf_when_delete_left()
196 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf_when_delete_left()
206 RFALSE((tb->lnum[0] + tb->rnum[0] < n) || balance_leaf_when_delete_left()
207 (tb->lnum[0] + tb->rnum[0] > n + 1), balance_leaf_when_delete_left()
210 tb->rnum[0], tb->lnum[0], n); balance_leaf_when_delete_left()
211 RFALSE((tb->lnum[0] + tb->rnum[0] == n) && balance_leaf_when_delete_left()
212 (tb->lbytes != -1 || tb->rbytes != -1), balance_leaf_when_delete_left()
215 tb->rbytes, tb->lbytes); balance_leaf_when_delete_left()
216 RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) && balance_leaf_when_delete_left()
217 (tb->lbytes < 1 || tb->rbytes != -1), balance_leaf_when_delete_left()
220 tb->rbytes, tb->lbytes); balance_leaf_when_delete_left()
222 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); balance_leaf_when_delete_left()
223 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); balance_leaf_when_delete_left()
225 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf_when_delete_left()
239 static int balance_leaf_when_delete(struct tree_balance *tb, int flag) balance_leaf_when_delete() argument
241 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_when_delete()
242 int item_pos = PATH_LAST_POSITION(tb->tb_path); balance_leaf_when_delete()
247 RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1, balance_leaf_when_delete()
248 "vs- 12000: level: wrong FR %z", tb->FR[0]); balance_leaf_when_delete()
249 RFALSE(tb->blknum[0] > 1, balance_leaf_when_delete()
250 "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]); balance_leaf_when_delete()
251 RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0), balance_leaf_when_delete()
255 buffer_info_init_tbS0(tb, &bi); balance_leaf_when_delete()
261 balance_leaf_when_delete_del(tb); balance_leaf_when_delete()
263 balance_leaf_when_delete_cut(tb); balance_leaf_when_delete()
274 if (tb->lnum[0]) balance_leaf_when_delete()
275 return balance_leaf_when_delete_left(tb); balance_leaf_when_delete()
277 if (tb->rnum[0] == -1) { balance_leaf_when_delete()
279 leaf_shift_right(tb, n, -1); balance_leaf_when_delete()
280 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf_when_delete()
284 RFALSE(tb->rnum[0], balance_leaf_when_delete()
285 "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]); balance_leaf_when_delete()
289 static unsigned int balance_leaf_insert_left(struct tree_balance *tb, balance_leaf_insert_left() argument
295 int n = B_NR_ITEMS(tb->L[0]); balance_leaf_insert_left()
298 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { balance_leaf_insert_left()
303 ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1); balance_leaf_insert_left()
306 new_item_len = ih_item_len(ih) - tb->lbytes; balance_leaf_insert_left()
316 buffer_info_init_left(tb, &bi); balance_leaf_insert_left()
317 leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, balance_leaf_insert_left()
318 min_t(int, tb->zeroes_num, ih_item_len(ih))); balance_leaf_insert_left()
328 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_insert_left()
330 add_le_ih_k_offset(ih, tb->lbytes << shift); balance_leaf_insert_left()
333 if (tb->lbytes > tb->zeroes_num) { balance_leaf_insert_left()
334 body_shift_bytes = tb->lbytes - tb->zeroes_num; balance_leaf_insert_left()
335 tb->zeroes_num = 0; balance_leaf_insert_left()
337 tb->zeroes_num -= tb->lbytes; balance_leaf_insert_left()
345 ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); balance_leaf_insert_left()
348 buffer_info_init_left(tb, &bi); balance_leaf_insert_left()
349 leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, balance_leaf_insert_left()
350 tb->zeroes_num); balance_leaf_insert_left()
351 tb->insert_size[0] = 0; balance_leaf_insert_left()
352 tb->zeroes_num = 0; balance_leaf_insert_left()
357 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, balance_leaf_paste_left_shift_dirent() argument
361 int n = B_NR_ITEMS(tb->L[0]); balance_leaf_paste_left_shift_dirent()
364 RFALSE(tb->zeroes_num, balance_leaf_paste_left_shift_dirent()
368 if (tb->lbytes > tb->pos_in_item) { balance_leaf_paste_left_shift_dirent()
371 int ret, l_pos_in_item = tb->pos_in_item; balance_leaf_paste_left_shift_dirent()
377 ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1); balance_leaf_paste_left_shift_dirent()
378 if (ret && !tb->item_pos) { balance_leaf_paste_left_shift_dirent()
379 pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); balance_leaf_paste_left_shift_dirent()
381 (tb->lbytes - 1); balance_leaf_paste_left_shift_dirent()
385 buffer_info_init_left(tb, &bi); balance_leaf_paste_left_shift_dirent()
386 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, balance_leaf_paste_left_shift_dirent()
387 l_pos_in_item, tb->insert_size[0], balance_leaf_paste_left_shift_dirent()
388 body, tb->zeroes_num); balance_leaf_paste_left_shift_dirent()
401 leaf_paste_entries(&bi, n + tb->item_pos - ret, balance_leaf_paste_left_shift_dirent()
404 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_paste_left_shift_dirent()
405 tb->insert_size[0] = 0; balance_leaf_paste_left_shift_dirent()
412 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); balance_leaf_paste_left_shift_dirent()
416 tb->pos_in_item -= tb->lbytes; balance_leaf_paste_left_shift_dirent()
419 static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb, balance_leaf_paste_left_shift() argument
423 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_left_shift()
424 int n = B_NR_ITEMS(tb->L[0]); balance_leaf_paste_left_shift()
428 if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { balance_leaf_paste_left_shift()
429 balance_leaf_paste_left_shift_dirent(tb, ih, body); balance_leaf_paste_left_shift()
433 RFALSE(tb->lbytes <= 0, balance_leaf_paste_left_shift()
435 "lbytes=%d", tb->lbytes); balance_leaf_paste_left_shift()
436 RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)), balance_leaf_paste_left_shift()
439 ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item); balance_leaf_paste_left_shift()
442 if (tb->lbytes >= tb->pos_in_item) { balance_leaf_paste_left_shift()
448 tbS0_pos_ih = item_head(tbS0, tb->item_pos); balance_leaf_paste_left_shift()
455 l_n = tb->lbytes - tb->pos_in_item; balance_leaf_paste_left_shift()
458 tb->insert_size[0] -= l_n; balance_leaf_paste_left_shift()
460 RFALSE(tb->insert_size[0] <= 0, balance_leaf_paste_left_shift()
462 "L[0]. insert_size=%d", tb->insert_size[0]); balance_leaf_paste_left_shift()
464 ret = leaf_shift_left(tb, tb->lnum[0], balance_leaf_paste_left_shift()
467 tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret); balance_leaf_paste_left_shift()
470 buffer_info_init_left(tb, &bi); balance_leaf_paste_left_shift()
471 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, balance_leaf_paste_left_shift()
473 min_t(int, l_n, tb->zeroes_num)); balance_leaf_paste_left_shift()
484 leaf_key(tb->L[0], n + tb->item_pos - ret)), balance_leaf_paste_left_shift()
488 int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_paste_left_shift()
496 left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]); balance_leaf_paste_left_shift()
503 if (l_n > tb->zeroes_num) { balance_leaf_paste_left_shift()
504 body_shift_bytes = l_n - tb->zeroes_num; balance_leaf_paste_left_shift()
505 tb->zeroes_num = 0; balance_leaf_paste_left_shift()
507 tb->zeroes_num -= l_n; balance_leaf_paste_left_shift()
508 tb->pos_in_item = 0; balance_leaf_paste_left_shift()
511 leaf_key(tb->L[0], balance_leaf_paste_left_shift()
512 B_NR_ITEMS(tb->L[0]) - 1)) || balance_leaf_paste_left_shift()
521 tb->pos_in_item -= tb->lbytes; balance_leaf_paste_left_shift()
523 RFALSE(tb->pos_in_item <= 0, balance_leaf_paste_left_shift()
525 tb->pos_in_item); balance_leaf_paste_left_shift()
531 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); balance_leaf_paste_left_shift()
538 static void balance_leaf_paste_left_whole(struct tree_balance *tb, balance_leaf_paste_left_whole() argument
542 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_left_whole()
543 int n = B_NR_ITEMS(tb->L[0]); balance_leaf_paste_left_whole()
549 if (!tb->item_pos && balance_leaf_paste_left_whole()
555 pasted = item_head(tb->L[0], n - 1); balance_leaf_paste_left_whole()
557 tb->pos_in_item += ih_entry_count(pasted); balance_leaf_paste_left_whole()
559 tb->pos_in_item += ih_item_len(pasted); balance_leaf_paste_left_whole()
566 ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); balance_leaf_paste_left_whole()
569 buffer_info_init_left(tb, &bi); balance_leaf_paste_left_whole()
570 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item, balance_leaf_paste_left_whole()
571 tb->insert_size[0], body, tb->zeroes_num); balance_leaf_paste_left_whole()
574 pasted = item_head(tb->L[0], n + tb->item_pos - ret); balance_leaf_paste_left_whole()
576 leaf_paste_entries(&bi, n + tb->item_pos - ret, balance_leaf_paste_left_whole()
577 tb->pos_in_item, 1, balance_leaf_paste_left_whole()
579 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_paste_left_whole()
588 tb->insert_size[0] = 0; balance_leaf_paste_left_whole()
589 tb->zeroes_num = 0; balance_leaf_paste_left_whole()
592 static unsigned int balance_leaf_paste_left(struct tree_balance *tb, balance_leaf_paste_left() argument
597 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) balance_leaf_paste_left()
598 return balance_leaf_paste_left_shift(tb, ih, body); balance_leaf_paste_left()
600 balance_leaf_paste_left_whole(tb, ih, body); balance_leaf_paste_left()
605 static unsigned int balance_leaf_left(struct tree_balance *tb, balance_leaf_left() argument
609 if (tb->lnum[0] <= 0) balance_leaf_left()
613 if (tb->item_pos < tb->lnum[0]) { balance_leaf_left()
617 return balance_leaf_insert_left(tb, ih, body); balance_leaf_left()
619 return balance_leaf_paste_left(tb, ih, body); balance_leaf_left()
622 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); balance_leaf_left()
627 static void balance_leaf_insert_right(struct tree_balance *tb, balance_leaf_insert_right() argument
632 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_insert_right()
638 if (n - tb->rnum[0] >= tb->item_pos) { balance_leaf_insert_right()
639 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); balance_leaf_insert_right()
646 if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { balance_leaf_insert_right()
652 leaf_shift_right(tb, tb->rnum[0] - 1, -1); balance_leaf_insert_right()
666 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_insert_right()
667 offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift); balance_leaf_insert_right()
669 put_ih_item_len(ih, tb->rbytes); balance_leaf_insert_right()
672 buffer_info_init_right(tb, &bi); balance_leaf_insert_right()
673 if ((old_len - tb->rbytes) > tb->zeroes_num) { balance_leaf_insert_right()
675 r_body = body + (old_len - tb->rbytes) - tb->zeroes_num; balance_leaf_insert_right()
678 r_zeroes_number = tb->zeroes_num - balance_leaf_insert_right()
679 (old_len - tb->rbytes); balance_leaf_insert_right()
680 tb->zeroes_num -= r_zeroes_number; balance_leaf_insert_right()
686 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); balance_leaf_insert_right()
693 put_ih_item_len(ih, old_len - tb->rbytes); balance_leaf_insert_right()
695 tb->insert_size[0] -= tb->rbytes; balance_leaf_insert_right()
701 ret = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); balance_leaf_insert_right()
704 buffer_info_init_right(tb, &bi); balance_leaf_insert_right()
705 leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1, balance_leaf_insert_right()
706 ih, body, tb->zeroes_num); balance_leaf_insert_right()
708 if (tb->item_pos - n + tb->rnum[0] - 1 == 0) balance_leaf_insert_right()
709 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); balance_leaf_insert_right()
711 tb->zeroes_num = tb->insert_size[0] = 0; balance_leaf_insert_right()
716 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, balance_leaf_paste_right_shift_dirent() argument
720 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_right_shift_dirent()
724 RFALSE(tb->zeroes_num, balance_leaf_paste_right_shift_dirent()
726 entry_count = ih_entry_count(item_head(tbS0, tb->item_pos)); balance_leaf_paste_right_shift_dirent()
729 if (entry_count - tb->rbytes < tb->pos_in_item) { balance_leaf_paste_right_shift_dirent()
732 RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0], balance_leaf_paste_right_shift_dirent()
734 "rbytes=%d, entry_count=%d", tb->rbytes, entry_count); balance_leaf_paste_right_shift_dirent()
741 leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); balance_leaf_paste_right_shift_dirent()
744 paste_entry_position = tb->pos_in_item - entry_count + balance_leaf_paste_right_shift_dirent()
745 tb->rbytes - 1; balance_leaf_paste_right_shift_dirent()
746 buffer_info_init_right(tb, &bi); balance_leaf_paste_right_shift_dirent()
748 tb->insert_size[0], body, tb->zeroes_num); balance_leaf_paste_right_shift_dirent()
753 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_paste_right_shift_dirent()
757 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); balance_leaf_paste_right_shift_dirent()
759 tb->insert_size[0] = 0; balance_leaf_paste_right_shift_dirent()
760 tb->pos_in_item++; balance_leaf_paste_right_shift_dirent()
763 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); balance_leaf_paste_right_shift_dirent()
767 static void balance_leaf_paste_right_shift(struct tree_balance *tb, balance_leaf_paste_right_shift() argument
771 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_right_shift()
778 if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { balance_leaf_paste_right_shift()
779 balance_leaf_paste_right_shift_dirent(tb, ih, body); balance_leaf_paste_right_shift()
789 n_shift = tb->rbytes - tb->insert_size[0]; balance_leaf_paste_right_shift()
793 RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)), balance_leaf_paste_right_shift()
795 "pos_in_item=%d", tb->pos_in_item, balance_leaf_paste_right_shift()
796 ih_item_len(item_head(tbS0, tb->item_pos))); balance_leaf_paste_right_shift()
798 leaf_shift_right(tb, tb->rnum[0], n_shift); balance_leaf_paste_right_shift()
804 n_rem = tb->insert_size[0] - tb->rbytes; balance_leaf_paste_right_shift()
810 version = ih_version(item_head(tb->R[0], 0)); balance_leaf_paste_right_shift()
812 if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) { balance_leaf_paste_right_shift()
813 int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_paste_right_shift()
817 add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem); balance_leaf_paste_right_shift()
818 add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]), balance_leaf_paste_right_shift()
821 do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); balance_leaf_paste_right_shift()
824 buffer_info_init_right(tb, &bi); balance_leaf_paste_right_shift()
825 if (n_rem > tb->zeroes_num) { balance_leaf_paste_right_shift()
827 r_body = body + n_rem - tb->zeroes_num; balance_leaf_paste_right_shift()
830 r_zeroes_number = tb->zeroes_num - n_rem; balance_leaf_paste_right_shift()
831 tb->zeroes_num -= r_zeroes_number; balance_leaf_paste_right_shift()
834 leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem, balance_leaf_paste_right_shift()
837 if (is_indirect_le_ih(item_head(tb->R[0], 0))) balance_leaf_paste_right_shift()
838 set_ih_free_space(item_head(tb->R[0], 0), 0); balance_leaf_paste_right_shift()
840 tb->insert_size[0] = n_rem; balance_leaf_paste_right_shift()
842 tb->pos_in_item++; balance_leaf_paste_right_shift()
845 static void balance_leaf_paste_right_whole(struct tree_balance *tb, balance_leaf_paste_right_whole() argument
849 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_right_whole()
854 buffer_info_init_right(tb, &bi); balance_leaf_paste_right_whole()
855 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); balance_leaf_paste_right_whole()
858 if (tb->pos_in_item >= 0) { balance_leaf_paste_right_whole()
859 buffer_info_init_right(tb, &bi); balance_leaf_paste_right_whole()
860 leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0], balance_leaf_paste_right_whole()
861 tb->pos_in_item, tb->insert_size[0], body, balance_leaf_paste_right_whole()
862 tb->zeroes_num); balance_leaf_paste_right_whole()
866 pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]); balance_leaf_paste_right_whole()
867 if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) { balance_leaf_paste_right_whole()
868 leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0], balance_leaf_paste_right_whole()
869 tb->pos_in_item, 1, balance_leaf_paste_right_whole()
871 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_paste_right_whole()
873 if (!tb->pos_in_item) { balance_leaf_paste_right_whole()
875 RFALSE(tb->item_pos - n + tb->rnum[0], balance_leaf_paste_right_whole()
880 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); balance_leaf_paste_right_whole()
886 tb->zeroes_num = tb->insert_size[0] = 0; balance_leaf_paste_right_whole()
889 static void balance_leaf_paste_right(struct tree_balance *tb, balance_leaf_paste_right() argument
893 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_paste_right()
897 if (n - tb->rnum[0] > tb->item_pos) { balance_leaf_paste_right()
898 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); balance_leaf_paste_right()
904 if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1) balance_leaf_paste_right()
906 balance_leaf_paste_right_shift(tb, ih, body); balance_leaf_paste_right()
909 balance_leaf_paste_right_whole(tb, ih, body); balance_leaf_paste_right()
913 static void balance_leaf_right(struct tree_balance *tb, balance_leaf_right() argument
917 if (tb->rnum[0] <= 0) balance_leaf_right()
923 balance_leaf_insert_right(tb, ih, body); balance_leaf_right()
925 balance_leaf_paste_right(tb, ih, body); balance_leaf_right()
928 static void balance_leaf_new_nodes_insert(struct tree_balance *tb, balance_leaf_new_nodes_insert() argument
935 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_new_nodes_insert()
941 if (n - tb->snum[i] >= tb->item_pos) { balance_leaf_new_nodes_insert()
942 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, balance_leaf_new_nodes_insert()
943 tb->snum[i], tb->sbytes[i], tb->S_new[i]); balance_leaf_new_nodes_insert()
950 if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) { balance_leaf_new_nodes_insert()
956 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1, balance_leaf_new_nodes_insert()
957 tb->S_new[i]); balance_leaf_new_nodes_insert()
970 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_new_nodes_insert()
973 ((old_len - tb->sbytes[i]) << shift)); balance_leaf_new_nodes_insert()
975 put_ih_item_len(ih, tb->sbytes[i]); balance_leaf_new_nodes_insert()
978 buffer_info_init_bh(tb, &bi, tb->S_new[i]); balance_leaf_new_nodes_insert()
980 if ((old_len - tb->sbytes[i]) > tb->zeroes_num) { balance_leaf_new_nodes_insert()
982 r_body = body + (old_len - tb->sbytes[i]) - balance_leaf_new_nodes_insert()
983 tb->zeroes_num; balance_leaf_new_nodes_insert()
986 r_zeroes_number = tb->zeroes_num - (old_len - balance_leaf_new_nodes_insert()
987 tb->sbytes[i]); balance_leaf_new_nodes_insert()
988 tb->zeroes_num -= r_zeroes_number; balance_leaf_new_nodes_insert()
998 put_ih_item_len(ih, old_len - tb->sbytes[i]); balance_leaf_new_nodes_insert()
999 tb->insert_size[0] -= tb->sbytes[i]; balance_leaf_new_nodes_insert()
1007 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, balance_leaf_new_nodes_insert()
1008 tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]); balance_leaf_new_nodes_insert()
1011 buffer_info_init_bh(tb, &bi, tb->S_new[i]); balance_leaf_new_nodes_insert()
1012 leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1, balance_leaf_new_nodes_insert()
1013 ih, body, tb->zeroes_num); balance_leaf_new_nodes_insert()
1015 tb->zeroes_num = tb->insert_size[0] = 0; balance_leaf_new_nodes_insert()
1020 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, balance_leaf_new_nodes_paste_dirent() argument
1027 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_new_nodes_paste_dirent()
1028 struct item_head *aux_ih = item_head(tbS0, tb->item_pos); balance_leaf_new_nodes_paste_dirent()
1032 if (entry_count - tb->sbytes[i] < tb->pos_in_item && balance_leaf_new_nodes_paste_dirent()
1033 tb->pos_in_item <= entry_count) { balance_leaf_new_nodes_paste_dirent()
1036 RFALSE(!tb->insert_size[0], balance_leaf_new_nodes_paste_dirent()
1038 RFALSE(tb->sbytes[i] - 1 >= entry_count, balance_leaf_new_nodes_paste_dirent()
1040 tb->sbytes[i] - 1, entry_count); balance_leaf_new_nodes_paste_dirent()
1047 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], balance_leaf_new_nodes_paste_dirent()
1048 tb->sbytes[i] - 1, tb->S_new[i]); balance_leaf_new_nodes_paste_dirent()
1054 buffer_info_init_bh(tb, &bi, tb->S_new[i]); balance_leaf_new_nodes_paste_dirent()
1055 leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count + balance_leaf_new_nodes_paste_dirent()
1056 tb->sbytes[i] - 1, tb->insert_size[0], balance_leaf_new_nodes_paste_dirent()
1057 body, tb->zeroes_num); balance_leaf_new_nodes_paste_dirent()
1060 leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count + balance_leaf_new_nodes_paste_dirent()
1061 tb->sbytes[i] - 1, 1, balance_leaf_new_nodes_paste_dirent()
1063 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_new_nodes_paste_dirent()
1065 tb->insert_size[0] = 0; balance_leaf_new_nodes_paste_dirent()
1066 tb->pos_in_item++; balance_leaf_new_nodes_paste_dirent()
1069 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], balance_leaf_new_nodes_paste_dirent()
1070 tb->sbytes[i], tb->S_new[i]); balance_leaf_new_nodes_paste_dirent()
1075 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, balance_leaf_new_nodes_paste_shift() argument
1082 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_new_nodes_paste_shift()
1083 struct item_head *aux_ih = item_head(tbS0, tb->item_pos); balance_leaf_new_nodes_paste_shift()
1092 balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key, balance_leaf_new_nodes_paste_shift()
1100 RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) || balance_leaf_new_nodes_paste_shift()
1101 tb->insert_size[0] <= 0, balance_leaf_new_nodes_paste_shift()
1107 n_shift = tb->sbytes[i] - tb->insert_size[0]; balance_leaf_new_nodes_paste_shift()
1110 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift, balance_leaf_new_nodes_paste_shift()
1111 tb->S_new[i]); balance_leaf_new_nodes_paste_shift()
1117 n_rem = tb->insert_size[0] - tb->sbytes[i]; balance_leaf_new_nodes_paste_shift()
1122 buffer_info_init_bh(tb, &bi, tb->S_new[i]); balance_leaf_new_nodes_paste_shift()
1123 if (n_rem > tb->zeroes_num) { balance_leaf_new_nodes_paste_shift()
1125 r_body = body + n_rem - tb->zeroes_num; balance_leaf_new_nodes_paste_shift()
1128 r_zeroes_number = tb->zeroes_num - n_rem; balance_leaf_new_nodes_paste_shift()
1129 tb->zeroes_num -= r_zeroes_number; balance_leaf_new_nodes_paste_shift()
1132 leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem, balance_leaf_new_nodes_paste_shift()
1135 tmp = item_head(tb->S_new[i], 0); balance_leaf_new_nodes_paste_shift()
1139 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; balance_leaf_new_nodes_paste_shift()
1143 tb->insert_size[0] = n_rem; balance_leaf_new_nodes_paste_shift()
1145 tb->pos_in_item++; balance_leaf_new_nodes_paste_shift()
1148 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, balance_leaf_new_nodes_paste_whole() argument
1156 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_new_nodes_paste_whole()
1163 struct item_head *ih_check = item_head(tbS0, tb->item_pos); balance_leaf_new_nodes_paste_whole()
1166 (tb->pos_in_item != ih_item_len(ih_check) || balance_leaf_new_nodes_paste_whole()
1167 tb->insert_size[0] <= 0)) balance_leaf_new_nodes_paste_whole()
1168 reiserfs_panic(tb->tb_sb, balance_leaf_new_nodes_paste_whole()
1173 leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], balance_leaf_new_nodes_paste_whole()
1174 tb->sbytes[i], tb->S_new[i]); balance_leaf_new_nodes_paste_whole()
1181 buffer_info_init_bh(tb, &bi, tb->S_new[i]); balance_leaf_new_nodes_paste_whole()
1182 leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i], balance_leaf_new_nodes_paste_whole()
1183 tb->pos_in_item, tb->insert_size[0], balance_leaf_new_nodes_paste_whole()
1184 body, tb->zeroes_num); balance_leaf_new_nodes_paste_whole()
1186 pasted = item_head(tb->S_new[i], tb->item_pos - n + balance_leaf_new_nodes_paste_whole()
1187 tb->snum[i]); balance_leaf_new_nodes_paste_whole()
1189 leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i], balance_leaf_new_nodes_paste_whole()
1190 tb->pos_in_item, 1, balance_leaf_new_nodes_paste_whole()
1192 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_new_nodes_paste_whole()
1198 tb->zeroes_num = tb->insert_size[0] = 0; balance_leaf_new_nodes_paste_whole()
1201 static void balance_leaf_new_nodes_paste(struct tree_balance *tb, balance_leaf_new_nodes_paste() argument
1208 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_new_nodes_paste()
1212 if (n - tb->snum[i] > tb->item_pos) { balance_leaf_new_nodes_paste()
1213 leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, balance_leaf_new_nodes_paste()
1214 tb->snum[i], tb->sbytes[i], tb->S_new[i]); balance_leaf_new_nodes_paste()
1220 if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1) balance_leaf_new_nodes_paste()
1222 balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key, balance_leaf_new_nodes_paste()
1226 balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key, balance_leaf_new_nodes_paste()
1231 static void balance_leaf_new_nodes(struct tree_balance *tb, balance_leaf_new_nodes() argument
1239 for (i = tb->blknum[0] - 2; i >= 0; i--) { balance_leaf_new_nodes()
1242 RFALSE(!tb->snum[i], balance_leaf_new_nodes()
1244 tb->snum[i]); balance_leaf_new_nodes()
1248 tb->S_new[i] = get_FEB(tb); balance_leaf_new_nodes()
1251 set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL); balance_leaf_new_nodes()
1254 balance_leaf_new_nodes_insert(tb, ih, body, insert_key, balance_leaf_new_nodes()
1257 balance_leaf_new_nodes_paste(tb, ih, body, insert_key, balance_leaf_new_nodes()
1260 memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE); balance_leaf_new_nodes()
1261 insert_ptr[i] = tb->S_new[i]; balance_leaf_new_nodes()
1263 RFALSE(!buffer_journaled(tb->S_new[i]) balance_leaf_new_nodes()
1264 || buffer_journal_dirty(tb->S_new[i]) balance_leaf_new_nodes()
1265 || buffer_dirty(tb->S_new[i]), balance_leaf_new_nodes()
1267 i, tb->S_new[i]); balance_leaf_new_nodes()
1271 static void balance_leaf_finish_node_insert(struct tree_balance *tb, balance_leaf_finish_node_insert() argument
1275 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_finish_node_insert()
1277 buffer_info_init_tbS0(tb, &bi); balance_leaf_finish_node_insert()
1278 leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num); balance_leaf_finish_node_insert()
1281 if (tb->item_pos == 0) { balance_leaf_finish_node_insert()
1282 if (tb->CFL[0]) /* can be 0 in reiserfsck */ balance_leaf_finish_node_insert()
1283 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); balance_leaf_finish_node_insert()
1288 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, balance_leaf_finish_node_paste_dirent() argument
1292 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_finish_node_paste_dirent()
1293 struct item_head *pasted = item_head(tbS0, tb->item_pos); balance_leaf_finish_node_paste_dirent()
1296 if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) { balance_leaf_finish_node_paste_dirent()
1297 RFALSE(!tb->insert_size[0], balance_leaf_finish_node_paste_dirent()
1301 buffer_info_init_tbS0(tb, &bi); balance_leaf_finish_node_paste_dirent()
1302 leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item, balance_leaf_finish_node_paste_dirent()
1303 tb->insert_size[0], body, tb->zeroes_num); balance_leaf_finish_node_paste_dirent()
1306 leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1, balance_leaf_finish_node_paste_dirent()
1308 body + DEH_SIZE, tb->insert_size[0]); balance_leaf_finish_node_paste_dirent()
1310 if (!tb->item_pos && !tb->pos_in_item) { balance_leaf_finish_node_paste_dirent()
1311 RFALSE(!tb->CFL[0] || !tb->L[0], balance_leaf_finish_node_paste_dirent()
1313 if (tb->CFL[0]) balance_leaf_finish_node_paste_dirent()
1314 replace_key(tb, tb->CFL[0], tb->lkey[0], balance_leaf_finish_node_paste_dirent()
1318 tb->insert_size[0] = 0; balance_leaf_finish_node_paste_dirent()
1322 static void balance_leaf_finish_node_paste(struct tree_balance *tb, balance_leaf_finish_node_paste() argument
1326 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf_finish_node_paste()
1328 struct item_head *pasted = item_head(tbS0, tb->item_pos); balance_leaf_finish_node_paste()
1332 balance_leaf_finish_node_paste_dirent(tb, ih, body); balance_leaf_finish_node_paste()
1338 if (tb->pos_in_item == ih_item_len(pasted)) { balance_leaf_finish_node_paste()
1339 RFALSE(tb->insert_size[0] <= 0, balance_leaf_finish_node_paste()
1341 tb->insert_size[0]); balance_leaf_finish_node_paste()
1342 buffer_info_init_tbS0(tb, &bi); balance_leaf_finish_node_paste()
1343 leaf_paste_in_buffer(&bi, tb->item_pos, balance_leaf_finish_node_paste()
1344 tb->pos_in_item, tb->insert_size[0], body, balance_leaf_finish_node_paste()
1345 tb->zeroes_num); balance_leaf_finish_node_paste()
1350 tb->insert_size[0] = 0; balance_leaf_finish_node_paste()
1353 else if (tb->insert_size[0]) { balance_leaf_finish_node_paste()
1355 reiserfs_panic(tb->tb_sb, "PAP-12285", balance_leaf_finish_node_paste()
1356 "insert_size must be 0 (%d)", tb->insert_size[0]); balance_leaf_finish_node_paste()
1366 static void balance_leaf_finish_node(struct tree_balance *tb, balance_leaf_finish_node() argument
1371 if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { balance_leaf_finish_node()
1373 balance_leaf_finish_node_insert(tb, ih, body); balance_leaf_finish_node()
1375 balance_leaf_finish_node_paste(tb, ih, body); balance_leaf_finish_node()
1381 * @tb: tree balance state
1393 static int balance_leaf(struct tree_balance *tb, struct item_head *ih, balance_leaf() argument
1398 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); balance_leaf()
1400 PROC_INFO_INC(tb->tb_sb, balance_at[0]); balance_leaf()
1403 if (tb->insert_size[0] < 0) balance_leaf()
1404 return balance_leaf_when_delete(tb, flag); balance_leaf()
1406 tb->item_pos = PATH_LAST_POSITION(tb->tb_path), balance_leaf()
1407 tb->pos_in_item = tb->tb_path->pos_in_item, balance_leaf()
1408 tb->zeroes_num = 0; balance_leaf()
1410 tb->zeroes_num = ih_item_len(ih); balance_leaf()
1417 && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) balance_leaf()
1418 tb->pos_in_item *= UNFM_P_SIZE; balance_leaf()
1420 body += balance_leaf_left(tb, ih, body, flag); balance_leaf()
1422 /* tb->lnum[0] > 0 */ balance_leaf()
1424 tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0)); balance_leaf()
1426 balance_leaf_right(tb, ih, body, flag); balance_leaf()
1428 /* tb->rnum[0] > 0 */ balance_leaf()
1429 RFALSE(tb->blknum[0] > 3, balance_leaf()
1430 "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]); balance_leaf()
1431 RFALSE(tb->blknum[0] < 0, balance_leaf()
1432 "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]); balance_leaf()
1439 if (tb->blknum[0] == 0) { /* node S[0] is empty now */ balance_leaf()
1441 RFALSE(!tb->lnum[0] || !tb->rnum[0], balance_leaf()
1445 * delimiting key of the tb->L[0]'s and left delimiting key are balance_leaf()
1448 if (tb->CFL[0]) { balance_leaf()
1449 if (!tb->CFR[0]) balance_leaf()
1450 reiserfs_panic(tb->tb_sb, "vs-12195", balance_leaf()
1452 copy_key(internal_key(tb->CFL[0], tb->lkey[0]), balance_leaf()
1453 internal_key(tb->CFR[0], tb->rkey[0])); balance_leaf()
1454 do_balance_mark_internal_dirty(tb, tb->CFL[0], 0); balance_leaf()
1457 reiserfs_invalidate_buffer(tb, tbS0); balance_leaf()
1461 balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag); balance_leaf()
1463 balance_leaf_finish_node(tb, ih, body, flag); balance_leaf()
1466 if (flag == M_PASTE && tb->insert_size[0]) { balance_leaf()
1468 reiserfs_panic(tb->tb_sb, balance_leaf()
1470 tb->insert_size[0]); balance_leaf()
1494 struct buffer_head *get_FEB(struct tree_balance *tb) get_FEB() argument
1500 if (tb->FEB[i] != NULL) get_FEB()
1504 reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty"); get_FEB()
1506 buffer_info_init_bh(tb, &bi, tb->FEB[i]); get_FEB()
1508 set_buffer_uptodate(tb->FEB[i]); get_FEB()
1509 tb->used[i] = tb->FEB[i]; get_FEB()
1510 tb->FEB[i] = NULL; get_FEB()
1512 return tb->used[i]; get_FEB()
1516 static void store_thrown(struct tree_balance *tb, struct buffer_head *bh) store_thrown() argument
1521 reiserfs_warning(tb->tb_sb, "reiserfs-12320", store_thrown()
1523 for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) store_thrown()
1524 if (!tb->thrown[i]) { store_thrown()
1525 tb->thrown[i] = bh; store_thrown()
1529 reiserfs_warning(tb->tb_sb, "reiserfs-12321", store_thrown()
1533 static void free_thrown(struct tree_balance *tb) free_thrown() argument
1537 for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) { free_thrown()
1538 if (tb->thrown[i]) { free_thrown()
1539 blocknr = tb->thrown[i]->b_blocknr; free_thrown()
1540 if (buffer_dirty(tb->thrown[i])) free_thrown()
1541 reiserfs_warning(tb->tb_sb, "reiserfs-12322", free_thrown()
1544 brelse(tb->thrown[i]); /* incremented in store_thrown */ free_thrown()
1545 reiserfs_free_block(tb->transaction_handle, NULL, free_thrown()
1551 void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh) reiserfs_invalidate_buffer() argument
1559 store_thrown(tb, bh); reiserfs_invalidate_buffer()
1563 void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest, replace_key() argument
1587 do_balance_mark_internal_dirty(tb, dest, 0); replace_key()
1590 int get_left_neighbor_position(struct tree_balance *tb, int h) get_left_neighbor_position() argument
1592 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); get_left_neighbor_position()
1594 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL, get_left_neighbor_position()
1596 h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h)); get_left_neighbor_position()
1599 return B_NR_ITEMS(tb->FL[h]); get_left_neighbor_position()
1604 int get_right_neighbor_position(struct tree_balance *tb, int h) get_right_neighbor_position() argument
1606 int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1); get_right_neighbor_position()
1608 RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL, get_right_neighbor_position()
1610 h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]); get_right_neighbor_position()
1612 if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h))) get_right_neighbor_position()
1647 static int locked_or_not_in_tree(struct tree_balance *tb, locked_or_not_in_tree() argument
1652 reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh); locked_or_not_in_tree()
1658 static int check_before_balancing(struct tree_balance *tb) check_before_balancing() argument
1662 if (REISERFS_SB(tb->tb_sb)->cur_tb) { check_before_balancing()
1663 reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule " check_before_balancing()
1674 if (tb->lnum[0]) { check_before_balancing()
1675 retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]"); check_before_balancing()
1676 retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]"); check_before_balancing()
1677 retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]"); check_before_balancing()
1678 check_leaf(tb->L[0]); check_before_balancing()
1680 if (tb->rnum[0]) { check_before_balancing()
1681 retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]"); check_before_balancing()
1682 retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]"); check_before_balancing()
1683 retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]"); check_before_balancing()
1684 check_leaf(tb->R[0]); check_before_balancing()
1686 retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path), check_before_balancing()
1688 check_leaf(PATH_PLAST_BUFFER(tb->tb_path)); check_before_balancing()
1693 static void check_after_balance_leaf(struct tree_balance *tb) check_after_balance_leaf() argument
1695 if (tb->lnum[0]) { check_after_balance_leaf()
1696 if (B_FREE_SPACE(tb->L[0]) != check_after_balance_leaf()
1697 MAX_CHILD_SIZE(tb->L[0]) - check_after_balance_leaf()
1699 (tb->FL[0], get_left_neighbor_position(tb, 0)))) { check_after_balance_leaf()
1701 reiserfs_panic(tb->tb_sb, "PAP-12355", check_after_balance_leaf()
1705 if (tb->rnum[0]) { check_after_balance_leaf()
1706 if (B_FREE_SPACE(tb->R[0]) != check_after_balance_leaf()
1707 MAX_CHILD_SIZE(tb->R[0]) - check_after_balance_leaf()
1709 (tb->FR[0], get_right_neighbor_position(tb, 0)))) { check_after_balance_leaf()
1711 reiserfs_panic(tb->tb_sb, "PAP-12360", check_after_balance_leaf()
1715 if (PATH_H_PBUFFER(tb->tb_path, 1) && check_after_balance_leaf()
1716 (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) != check_after_balance_leaf()
1717 (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) - check_after_balance_leaf()
1718 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1), check_after_balance_leaf()
1719 PATH_H_POSITION(tb->tb_path, 1)))))) { check_after_balance_leaf()
1720 int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)); check_after_balance_leaf()
1721 int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) - check_after_balance_leaf()
1722 dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1), check_after_balance_leaf()
1723 PATH_H_POSITION(tb->tb_path, check_after_balance_leaf()
1726 reiserfs_warning(tb->tb_sb, "reiserfs-12363", check_after_balance_leaf()
1727 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; " check_after_balance_leaf()
1730 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)), check_after_balance_leaf()
1731 PATH_H_PBUFFER(tb->tb_path, 1), check_after_balance_leaf()
1732 PATH_H_POSITION(tb->tb_path, 1), check_after_balance_leaf()
1734 (PATH_H_PBUFFER(tb->tb_path, 1), check_after_balance_leaf()
1735 PATH_H_POSITION(tb->tb_path, 1))), check_after_balance_leaf()
1737 reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect"); check_after_balance_leaf()
1741 static void check_leaf_level(struct tree_balance *tb) check_leaf_level() argument
1743 check_leaf(tb->L[0]); check_leaf_level()
1744 check_leaf(tb->R[0]); check_leaf_level()
1745 check_leaf(PATH_PLAST_BUFFER(tb->tb_path)); check_leaf_level()
1748 static void check_internal_levels(struct tree_balance *tb) check_internal_levels() argument
1753 for (h = 1; tb->insert_size[h]; h++) { check_internal_levels()
1754 check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h), check_internal_levels()
1756 if (tb->lnum[h]) check_internal_levels()
1757 check_internal_node(tb->tb_sb, tb->L[h], "BAD L"); check_internal_levels()
1758 if (tb->rnum[h]) check_internal_levels()
1759 check_internal_node(tb->tb_sb, tb->R[h], "BAD R"); check_internal_levels()
1772 * necessary to add ordered locking of tb.
1800 static inline void do_balance_starts(struct tree_balance *tb) do_balance_starts() argument
1804 /* store_print_tb (tb); */ do_balance_starts()
1808 print_tb(flag, PATH_LAST_POSITION(tb->tb_path), do_balance_starts()
1809 tb->tb_path->pos_in_item, tb, "check"); do_balance_starts()
1811 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB"); do_balance_starts()
1813 REISERFS_SB(tb->tb_sb)->cur_tb = tb; do_balance_starts()
1817 static inline void do_balance_completed(struct tree_balance *tb) do_balance_completed() argument
1821 check_leaf_level(tb); do_balance_completed()
1822 check_internal_levels(tb); do_balance_completed()
1823 REISERFS_SB(tb->tb_sb)->cur_tb = NULL; do_balance_completed()
1832 REISERFS_SB(tb->tb_sb)->s_do_balance++; do_balance_completed()
1835 unfix_nodes(tb); do_balance_completed()
1837 free_thrown(tb); do_balance_completed()
1843 * @tb: tree_balance structure
1858 void do_balance(struct tree_balance *tb, struct item_head *ih, do_balance() argument
1875 tb->tb_mode = flag; do_balance()
1876 tb->need_balance_dirty = 0; do_balance()
1878 if (FILESYSTEM_CHANGED_TB(tb)) { do_balance()
1879 reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has " do_balance()
1883 if (!tb->insert_size[0]) { do_balance()
1884 reiserfs_warning(tb->tb_sb, "PAP-12350", do_balance()
1886 unfix_nodes(tb); do_balance()
1890 atomic_inc(&fs_generation(tb->tb_sb)); do_balance()
1891 do_balance_starts(tb); do_balance()
1898 child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) + do_balance()
1899 balance_leaf(tb, ih, body, flag, insert_key, insert_ptr); do_balance()
1902 check_after_balance_leaf(tb); do_balance()
1906 for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++) do_balance()
1907 child_pos = balance_internal(tb, h, child_pos, insert_key, do_balance()
1910 do_balance_completed(tb); do_balance()
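
Most of the do_balan.c matches above funnel through a struct buffer_info that tells the leaf_* helpers which node to edit. Below is a minimal sketch of that pattern, with the field set inferred from the buffer_info_init_left/_right/_tbS0/_bh initializers matched above; the authoritative declaration lives in fs/reiserfs/reiserfs.h and is not part of these results.

/*
 * Sketch only -- field set inferred from the buffer_info_init_*()
 * matches above, not quoted from fs/reiserfs/reiserfs.h.
 */
struct buffer_info_sketch {
	struct tree_balance *tb;       /* balance descriptor, used for journaling */
	struct buffer_head *bi_bh;     /* the node a leaf_* helper will modify */
	struct buffer_head *bi_parent; /* parent of bi_bh, dirtied when child sizes change */
	int bi_position;               /* slot of bi_bh within bi_parent */
};

/* Mirrors buffer_info_init_right() above: aim the helpers at tb->R[0]. */
static inline void buffer_info_init_right_sketch(struct tree_balance *tb,
						 struct buffer_info_sketch *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->R[0];                 /* right neighbor of S[0] */
	bi->bi_parent = tb->FR[0];            /* father of the right neighbor */
	bi->bi_position = get_right_neighbor_position(tb, 0);
}
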
ibalance.c
28 struct tree_balance *tb, internal_define_dest_src_infos()
41 src_bi->tb = tb; internal_define_dest_src_infos()
42 src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); internal_define_dest_src_infos()
43 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); internal_define_dest_src_infos()
44 src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); internal_define_dest_src_infos()
45 dest_bi->tb = tb; internal_define_dest_src_infos()
46 dest_bi->bi_bh = tb->L[h]; internal_define_dest_src_infos()
47 dest_bi->bi_parent = tb->FL[h]; internal_define_dest_src_infos()
48 dest_bi->bi_position = get_left_neighbor_position(tb, h); internal_define_dest_src_infos()
49 *d_key = tb->lkey[h]; internal_define_dest_src_infos()
50 *cf = tb->CFL[h]; internal_define_dest_src_infos()
53 src_bi->tb = tb; internal_define_dest_src_infos()
54 src_bi->bi_bh = tb->L[h]; internal_define_dest_src_infos()
55 src_bi->bi_parent = tb->FL[h]; internal_define_dest_src_infos()
56 src_bi->bi_position = get_left_neighbor_position(tb, h); internal_define_dest_src_infos()
57 dest_bi->tb = tb; internal_define_dest_src_infos()
58 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); internal_define_dest_src_infos()
59 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); internal_define_dest_src_infos()
61 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); internal_define_dest_src_infos()
62 *d_key = tb->lkey[h]; internal_define_dest_src_infos()
63 *cf = tb->CFL[h]; internal_define_dest_src_infos()
68 src_bi->tb = tb; internal_define_dest_src_infos()
69 src_bi->bi_bh = tb->R[h]; internal_define_dest_src_infos()
70 src_bi->bi_parent = tb->FR[h]; internal_define_dest_src_infos()
71 src_bi->bi_position = get_right_neighbor_position(tb, h); internal_define_dest_src_infos()
72 dest_bi->tb = tb; internal_define_dest_src_infos()
73 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); internal_define_dest_src_infos()
74 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); internal_define_dest_src_infos()
75 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); internal_define_dest_src_infos()
76 *d_key = tb->rkey[h]; internal_define_dest_src_infos()
77 *cf = tb->CFR[h]; internal_define_dest_src_infos()
81 src_bi->tb = tb; internal_define_dest_src_infos()
82 src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); internal_define_dest_src_infos()
83 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); internal_define_dest_src_infos()
84 src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); internal_define_dest_src_infos()
85 dest_bi->tb = tb; internal_define_dest_src_infos()
86 dest_bi->bi_bh = tb->R[h]; internal_define_dest_src_infos()
87 dest_bi->bi_parent = tb->FR[h]; internal_define_dest_src_infos()
88 dest_bi->bi_position = get_right_neighbor_position(tb, h); internal_define_dest_src_infos()
89 *d_key = tb->rkey[h]; internal_define_dest_src_infos()
90 *cf = tb->CFR[h]; internal_define_dest_src_infos()
94 dest_bi->tb = tb; internal_define_dest_src_infos()
95 dest_bi->bi_bh = tb->L[h]; internal_define_dest_src_infos()
96 dest_bi->bi_parent = tb->FL[h]; internal_define_dest_src_infos()
97 dest_bi->bi_position = get_left_neighbor_position(tb, h); internal_define_dest_src_infos()
101 dest_bi->tb = tb; internal_define_dest_src_infos()
102 dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h); internal_define_dest_src_infos()
103 dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h); internal_define_dest_src_infos()
104 dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); internal_define_dest_src_infos()
108 dest_bi->tb = tb; internal_define_dest_src_infos()
109 dest_bi->bi_bh = tb->R[h]; internal_define_dest_src_infos()
110 dest_bi->bi_parent = tb->FR[h]; internal_define_dest_src_infos()
111 dest_bi->bi_position = get_right_neighbor_position(tb, h); internal_define_dest_src_infos()
115 reiserfs_panic(tb->tb_sb, "ibalance-1", internal_define_dest_src_infos()
180 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0); internal_insert_childs()
191 do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, internal_insert_childs()
257 do_balance_mark_internal_dirty(cur_bi->tb, cur, 0); internal_delete_pointers_items()
268 do_balance_mark_internal_dirty(cur_bi->tb, cur_bi->bi_parent, internal_delete_pointers_items()
365 do_balance_mark_internal_dirty(dest_bi->tb, dest, 0); internal_copy_pointers_items()
378 do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent, internal_copy_pointers_items()
468 do_balance_mark_internal_dirty(dest_bi->tb, dest, 0); internal_insert_key()
475 do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent, internal_insert_key()
493 struct tree_balance *tb, internal_shift_left()
500 internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi, internal_shift_left()
515 replace_key(tb, cf, d_key_position, internal_shift_left()
519 replace_key(tb, cf, d_key_position, src_bi.bi_bh, internal_shift_left()
534 static void internal_shift1_left(struct tree_balance *tb, internal_shift1_left() argument
541 internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, internal_shift1_left()
565 struct tree_balance *tb, internal_shift_right()
573 internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi, internal_shift_right()
585 RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ || internal_shift_right()
586 dest_bi.bi_bh != tb->R[h], internal_shift_right()
587 "src (%p) must be == tb->S[h](%p) when it disappears", internal_shift_right()
588 src_bi.bi_bh, PATH_H_PBUFFER(tb->tb_path, h)); internal_shift_right()
590 if (tb->CFL[h]) internal_shift_right()
591 replace_key(tb, cf, d_key_position, tb->CFL[h], internal_shift_right()
592 tb->lkey[h]); internal_shift_right()
594 replace_key(tb, cf, d_key_position, src_bi.bi_bh, internal_shift_right()
609 static void internal_shift1_right(struct tree_balance *tb, internal_shift1_right() argument
616 internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, internal_shift1_right()
632 static void balance_internal_when_delete(struct tree_balance *tb, balance_internal_when_delete() argument
637 struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h); balance_internal_when_delete()
640 insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE)); balance_internal_when_delete()
643 bi.tb = tb; balance_internal_when_delete()
645 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); balance_internal_when_delete()
646 bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); balance_internal_when_delete()
650 RFALSE(tb->blknum[h] > 1, balance_internal_when_delete()
651 "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]); balance_internal_when_delete()
655 if (tb->lnum[h] == 0 && tb->rnum[h] == 0) { balance_internal_when_delete()
656 if (tb->blknum[h] == 0) { balance_internal_when_delete()
668 if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1])) balance_internal_when_delete()
669 new_root = tb->R[h - 1]; balance_internal_when_delete()
671 new_root = tb->L[h - 1]; balance_internal_when_delete()
675 PUT_SB_ROOT_BLOCK(tb->tb_sb, new_root->b_blocknr); balance_internal_when_delete()
676 /*REISERFS_SB(tb->tb_sb)->s_rs->s_tree_height --; */ balance_internal_when_delete()
677 PUT_SB_TREE_HEIGHT(tb->tb_sb, balance_internal_when_delete()
678 SB_TREE_HEIGHT(tb->tb_sb) - 1); balance_internal_when_delete()
680 do_balance_mark_sb_dirty(tb, balance_internal_when_delete()
681 REISERFS_SB(tb->tb_sb)->s_sbh, balance_internal_when_delete()
690 reiserfs_invalidate_buffer(tb, tbSh); balance_internal_when_delete()
697 if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) { balance_internal_when_delete()
699 RFALSE(tb->rnum[h] != 0, balance_internal_when_delete()
700 "invalid tb->rnum[%d]==%d when joining S[h] with L[h]", balance_internal_when_delete()
701 h, tb->rnum[h]); balance_internal_when_delete()
703 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1); balance_internal_when_delete()
704 reiserfs_invalidate_buffer(tb, tbSh); balance_internal_when_delete()
710 if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) { balance_internal_when_delete()
711 RFALSE(tb->lnum[h] != 0, balance_internal_when_delete()
712 "invalid tb->lnum[%d]==%d when joining S[h] with R[h]", balance_internal_when_delete()
713 h, tb->lnum[h]); balance_internal_when_delete()
715 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1); balance_internal_when_delete()
717 reiserfs_invalidate_buffer(tb, tbSh); balance_internal_when_delete()
722 if (tb->lnum[h] < 0) { balance_internal_when_delete()
723 RFALSE(tb->rnum[h] != 0, balance_internal_when_delete()
724 "wrong tb->rnum[%d]==%d when borrow from L[h]", h, balance_internal_when_delete()
725 tb->rnum[h]); balance_internal_when_delete()
726 internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h, balance_internal_when_delete()
727 -tb->lnum[h]); balance_internal_when_delete()
732 if (tb->rnum[h] < 0) { balance_internal_when_delete()
733 RFALSE(tb->lnum[h] != 0, balance_internal_when_delete()
734 "invalid tb->lnum[%d]==%d when borrow from R[h]", balance_internal_when_delete()
735 h, tb->lnum[h]); balance_internal_when_delete()
736 internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]); /*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */ balance_internal_when_delete()
741 if (tb->lnum[h] > 0) { balance_internal_when_delete()
742 RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1, balance_internal_when_delete()
743 "invalid tb->lnum[%d]==%d or tb->rnum[%d]==%d when S[h](item number == %d) is split between them", balance_internal_when_delete()
744 h, tb->lnum[h], h, tb->rnum[h], n); balance_internal_when_delete()
746 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]); /*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */ balance_internal_when_delete()
747 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, balance_internal_when_delete()
748 tb->rnum[h]); balance_internal_when_delete()
750 reiserfs_invalidate_buffer(tb, tbSh); balance_internal_when_delete()
754 reiserfs_panic(tb->tb_sb, "ibalance-2", balance_internal_when_delete()
755 "unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d", balance_internal_when_delete()
756 h, tb->lnum[h], h, tb->rnum[h]); balance_internal_when_delete()
760 static void replace_lkey(struct tree_balance *tb, int h, struct item_head *key) replace_lkey() argument
762 RFALSE(tb->L[h] == NULL || tb->CFL[h] == NULL, replace_lkey()
764 tb->L[h], tb->CFL[h]); replace_lkey()
766 if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0) replace_lkey()
769 memcpy(internal_key(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE); replace_lkey()
771 do_balance_mark_internal_dirty(tb, tb->CFL[h], 0); replace_lkey()
775 static void replace_rkey(struct tree_balance *tb, int h, struct item_head *key) replace_rkey() argument
777 RFALSE(tb->R[h] == NULL || tb->CFR[h] == NULL, replace_rkey()
779 tb->R[h], tb->CFR[h]); replace_rkey()
780 RFALSE(B_NR_ITEMS(tb->R[h]) == 0, replace_rkey()
782 B_NR_ITEMS(tb->R[h])); replace_rkey()
784 memcpy(internal_key(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE); replace_rkey()
786 do_balance_mark_internal_dirty(tb, tb->CFR[h], 0); replace_rkey()
803 int balance_internal(struct tree_balance *tb, balance_internal() argument
811 struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h); balance_internal()
816 * else it is tb->S[h]->b_item_order balance_internal()
827 PROC_INFO_INC(tb->tb_sb, balance_at[h]); balance_internal()
830 (tbSh) ? PATH_H_POSITION(tb->tb_path, balance_internal()
831 h + 1) /*tb->S[h]->b_item_order */ : 0; balance_internal()
837 insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE)); balance_internal()
849 balance_internal_when_delete(tb, h, child_pos); balance_internal()
854 if (tb->lnum[h] > 0) { balance_internal()
860 n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */ balance_internal()
861 if (tb->lnum[h] <= child_pos) { balance_internal()
863 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, balance_internal()
864 tb->lnum[h]); balance_internal()
865 child_pos -= tb->lnum[h]; balance_internal()
866 } else if (tb->lnum[h] > child_pos + insert_num) { balance_internal()
868 internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, balance_internal()
869 tb->lnum[h] - insert_num); balance_internal()
871 bi.tb = tb; balance_internal()
872 bi.bi_bh = tb->L[h]; balance_internal()
873 bi.bi_parent = tb->FL[h]; balance_internal()
874 bi.bi_position = get_left_neighbor_position(tb, h); balance_internal()
876 /*tb->L[h], tb->S[h-1]->b_next */ balance_internal()
889 internal_shift1_left(tb, h, child_pos + 1); balance_internal()
891 k = tb->lnum[h] - child_pos - 1; balance_internal()
892 bi.tb = tb; balance_internal()
893 bi.bi_bh = tb->L[h]; balance_internal()
894 bi.bi_parent = tb->FL[h]; balance_internal()
895 bi.bi_position = get_left_neighbor_position(tb, h); balance_internal()
897 /*tb->L[h], tb->S[h-1]->b_next, */ balance_internal()
901 replace_lkey(tb, h, insert_key + k); balance_internal()
913 do_balance_mark_internal_dirty(tb, tbSh, 0); balance_internal()
922 /* tb->lnum[h] > 0 */ balance_internal()
923 if (tb->rnum[h] > 0) { balance_internal()
930 if (n - tb->rnum[h] >= child_pos) balance_internal()
932 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, balance_internal()
933 tb->rnum[h]); balance_internal()
934 else if (n + insert_num - tb->rnum[h] < child_pos) { balance_internal()
936 internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, balance_internal()
937 tb->rnum[h] - insert_num); balance_internal()
940 bi.tb = tb; balance_internal()
941 bi.bi_bh = tb->R[h]; balance_internal()
942 bi.bi_parent = tb->FR[h]; balance_internal()
943 bi.bi_position = get_right_neighbor_position(tb, h); balance_internal()
945 /*tb->R[h],tb->S[h-1]->b_next */ balance_internal()
947 tb->rnum[h] - 1, balance_internal()
955 internal_shift1_right(tb, h, n - child_pos + 1); balance_internal()
957 k = tb->rnum[h] - n + child_pos - 1; balance_internal()
958 bi.tb = tb; balance_internal()
959 bi.bi_bh = tb->R[h]; balance_internal()
960 bi.bi_parent = tb->FR[h]; balance_internal()
961 bi.bi_position = get_right_neighbor_position(tb, h); balance_internal()
963 /*tb->R[h], tb->R[h]->b_child, */ balance_internal()
967 replace_rkey(tb, h, insert_key + insert_num - k - 1); balance_internal()
973 dc = B_N_CHILD(tb->R[h], 0); balance_internal()
983 do_balance_mark_internal_dirty(tb, tb->R[h], 0); balance_internal()
990 RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level"); balance_internal()
991 RFALSE(tb->blknum[h] < 0, "blknum can not be < 0"); balance_internal()
993 if (!tb->blknum[h]) { /* node S[h] is empty now */ balance_internal()
997 reiserfs_invalidate_buffer(tb, tbSh); balance_internal()
1004 struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1); balance_internal()
1007 if (tb->blknum[h] != 1) balance_internal()
1011 tbSh = get_FEB(tb); balance_internal()
1022 tb->insert_size[h] -= DC_SIZE; balance_internal()
1025 do_balance_mark_internal_dirty(tb, tbSh, 0); balance_internal()
1032 PATH_OFFSET_PBUFFER(tb->tb_path, ILLEGAL_PATH_ELEMENT_OFFSET) = balance_internal()
1036 PUT_SB_ROOT_BLOCK(tb->tb_sb, tbSh->b_blocknr); balance_internal()
1037 PUT_SB_TREE_HEIGHT(tb->tb_sb, SB_TREE_HEIGHT(tb->tb_sb) + 1); balance_internal()
1038 do_balance_mark_sb_dirty(tb, REISERFS_SB(tb->tb_sb)->s_sbh, 1); balance_internal()
1041 if (tb->blknum[h] == 2) { balance_internal()
1046 S_new = get_FEB(tb); balance_internal()
1050 dest_bi.tb = tb; balance_internal()
1054 src_bi.tb = tb; balance_internal()
1056 src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); balance_internal()
1057 src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); balance_internal()
1090 /*S_new,tb->S[h-1]->b_next, */ balance_internal()
1129 do_balance_mark_internal_dirty(tb, S_new, 0); balance_internal()
1146 bi.tb = tb; balance_internal()
1148 bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h); balance_internal()
1149 bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1); balance_internal()
1151 /* ( tb->S[h-1]->b_parent == tb->S[h] ) ? tb->S[h-1]->b_next : tb->S[h]->b_child->b_next, */ balance_internal()
27 internal_define_dest_src_infos(int shift_mode, struct tree_balance *tb, int h, struct buffer_info *dest_bi, struct buffer_info *src_bi, int *d_key, struct buffer_head **cf) internal_define_dest_src_infos() argument
488 internal_shift_left( int mode, struct tree_balance *tb, int h, int pointer_amount) internal_shift_left() argument
560 internal_shift_right( int mode, struct tree_balance *tb, int h, int pointer_amount) internal_shift_right() argument
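Throughout balance_internal() above, the same four buffer_info fields (bi.tb, bi.bi_bh, bi.bi_parent, bi.bi_position) are assigned at every insertion site, naming the node to modify, its parent, and the child slot so the parent's dc_size can be kept in step. A compilable sketch of that quadruple; both structs are opaque stand-ins and the helper is hypothetical, since the kernel keeps the assignments inline at each site.

struct tree_balance;    /* opaque stand-ins, not the kernel definitions */
struct buffer_head;

struct buffer_info {
    struct tree_balance *tb;
    struct buffer_head *bi_bh;      /* node being modified */
    struct buffer_head *bi_parent;  /* its parent, for dc_size updates */
    int bi_position;                /* child slot in that parent */
};

static inline void buffer_info_init(struct buffer_info *bi,
                                    struct tree_balance *tb,
                                    struct buffer_head *bh,
                                    struct buffer_head *parent, int pos)
{
    bi->tb = tb;
    bi->bi_bh = bh;
    bi->bi_parent = parent;
    bi->bi_position = pos;
}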
H A Dfix_node.c51 static void create_virtual_node(struct tree_balance *tb, int h) create_virtual_node() argument
54 struct virtual_node *vn = tb->tb_vn; create_virtual_node()
56 struct buffer_head *Sh; /* this comes from tb->S[h] */ create_virtual_node()
58 Sh = PATH_H_PBUFFER(tb->tb_path, h); create_virtual_node()
62 MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h]; create_virtual_node()
76 vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1); create_virtual_node()
115 op_create_vi(vn, vi, is_affected, tb->insert_size[0]); create_virtual_node()
116 if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr) create_virtual_node()
117 reiserfs_panic(tb->tb_sb, "vs-8030", create_virtual_node()
125 vn->vn_vi[new_num].vi_item_len += tb->insert_size[0]; create_virtual_node()
137 vi->vi_item_len = tb->insert_size[0]; create_virtual_node()
143 tb->insert_size[0]); create_virtual_node()
150 if (tb->CFR[0]) { create_virtual_node()
153 key = internal_key(tb->CFR[0], tb->rkey[0]); create_virtual_node()
178 reiserfs_panic(tb->tb_sb, "vs-8045", create_virtual_node()
194 static void check_left(struct tree_balance *tb, int h, int cur_free) check_left() argument
197 struct virtual_node *vn = tb->tb_vn; check_left()
205 tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE); check_left()
213 tb->lnum[h] = 0; check_left()
214 tb->lbytes = -1; check_left()
218 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0), check_left()
230 tb->lnum[0] = vn->vn_nr_item; check_left()
231 tb->lbytes = -1; check_left()
241 tb->lnum[0] = 0; check_left()
248 tb->lnum[0]++; check_left()
260 tb->lbytes = -1; check_left()
265 tb->lbytes = op_check_left(vi, cur_free, 0, 0); check_left()
266 if (tb->lbytes != -1) check_left()
268 tb->lnum[0]++; check_left()
280 static void check_right(struct tree_balance *tb, int h, int cur_free) check_right() argument
283 struct virtual_node *vn = tb->tb_vn; check_right()
291 tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE); check_right()
299 tb->rnum[h] = 0; check_right()
300 tb->rbytes = -1; check_right()
304 RFALSE(!PATH_H_PPARENT(tb->tb_path, 0), check_right()
316 tb->rnum[h] = vn->vn_nr_item; check_right()
317 tb->rbytes = -1; check_right()
327 tb->rnum[0] = 0; check_right()
334 tb->rnum[0]++; check_right()
345 tb->rbytes = -1; check_right()
355 tb->rbytes = op_check_right(vi, cur_free); check_right()
356 if (tb->rbytes != -1) check_right()
358 tb->rnum[0]++; check_right()
374 static int get_num_ver(int mode, struct tree_balance *tb, int h, get_num_ver() argument
381 struct virtual_node *vn = tb->tb_vn; get_num_ver()
418 RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE), get_num_ver()
421 max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h)); get_num_ver()
534 reiserfs_warning(tb->tb_sb, "vs-8111", get_num_ver()
573 reiserfs_warning(tb->tb_sb, "vs-8115", get_num_ver()
606 * Writes the results of the balancing analysis into structure tb,
609 * tb tree_balance structure;
623 static void set_parameters(struct tree_balance *tb, int h, int lnum, set_parameters() argument
627 tb->lnum[h] = lnum; set_parameters()
628 tb->rnum[h] = rnum; set_parameters()
629 tb->blknum[h] = blk_num; set_parameters()
634 tb->s0num = *s012++; set_parameters()
635 tb->snum[0] = *s012++; set_parameters()
636 tb->snum[1] = *s012++; set_parameters()
637 tb->sbytes[0] = *s012++; set_parameters()
638 tb->sbytes[1] = *s012; set_parameters()
640 tb->lbytes = lb; set_parameters()
641 tb->rbytes = rb; set_parameters()
643 PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum); set_parameters()
644 PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum); set_parameters()
646 PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb); set_parameters()
647 PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb); set_parameters()
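set_parameters() above is the single writer of the balancing plan: it records the left/right shift counts and the resulting node count, and when the optional s012 array is supplied (callers pass NULL otherwise, as the macros further down show) it fans its five elements out into s0num, snum[0..1] and sbytes[0..1]. A user-space model of that unpacking; the struct is a stand-in, not the kernel tree_balance.

struct tb_plan {
    int lnum, rnum, blknum;
    int s0num, snum[2], sbytes[2];
    int lbytes, rbytes;
};

static void set_params(struct tb_plan *p, int lnum, int rnum, int blk_num,
                       const int *s012, int lb, int rb)
{
    p->lnum = lnum;         /* items to shift into the left neighbor */
    p->rnum = rnum;         /* items to shift into the right neighbor */
    p->blknum = blk_num;    /* nodes this level occupies afterwards */

    if (s012) {             /* NULL when no split is planned */
        p->s0num     = *s012++;  /* items staying in S[0] */
        p->snum[0]   = *s012++;  /* items for the first new node */
        p->snum[1]   = *s012++;  /* items for the second new node */
        p->sbytes[0] = *s012++;  /* bytes of a split item, node 1 */
        p->sbytes[1] = *s012;    /* bytes of a split item, node 2 */
    }
    p->lbytes = lb;         /* -1 means no partial item moves left */
    p->rbytes = rb;         /* -1 means no partial item moves right */
}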
651 * check if the node disappears when we shift tb->lnum[0] items to the left
652 * neighbor and tb->rnum[0] to the right one.
654 static int is_leaf_removable(struct tree_balance *tb) is_leaf_removable() argument
656 struct virtual_node *vn = tb->tb_vn; is_leaf_removable()
665 to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0); is_leaf_removable()
666 to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0); is_leaf_removable()
674 set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0, is_leaf_removable()
680 if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1) is_leaf_removable()
688 if (tb->lbytes + tb->rbytes >= size) { is_leaf_removable()
689 set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL, is_leaf_removable()
690 tb->lbytes, -1); is_leaf_removable()
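The arithmetic above is the heart of is_leaf_removable(): a partially shifted item (lbytes or rbytes != -1) does not count as a whole item, and the leaf vanishes either when whole items alone cover it or when exactly one item remains and its bytes can be split across both neighbors. Condensed into a standalone predicate, names hypothetical:

static int leaf_vanishes(int nr_item, int size_of_remaining_item,
                         int lnum, int lbytes, int rnum, int rbytes)
{
    int to_left  = lnum - (lbytes != -1 ? 1 : 0);
    int to_right = rnum - (rbytes != -1 ? 1 : 0);
    int remain = nr_item - to_left - to_right;

    if (remain < 1)
        return 1;       /* whole items alone empty the node */

    /* one item left: it may still be divided between both neighbors */
    if (remain == 1 && lbytes != -1 && rbytes != -1 &&
        lbytes + rbytes >= size_of_remaining_item)
        return 1;

    return 0;
}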
698 static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree) are_leaves_removable() argument
700 struct virtual_node *vn = tb->tb_vn; are_leaves_removable()
704 S0 = PATH_H_PBUFFER(tb->tb_path, 0); are_leaves_removable()
723 if (tb->CFR[0] are_leaves_removable()
725 internal_key(tb->CFR[0], are_leaves_removable()
726 tb->rkey[0]))) are_leaves_removable()
751 set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1); are_leaves_removable()
752 PROC_INFO_INC(tb->tb_sb, leaves_removable); are_leaves_removable()
768 set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
773 set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
774 tb->lbytes, -1);\
776 set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
787 set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
792 set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
793 -1, tb->rbytes);\
795 set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
799 static void free_buffers_in_tb(struct tree_balance *tb) free_buffers_in_tb() argument
803 pathrelse(tb->tb_path); free_buffers_in_tb()
806 brelse(tb->L[i]); free_buffers_in_tb()
807 brelse(tb->R[i]); free_buffers_in_tb()
808 brelse(tb->FL[i]); free_buffers_in_tb()
809 brelse(tb->FR[i]); free_buffers_in_tb()
810 brelse(tb->CFL[i]); free_buffers_in_tb()
811 brelse(tb->CFR[i]); free_buffers_in_tb()
813 tb->L[i] = NULL; free_buffers_in_tb()
814 tb->R[i] = NULL; free_buffers_in_tb()
815 tb->FL[i] = NULL; free_buffers_in_tb()
816 tb->FR[i] = NULL; free_buffers_in_tb()
817 tb->CFL[i] = NULL; free_buffers_in_tb()
818 tb->CFR[i] = NULL; free_buffers_in_tb()
829 static int get_empty_nodes(struct tree_balance *tb, int h) get_empty_nodes() argument
831 struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h); get_empty_nodes()
836 struct super_block *sb = tb->tb_sb; get_empty_nodes()
842 * number_of_freeblk = tb->cur_blknum can be non-zero if a schedule get_empty_nodes()
859 for (counter = 0, number_of_freeblk = tb->cur_blknum; get_empty_nodes()
862 (tb->blknum[counter]) ? (tb->blknum[counter] - get_empty_nodes()
867 amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1; get_empty_nodes()
881 if (reiserfs_new_form_blocknrs(tb, blocknrs, get_empty_nodes()
900 RFALSE(tb->FEB[tb->cur_blknum], get_empty_nodes()
904 tb->FEB[tb->cur_blknum++] = new_bh; get_empty_nodes()
907 if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb)) get_empty_nodes()
917 static int get_lfree(struct tree_balance *tb, int h) get_lfree() argument
922 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || get_lfree()
923 (l = tb->FL[h]) == NULL) get_lfree()
927 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1; get_lfree()
940 static int get_rfree(struct tree_balance *tb, int h) get_rfree() argument
945 if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || get_rfree()
946 (r = tb->FR[h]) == NULL) get_rfree()
950 order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1; get_rfree()
961 static int is_left_neighbor_in_cache(struct tree_balance *tb, int h) is_left_neighbor_in_cache() argument
964 struct super_block *sb = tb->tb_sb; is_left_neighbor_in_cache()
969 if (!tb->FL[h]) is_left_neighbor_in_cache()
973 father = PATH_H_PBUFFER(tb->tb_path, h + 1); is_left_neighbor_in_cache()
977 !B_IS_IN_TREE(tb->FL[h]) || is_left_neighbor_in_cache()
979 !buffer_uptodate(tb->FL[h]), is_left_neighbor_in_cache()
981 father, tb->FL[h]); is_left_neighbor_in_cache()
987 left_neighbor_position = (father == tb->FL[h]) ? is_left_neighbor_in_cache()
988 tb->lkey[h] : B_NR_ITEMS(tb->FL[h]); is_left_neighbor_in_cache()
991 B_N_CHILD_NUM(tb->FL[h], left_neighbor_position); is_left_neighbor_in_cache()
1025 static int get_far_parent(struct tree_balance *tb, get_far_parent() argument
1032 struct treepath *path = tb->tb_path; get_far_parent()
1094 (tb->tb_path, get_far_parent()
1096 SB_ROOT_BLOCK(tb->tb_sb)) { get_far_parent()
1112 int depth = reiserfs_write_unlock_nested(tb->tb_sb); get_far_parent()
1114 reiserfs_write_lock_nested(tb->tb_sb, depth); get_far_parent()
1115 if (FILESYSTEM_CHANGED_TB(tb)) { get_far_parent()
1131 LEFT_PARENTS) ? (tb->lkey[h - 1] = get_far_parent()
1133 1) : (tb->rkey[h - get_far_parent()
1141 (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father, get_far_parent()
1146 if (FILESYSTEM_CHANGED_TB(tb)) { get_far_parent()
1174 static int get_parents(struct tree_balance *tb, int h) get_parents() argument
1176 struct treepath *path = tb->tb_path; get_parents()
1179 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); get_parents()
1189 brelse(tb->FL[h]); get_parents()
1190 brelse(tb->CFL[h]); get_parents()
1191 brelse(tb->FR[h]); get_parents()
1192 brelse(tb->CFR[h]); get_parents()
1193 tb->FL[h] = NULL; get_parents()
1194 tb->CFL[h] = NULL; get_parents()
1195 tb->FR[h] = NULL; get_parents()
1196 tb->CFR[h] = NULL; get_parents()
1208 tb->lkey[h] = position - 1; get_parents()
1218 if ((ret = get_far_parent(tb, h + 1, &curf, get_parents()
1224 brelse(tb->FL[h]); get_parents()
1225 tb->FL[h] = curf; /* New initialization of FL[h]. */ get_parents()
1226 brelse(tb->CFL[h]); get_parents()
1227 tb->CFL[h] = curcf; /* New initialization of CFL[h]. */ get_parents()
1244 get_far_parent(tb, h + 1, &curf, &curcf, get_parents()
1253 tb->rkey[h] = position; get_parents()
1256 brelse(tb->FR[h]); get_parents()
1258 tb->FR[h] = curf; get_parents()
1260 brelse(tb->CFR[h]); get_parents()
1262 tb->CFR[h] = curcf; get_parents()
1276 struct tree_balance *tb, int h) can_node_be_removed()
1278 struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h); can_node_be_removed()
1279 int levbytes = tb->insert_size[h]; can_node_be_removed()
1284 if (tb->CFR[h]) can_node_be_removed()
1285 r_key = internal_key(tb->CFR[h], tb->rkey[h]); can_node_be_removed()
1300 tb->s0num = can_node_be_removed()
1303 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); can_node_be_removed()
1307 PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]); can_node_be_removed()
1316 * tb tree_balance structure;
1326 static int ip_check_balance(struct tree_balance *tb, int h) ip_check_balance() argument
1328 struct virtual_node *vn = tb->tb_vn; ip_check_balance()
1375 Sh = PATH_H_PBUFFER(tb->tb_path, h); ip_check_balance()
1376 levbytes = tb->insert_size[h]; ip_check_balance()
1381 reiserfs_panic(tb->tb_sb, "vs-8210", ip_check_balance()
1383 switch (ret = get_empty_nodes(tb, h)) { ip_check_balance()
1386 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); ip_check_balance()
1393 reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect " ip_check_balance()
1399 ret = get_parents(tb, h); ip_check_balance()
1406 rfree = get_rfree(tb, h); ip_check_balance()
1407 lfree = get_lfree(tb, h); ip_check_balance()
1410 if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) == ip_check_balance()
1414 create_virtual_node(tb, h); ip_check_balance()
1418 * neighbor (in tb structure) and the maximal number of bytes ip_check_balance()
1422 check_left(tb, h, lfree); ip_check_balance()
1426 * neighbor (in tb structure) and the maximal number of bytes ip_check_balance()
1430 check_right(tb, h, rfree); ip_check_balance()
1436 if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) { ip_check_balance()
1448 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + ip_check_balance()
1450 tb->rnum[h]); ip_check_balance()
1451 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, ip_check_balance()
1461 (tb->lnum[h] >= vn->vn_nr_item + 1 || ip_check_balance()
1462 tb->rnum[h] >= vn->vn_nr_item + 1), ip_check_balance()
1464 RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) || ip_check_balance()
1465 (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))), ip_check_balance()
1472 if (!h && is_leaf_removable(tb)) ip_check_balance()
1485 tb->s0num = vn->vn_nr_item; ip_check_balance()
1486 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); ip_check_balance()
1514 lpar = tb->lnum[h]; ip_check_balance()
1515 rpar = tb->rnum[h]; ip_check_balance()
1525 nver = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1536 nver1 = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1552 lnver = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1553 lpar - ((h || tb->lbytes == -1) ? 0 : 1), ip_check_balance()
1559 lnver1 = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1561 ((tb->lbytes != -1) ? 1 : 0), ip_check_balance()
1562 tb->lbytes, 0, -1, ip_check_balance()
1577 rnver = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1580 ((tb-> ip_check_balance()
1588 rnver1 = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1591 ((tb->rbytes != -1) ? 1 : 0)), ip_check_balance()
1592 tb->rbytes, ip_check_balance()
1607 lrnver = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1608 lpar - ((h || tb->lbytes == -1) ? 0 : 1), ip_check_balance()
1611 ((tb-> ip_check_balance()
1619 lrnver1 = get_num_ver(vn->vn_mode, tb, h, ip_check_balance()
1621 ((tb->lbytes != -1) ? 1 : 0), ip_check_balance()
1622 tb->lbytes, ip_check_balance()
1624 ((tb->rbytes != -1) ? 1 : 0)), ip_check_balance()
1625 tb->rbytes, ip_check_balance()
1641 (tb->lnum[h] != 1 || ip_check_balance()
1642 tb->rnum[h] != 1 || ip_check_balance()
1646 set_parameters(tb, h, tb->lnum[h], tb->rnum[h], ip_check_balance()
1648 tb->lbytes, tb->rbytes); ip_check_balance()
1650 set_parameters(tb, h, ip_check_balance()
1651 tb->lnum[h] - ip_check_balance()
1652 ((tb->lbytes == -1) ? 0 : 1), ip_check_balance()
1653 tb->rnum[h] - ip_check_balance()
1654 ((tb->rbytes == -1) ? 0 : 1), ip_check_balance()
1665 set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1, ip_check_balance()
1697 if (is_left_neighbor_in_cache(tb, h)) { ip_check_balance()
1716 * tb tree_balance structure;
1728 static int dc_check_balance_internal(struct tree_balance *tb, int h) dc_check_balance_internal() argument
1730 struct virtual_node *vn = tb->tb_vn; dc_check_balance_internal()
1740 Sh = PATH_H_PBUFFER(tb->tb_path, h); dc_check_balance_internal()
1741 Fh = PATH_H_PPARENT(tb->tb_path, h); dc_check_balance_internal()
1746 * using tb->insert_size[h], which is negative in this case, dc_check_balance_internal()
1751 create_virtual_node(tb, h); dc_check_balance_internal()
1756 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); dc_check_balance_internal()
1764 set_parameters(tb, h, 0, 0, 0, NULL, -1, -1); dc_check_balance_internal()
1768 if ((ret = get_parents(tb, h)) != CARRY_ON) dc_check_balance_internal()
1772 rfree = get_rfree(tb, h); dc_check_balance_internal()
1773 lfree = get_lfree(tb, h); dc_check_balance_internal()
1776 check_left(tb, h, lfree); dc_check_balance_internal()
1777 check_right(tb, h, rfree); dc_check_balance_internal()
1790 if (tb->lnum[h] >= vn->vn_nr_item + 1) { dc_check_balance_internal()
1796 PATH_H_B_ITEM_ORDER(tb->tb_path, dc_check_balance_internal()
1798 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1; dc_check_balance_internal()
1799 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / dc_check_balance_internal()
1801 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, dc_check_balance_internal()
1807 if (tb->rnum[h] >= vn->vn_nr_item + 1) { dc_check_balance_internal()
1813 PATH_H_B_ITEM_ORDER(tb->tb_path, dc_check_balance_internal()
1816 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / dc_check_balance_internal()
1818 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, dc_check_balance_internal()
1828 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { dc_check_balance_internal()
1832 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - dc_check_balance_internal()
1833 tb->rnum[h] + vn->vn_nr_item + 1) / 2 - dc_check_balance_internal()
1834 (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]); dc_check_balance_internal()
1835 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, dc_check_balance_internal()
1841 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); dc_check_balance_internal()
1850 if (tb->lnum[h] >= vn->vn_nr_item + 1) dc_check_balance_internal()
1851 if (is_left_neighbor_in_cache(tb, h) dc_check_balance_internal()
1852 || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) { dc_check_balance_internal()
1858 PATH_H_B_ITEM_ORDER(tb->tb_path, dc_check_balance_internal()
1860 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1; dc_check_balance_internal()
1861 n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE + dc_check_balance_internal()
1863 set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1); dc_check_balance_internal()
1868 if (tb->rnum[h] >= vn->vn_nr_item + 1) { dc_check_balance_internal()
1874 PATH_H_B_ITEM_ORDER(tb->tb_path, dc_check_balance_internal()
1876 n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE + dc_check_balance_internal()
1878 set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1); dc_check_balance_internal()
1883 if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) { dc_check_balance_internal()
1887 ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] + dc_check_balance_internal()
1889 tb->rnum[h]); dc_check_balance_internal()
1890 set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL, dc_check_balance_internal()
1896 RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root"); dc_check_balance_internal()
1899 if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) { dc_check_balance_internal()
1903 (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item + dc_check_balance_internal()
1905 set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1); dc_check_balance_internal()
1909 set_parameters(tb, h, 0, dc_check_balance_internal()
1910 -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item + dc_check_balance_internal()
1920 * tb tree_balance structure;
1929 static int dc_check_balance_leaf(struct tree_balance *tb, int h) dc_check_balance_leaf() argument
1931 struct virtual_node *vn = tb->tb_vn; dc_check_balance_leaf()
1951 S0 = PATH_H_PBUFFER(tb->tb_path, 0); dc_check_balance_leaf()
1952 F0 = PATH_H_PPARENT(tb->tb_path, 0); dc_check_balance_leaf()
1954 levbytes = tb->insert_size[h]; dc_check_balance_leaf()
1963 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); dc_check_balance_leaf()
1967 if ((ret = get_parents(tb, h)) != CARRY_ON) dc_check_balance_leaf()
1971 rfree = get_rfree(tb, h); dc_check_balance_leaf()
1972 lfree = get_lfree(tb, h); dc_check_balance_leaf()
1974 create_virtual_node(tb, h); dc_check_balance_leaf()
1977 if (are_leaves_removable(tb, lfree, rfree)) dc_check_balance_leaf()
1986 check_left(tb, h, lfree); dc_check_balance_leaf()
1987 check_right(tb, h, rfree); dc_check_balance_leaf()
1990 if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1) dc_check_balance_leaf()
1991 if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */ dc_check_balance_leaf()
1992 !tb->FR[h]) { dc_check_balance_leaf()
1994 RFALSE(!tb->FL[h], dc_check_balance_leaf()
1998 set_parameters(tb, h, -1, 0, 0, NULL, -1, -1); dc_check_balance_leaf()
2003 if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) { dc_check_balance_leaf()
2004 set_parameters(tb, h, 0, -1, 0, NULL, -1, -1); dc_check_balance_leaf()
2012 if (is_leaf_removable(tb)) dc_check_balance_leaf()
2016 tb->s0num = vn->vn_nr_item; dc_check_balance_leaf()
2017 set_parameters(tb, h, 0, 0, 1, NULL, -1, -1); dc_check_balance_leaf()
2026 * tb tree_balance structure;
2035 static int dc_check_balance(struct tree_balance *tb, int h) dc_check_balance() argument
2037 RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)), dc_check_balance()
2041 return dc_check_balance_internal(tb, h); dc_check_balance()
2043 return dc_check_balance_leaf(tb, h); dc_check_balance()
2051 * tb tree_balance structure:
2053 * tb is a large structure that must be read about in the header
2066 struct tree_balance *tb, check_balance()
2074 vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf); check_balance()
2075 vn->vn_free_ptr = (char *)(tb->tb_vn + 1); check_balance()
2086 if (tb->insert_size[h] > 0) check_balance()
2087 return ip_check_balance(tb, h); check_balance()
2090 return dc_check_balance(tb, h); check_balance()
2094 static int get_direct_parent(struct tree_balance *tb, int h) get_direct_parent() argument
2097 struct treepath *path = tb->tb_path; get_direct_parent()
2099 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h); get_direct_parent()
2108 b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) { get_direct_parent()
2134 int depth = reiserfs_write_unlock_nested(tb->tb_sb); get_direct_parent()
2136 reiserfs_write_lock_nested(tb->tb_sb, depth); get_direct_parent()
2137 if (FILESYSTEM_CHANGED_TB(tb)) get_direct_parent()
2155 static int get_neighbors(struct tree_balance *tb, int h) get_neighbors() argument
2158 path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1); get_neighbors()
2160 struct super_block *sb = tb->tb_sb; get_neighbors()
2166 if (tb->lnum[h]) { get_neighbors()
2169 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); get_neighbors()
2171 RFALSE(bh == tb->FL[h] && get_neighbors()
2172 !PATH_OFFSET_POSITION(tb->tb_path, path_offset), get_neighbors()
2177 tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb-> get_neighbors()
2179 son_number = B_N_CHILD_NUM(tb->FL[h], child_position); get_neighbors()
2180 depth = reiserfs_write_unlock_nested(tb->tb_sb); get_neighbors()
2182 reiserfs_write_lock_nested(tb->tb_sb, depth); get_neighbors()
2185 if (FILESYSTEM_CHANGED_TB(tb)) { get_neighbors()
2191 RFALSE(!B_IS_IN_TREE(tb->FL[h]) || get_neighbors()
2192 child_position > B_NR_ITEMS(tb->FL[h]) || get_neighbors()
2193 B_N_CHILD_NUM(tb->FL[h], child_position) != get_neighbors()
2199 dc_size(B_N_CHILD(tb->FL[0], child_position)), get_neighbors()
2202 brelse(tb->L[h]); get_neighbors()
2203 tb->L[h] = bh; get_neighbors()
2207 if (tb->rnum[h]) { get_neighbors()
2209 bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset); get_neighbors()
2211 RFALSE(bh == tb->FR[h] && get_neighbors()
2212 PATH_OFFSET_POSITION(tb->tb_path, get_neighbors()
2218 (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0; get_neighbors()
2219 son_number = B_N_CHILD_NUM(tb->FR[h], child_position); get_neighbors()
2220 depth = reiserfs_write_unlock_nested(tb->tb_sb); get_neighbors()
2222 reiserfs_write_lock_nested(tb->tb_sb, depth); get_neighbors()
2225 if (FILESYSTEM_CHANGED_TB(tb)) { get_neighbors()
2230 brelse(tb->R[h]); get_neighbors()
2231 tb->R[h] = bh; get_neighbors()
2236 dc_size(B_N_CHILD(tb->FR[0], child_position)), get_neighbors()
2239 dc_size(B_N_CHILD(tb->FR[0], child_position))); get_neighbors()
2268 static int get_mem_for_virtual_node(struct tree_balance *tb) get_mem_for_virtual_node() argument
2274 size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path)); get_mem_for_virtual_node()
2277 if (size > tb->vn_buf_size) { get_mem_for_virtual_node()
2278 if (tb->vn_buf) { get_mem_for_virtual_node()
2280 kfree(tb->vn_buf); get_mem_for_virtual_node()
2286 tb->vn_buf_size = size; get_mem_for_virtual_node()
2297 free_buffers_in_tb(tb); get_mem_for_virtual_node()
2300 tb->vn_buf_size = 0; get_mem_for_virtual_node()
2302 tb->vn_buf = buf; get_mem_for_virtual_node()
2307 tb->vn_buf = buf; get_mem_for_virtual_node()
2310 if (check_fs && FILESYSTEM_CHANGED_TB(tb)) get_mem_for_virtual_node()
2367 static int wait_tb_buffers_until_unlocked(struct tree_balance *tb) wait_tb_buffers_until_unlocked() argument
2379 for (i = tb->tb_path->path_length; wait_tb_buffers_until_unlocked()
2381 if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) { wait_tb_buffers_until_unlocked()
2388 if (PATH_PLAST_BUFFER(tb->tb_path) == wait_tb_buffers_until_unlocked()
2389 PATH_OFFSET_PBUFFER(tb->tb_path, i)) wait_tb_buffers_until_unlocked()
2390 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2392 (tb->tb_path, wait_tb_buffers_until_unlocked()
2394 tb->tb_path-> wait_tb_buffers_until_unlocked()
2397 if (!clear_all_dirty_bits(tb->tb_sb, wait_tb_buffers_until_unlocked()
2399 (tb->tb_path, wait_tb_buffers_until_unlocked()
2402 PATH_OFFSET_PBUFFER(tb->tb_path, wait_tb_buffers_until_unlocked()
2408 for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i]; wait_tb_buffers_until_unlocked()
2411 if (tb->lnum[i]) { wait_tb_buffers_until_unlocked()
2413 if (tb->L[i]) { wait_tb_buffers_until_unlocked()
2414 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2415 tb->L[i], wait_tb_buffers_until_unlocked()
2418 (tb->tb_sb, tb->L[i])) wait_tb_buffers_until_unlocked()
2419 locked = tb->L[i]; wait_tb_buffers_until_unlocked()
2422 if (!locked && tb->FL[i]) { wait_tb_buffers_until_unlocked()
2423 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2424 tb->FL[i], wait_tb_buffers_until_unlocked()
2427 (tb->tb_sb, tb->FL[i])) wait_tb_buffers_until_unlocked()
2428 locked = tb->FL[i]; wait_tb_buffers_until_unlocked()
2431 if (!locked && tb->CFL[i]) { wait_tb_buffers_until_unlocked()
2432 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2433 tb->CFL[i], wait_tb_buffers_until_unlocked()
2436 (tb->tb_sb, tb->CFL[i])) wait_tb_buffers_until_unlocked()
2437 locked = tb->CFL[i]; wait_tb_buffers_until_unlocked()
2442 if (!locked && (tb->rnum[i])) { wait_tb_buffers_until_unlocked()
2444 if (tb->R[i]) { wait_tb_buffers_until_unlocked()
2445 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2446 tb->R[i], wait_tb_buffers_until_unlocked()
2449 (tb->tb_sb, tb->R[i])) wait_tb_buffers_until_unlocked()
2450 locked = tb->R[i]; wait_tb_buffers_until_unlocked()
2453 if (!locked && tb->FR[i]) { wait_tb_buffers_until_unlocked()
2454 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2455 tb->FR[i], wait_tb_buffers_until_unlocked()
2458 (tb->tb_sb, tb->FR[i])) wait_tb_buffers_until_unlocked()
2459 locked = tb->FR[i]; wait_tb_buffers_until_unlocked()
2462 if (!locked && tb->CFR[i]) { wait_tb_buffers_until_unlocked()
2463 tb_buffer_sanity_check(tb->tb_sb, wait_tb_buffers_until_unlocked()
2464 tb->CFR[i], wait_tb_buffers_until_unlocked()
2467 (tb->tb_sb, tb->CFR[i])) wait_tb_buffers_until_unlocked()
2468 locked = tb->CFR[i]; wait_tb_buffers_until_unlocked()
2483 if (tb->FEB[i]) { wait_tb_buffers_until_unlocked()
2485 (tb->tb_sb, tb->FEB[i])) wait_tb_buffers_until_unlocked()
2486 locked = tb->FEB[i]; wait_tb_buffers_until_unlocked()
2495 reiserfs_warning(tb->tb_sb, "reiserfs-8200", wait_tb_buffers_until_unlocked()
2502 return (FILESYSTEM_CHANGED_TB(tb)) ? wait_tb_buffers_until_unlocked()
2506 depth = reiserfs_write_unlock_nested(tb->tb_sb); wait_tb_buffers_until_unlocked()
2508 reiserfs_write_lock_nested(tb->tb_sb, depth); wait_tb_buffers_until_unlocked()
2509 if (FILESYSTEM_CHANGED_TB(tb)) wait_tb_buffers_until_unlocked()
2539 * tb tree_balance structure;
2549 int fix_nodes(int op_mode, struct tree_balance *tb, fix_nodes() argument
2552 int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path); fix_nodes()
2560 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); fix_nodes()
2562 ++REISERFS_SB(tb->tb_sb)->s_fix_nodes; fix_nodes()
2564 pos_in_item = tb->tb_path->pos_in_item; fix_nodes()
2566 tb->fs_gen = get_generation(tb->tb_sb); fix_nodes()
2574 reiserfs_prepare_for_journal(tb->tb_sb, fix_nodes()
2575 SB_BUFFER_WITH_SB(tb->tb_sb), 1); fix_nodes()
2576 journal_mark_dirty(tb->transaction_handle, fix_nodes()
2577 SB_BUFFER_WITH_SB(tb->tb_sb)); fix_nodes()
2578 if (FILESYSTEM_CHANGED_TB(tb)) fix_nodes()
2583 int depth = reiserfs_write_unlock_nested(tb->tb_sb); fix_nodes()
2585 reiserfs_write_lock_nested(tb->tb_sb, depth); fix_nodes()
2586 if (FILESYSTEM_CHANGED_TB(tb)) fix_nodes()
2590 if (REISERFS_SB(tb->tb_sb)->cur_tb) { fix_nodes()
2592 reiserfs_panic(tb->tb_sb, "PAP-8305", fix_nodes()
2597 reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is " fix_nodes()
2606 reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect " fix_nodes()
2616 reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect " fix_nodes()
2620 tb->insert_size[0]); fix_nodes()
2624 reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode " fix_nodes()
2629 if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH) fix_nodes()
2630 /* FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat */ fix_nodes()
2634 for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) { fix_nodes()
2635 ret = get_direct_parent(tb, h); fix_nodes()
2639 ret = check_balance(op_mode, tb, h, item_num, fix_nodes()
2644 ret = get_neighbors(tb, h); fix_nodes()
2648 tb->insert_size[h + 1] = 0; fix_nodes()
2658 ret = get_neighbors(tb, h); fix_nodes()
2666 ret = get_empty_nodes(tb, h); fix_nodes()
2674 if (!PATH_H_PBUFFER(tb->tb_path, h)) { fix_nodes()
2676 RFALSE(tb->blknum[h] != 1, fix_nodes()
2680 tb->insert_size[h + 1] = 0; fix_nodes()
2681 } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) { fix_nodes()
2688 if (tb->blknum[h] > 1) { fix_nodes()
2693 tb->insert_size[h + 1] = fix_nodes()
2695 KEY_SIZE) * (tb->blknum[h] - 1) + fix_nodes()
2698 tb->insert_size[h + 1] = 0; fix_nodes()
2700 tb->insert_size[h + 1] = fix_nodes()
2701 (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1); fix_nodes()
2704 ret = wait_tb_buffers_until_unlocked(tb); fix_nodes()
2706 if (FILESYSTEM_CHANGED_TB(tb)) { fix_nodes()
2731 pathrelse_and_restore(tb->tb_sb, tb->tb_path); fix_nodes()
2733 pathrelse(tb->tb_path); fix_nodes()
2738 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2739 tb->L[i]); fix_nodes()
2740 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2741 tb->R[i]); fix_nodes()
2742 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2743 tb->FL[i]); fix_nodes()
2744 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2745 tb->FR[i]); fix_nodes()
2746 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2747 tb-> fix_nodes()
2749 reiserfs_restore_prepared_buffer(tb->tb_sb, fix_nodes()
2750 tb-> fix_nodes()
2754 brelse(tb->L[i]); fix_nodes()
2755 brelse(tb->R[i]); fix_nodes()
2756 brelse(tb->FL[i]); fix_nodes()
2757 brelse(tb->FR[i]); fix_nodes()
2758 brelse(tb->CFL[i]); fix_nodes()
2759 brelse(tb->CFR[i]); fix_nodes()
2761 tb->L[i] = NULL; fix_nodes()
2762 tb->R[i] = NULL; fix_nodes()
2763 tb->FL[i] = NULL; fix_nodes()
2764 tb->FR[i] = NULL; fix_nodes()
2765 tb->CFL[i] = NULL; fix_nodes()
2766 tb->CFR[i] = NULL; fix_nodes()
2771 if (tb->FEB[i]) fix_nodes()
2773 (tb->tb_sb, tb->FEB[i]); fix_nodes()
2781 void unfix_nodes(struct tree_balance *tb) unfix_nodes() argument
2786 pathrelse_and_restore(tb->tb_sb, tb->tb_path); unfix_nodes()
2790 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]); unfix_nodes()
2791 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]); unfix_nodes()
2792 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]); unfix_nodes()
2793 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]); unfix_nodes()
2794 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]); unfix_nodes()
2795 reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]); unfix_nodes()
2797 brelse(tb->L[i]); unfix_nodes()
2798 brelse(tb->R[i]); unfix_nodes()
2799 brelse(tb->FL[i]); unfix_nodes()
2800 brelse(tb->FR[i]); unfix_nodes()
2801 brelse(tb->CFL[i]); unfix_nodes()
2802 brelse(tb->CFR[i]); unfix_nodes()
2807 if (tb->FEB[i]) { unfix_nodes()
2808 b_blocknr_t blocknr = tb->FEB[i]->b_blocknr; unfix_nodes()
2813 brelse(tb->FEB[i]); unfix_nodes()
2814 reiserfs_free_block(tb->transaction_handle, NULL, unfix_nodes()
2817 if (tb->used[i]) { unfix_nodes()
2819 brelse(tb->used[i]); unfix_nodes()
2823 kfree(tb->vn_buf); unfix_nodes()
1275 can_node_be_removed(int mode, int lfree, int sfree, int rfree, struct tree_balance *tb, int h) can_node_be_removed() argument
2065 check_balance(int mode, struct tree_balance *tb, int h, int inum, int pos_in_item, struct item_head *ins_ih, const void *data) check_balance() argument
H A Dprints.c622 void store_print_tb(struct tree_balance *tb) store_print_tb() argument
628 if (!tb) store_print_tb()
636 REISERFS_SB(tb->tb_sb)->s_do_balance, store_print_tb()
637 tb->tb_mode, PATH_LAST_POSITION(tb->tb_path), store_print_tb()
638 tb->tb_path->pos_in_item); store_print_tb()
640 for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) { store_print_tb()
641 if (PATH_H_PATH_OFFSET(tb->tb_path, h) <= store_print_tb()
642 tb->tb_path->path_length store_print_tb()
643 && PATH_H_PATH_OFFSET(tb->tb_path, store_print_tb()
645 tbSh = PATH_H_PBUFFER(tb->tb_path, h); store_print_tb()
646 tbFh = PATH_H_PPARENT(tb->tb_path, h); store_print_tb()
656 (tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL), store_print_tb()
657 (tb->L[h]) ? atomic_read(&tb->L[h]->b_count) : -1, store_print_tb()
658 (tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL), store_print_tb()
659 (tb->R[h]) ? atomic_read(&tb->R[h]->b_count) : -1, store_print_tb()
661 (tb->FL[h]) ? (long long)(tb->FL[h]-> store_print_tb()
663 (tb->FR[h]) ? (long long)(tb->FR[h]-> store_print_tb()
665 (tb->CFL[h]) ? (long long)(tb->CFL[h]-> store_print_tb()
667 (tb->CFR[h]) ? (long long)(tb->CFR[h]-> store_print_tb()
675 tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0], store_print_tb()
676 tb->rbytes, tb->blknum[0], tb->s0num, tb->snum[0], store_print_tb()
677 tb->sbytes[0], tb->snum[1], tb->sbytes[1], store_print_tb()
678 tb->cur_blknum, tb->lkey[0], tb->rkey[0]); store_print_tb()
686 h, tb->insert_size[h], tb->lnum[h], tb->rnum[h], store_print_tb()
687 tb->blknum[h]); store_print_tb()
688 } while (tb->insert_size[h]); store_print_tb()
696 for (i = 0; i < ARRAY_SIZE(tb->FEB); i++) store_print_tb()
698 "%p (%llu %d)%s", tb->FEB[i], store_print_tb()
699 tb->FEB[i] ? (unsigned long long)tb->FEB[i]-> store_print_tb()
701 tb->FEB[i] ? atomic_read(&tb->FEB[i]->b_count) : 0, store_print_tb()
702 (i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", "); store_print_tb()
H A Dlbalance.c392 do_balance_mark_leaf_dirty(dest_bi->tb, dest, 0); leaf_copy_items_entirely()
405 do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent, leaf_copy_items_entirely()
629 static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb, leaf_define_dest_src_infos() argument
641 src_bi->tb = tb; leaf_define_dest_src_infos()
642 src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path); leaf_define_dest_src_infos()
643 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0); leaf_define_dest_src_infos()
646 src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0); leaf_define_dest_src_infos()
647 dest_bi->tb = tb; leaf_define_dest_src_infos()
648 dest_bi->bi_bh = tb->L[0]; leaf_define_dest_src_infos()
649 dest_bi->bi_parent = tb->FL[0]; leaf_define_dest_src_infos()
650 dest_bi->bi_position = get_left_neighbor_position(tb, 0); leaf_define_dest_src_infos()
655 src_bi->tb = tb; leaf_define_dest_src_infos()
656 src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path); leaf_define_dest_src_infos()
657 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0); leaf_define_dest_src_infos()
658 src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0); leaf_define_dest_src_infos()
659 dest_bi->tb = tb; leaf_define_dest_src_infos()
660 dest_bi->bi_bh = tb->R[0]; leaf_define_dest_src_infos()
661 dest_bi->bi_parent = tb->FR[0]; leaf_define_dest_src_infos()
662 dest_bi->bi_position = get_right_neighbor_position(tb, 0); leaf_define_dest_src_infos()
667 src_bi->tb = tb; leaf_define_dest_src_infos()
668 src_bi->bi_bh = tb->R[0]; leaf_define_dest_src_infos()
669 src_bi->bi_parent = tb->FR[0]; leaf_define_dest_src_infos()
670 src_bi->bi_position = get_right_neighbor_position(tb, 0); leaf_define_dest_src_infos()
671 dest_bi->tb = tb; leaf_define_dest_src_infos()
672 dest_bi->bi_bh = tb->L[0]; leaf_define_dest_src_infos()
673 dest_bi->bi_parent = tb->FL[0]; leaf_define_dest_src_infos()
674 dest_bi->bi_position = get_left_neighbor_position(tb, 0); leaf_define_dest_src_infos()
679 src_bi->tb = tb; leaf_define_dest_src_infos()
680 src_bi->bi_bh = tb->L[0]; leaf_define_dest_src_infos()
681 src_bi->bi_parent = tb->FL[0]; leaf_define_dest_src_infos()
682 src_bi->bi_position = get_left_neighbor_position(tb, 0); leaf_define_dest_src_infos()
683 dest_bi->tb = tb; leaf_define_dest_src_infos()
684 dest_bi->bi_bh = tb->R[0]; leaf_define_dest_src_infos()
685 dest_bi->bi_parent = tb->FR[0]; leaf_define_dest_src_infos()
686 dest_bi->bi_position = get_right_neighbor_position(tb, 0); leaf_define_dest_src_infos()
691 src_bi->tb = tb; leaf_define_dest_src_infos()
692 src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path); leaf_define_dest_src_infos()
693 src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0); leaf_define_dest_src_infos()
694 src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0); leaf_define_dest_src_infos()
695 dest_bi->tb = tb; leaf_define_dest_src_infos()
715 int leaf_move_items(int shift_mode, struct tree_balance *tb, int mov_num, leaf_move_items() argument
722 leaf_define_dest_src_infos(shift_mode, tb, &dest_bi, &src_bi, leaf_move_items()
741 int leaf_shift_left(struct tree_balance *tb, int shift_num, int shift_bytes) leaf_shift_left() argument
743 struct buffer_head *S0 = PATH_PLAST_BUFFER(tb->tb_path); leaf_shift_left()
750 i = leaf_move_items(LEAF_FROM_S_TO_L, tb, shift_num, shift_bytes, NULL); leaf_shift_left()
760 if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) { leaf_shift_left()
762 reiserfs_panic(tb->tb_sb, "vs-10275", leaf_shift_left()
764 "(%c)", tb->tb_mode); leaf_shift_left()
768 if (PATH_H_POSITION(tb->tb_path, 1) == 0) leaf_shift_left()
769 replace_key(tb, tb->CFL[0], tb->lkey[0], leaf_shift_left()
770 PATH_H_PPARENT(tb->tb_path, 0), 0); leaf_shift_left()
774 replace_key(tb, tb->CFL[0], tb->lkey[0], S0, 0); leaf_shift_left()
794 int leaf_shift_right(struct tree_balance *tb, int shift_num, int shift_bytes) leaf_shift_right() argument
803 leaf_move_items(LEAF_FROM_S_TO_R, tb, shift_num, shift_bytes, NULL); leaf_shift_right()
807 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); leaf_shift_right()
847 do_balance_mark_leaf_dirty(cur_bi->tb, bh, 0); leaf_delete_items()
962 do_balance_mark_leaf_dirty(bi->tb, bh, 1); leaf_insert_into_buf()
970 do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0); leaf_insert_into_buf()
1001 if (bi && bi->tb) leaf_paste_in_buffer()
1002 sb = bi->tb->tb_sb; leaf_paste_in_buffer()
1056 do_balance_mark_leaf_dirty(bi->tb, bh, 0); leaf_paste_in_buffer()
1062 do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0); leaf_paste_in_buffer()
1229 do_balance_mark_leaf_dirty(bi->tb, bh, 0); leaf_cut_from_buffer()
1235 do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0); leaf_cut_from_buffer()
1267 do_balance_mark_leaf_dirty(bi->tb, bh, 0); leaf_delete_items_entirely()
1298 do_balance_mark_leaf_dirty(bi->tb, bh, 0); leaf_delete_items_entirely()
1306 do_balance_mark_internal_dirty(bi->tb, bi->bi_parent, 0); leaf_delete_items_entirely()
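leaf_define_dest_src_infos() above is pure dispatch: each shift mode only selects which of S[0], L[0] and R[0] (or the new node) acts as source and which as destination, and the five branches differ in nothing else. The mapping, condensed; LEAF_FROM_S_TO_L and LEAF_FROM_S_TO_R appear in the listing, while the remaining mode names follow the same scheme and are assumed.

enum leaf_shift_mode {          /* assumed values, mirroring the branches */
    LEAF_FROM_S_TO_L,
    LEAF_FROM_S_TO_R,
    LEAF_FROM_R_TO_L,
    LEAF_FROM_L_TO_R,
    LEAF_FROM_S_TO_SNEW
};

struct endpoints { char src, dst; };    /* 'S', 'L', 'R', 'N' (= S_new) */

static struct endpoints leaf_endpoints(enum leaf_shift_mode mode)
{
    switch (mode) {
    case LEAF_FROM_S_TO_L:    return (struct endpoints){ 'S', 'L' };
    case LEAF_FROM_S_TO_R:    return (struct endpoints){ 'S', 'R' };
    case LEAF_FROM_R_TO_L:    return (struct endpoints){ 'R', 'L' };
    case LEAF_FROM_L_TO_R:    return (struct endpoints){ 'L', 'R' };
    case LEAF_FROM_S_TO_SNEW: return (struct endpoints){ 'S', 'N' };
    }
    return (struct endpoints){ 0, 0 };
}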
/linux-4.1.27/drivers/thunderbolt/
H A Dtb.c11 #include "tb.h"
52 sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port)); tb_scan_port()
63 static void tb_free_invalid_tunnels(struct tb *tb) tb_free_invalid_tunnels() argument
67 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) tb_free_invalid_tunnels()
143 static void tb_activate_pcie_devices(struct tb *tb) tb_activate_pcie_devices() argument
153 for (i = 1; i <= tb->root_switch->config.max_port_number; i++) { tb_activate_pcie_devices()
154 if (tb_is_upstream_port(&tb->root_switch->ports[i])) tb_activate_pcie_devices()
156 if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT) tb_activate_pcie_devices()
158 if (!tb->root_switch->ports[i].remote) tb_activate_pcie_devices()
160 sw = tb->root_switch->ports[i].remote->sw; tb_activate_pcie_devices()
179 down_port = tb_find_unused_down_port(tb->root_switch); tb_activate_pcie_devices()
185 tunnel = tb_pci_alloc(tb, up_port, down_port); tb_activate_pcie_devices()
205 struct tb *tb; member in struct:tb_hotplug_event
214 * Executes on tb->wq.
219 struct tb *tb = ev->tb; tb_handle_hotplug() local
222 mutex_lock(&tb->lock); tb_handle_hotplug()
223 if (!tb->hotplug_active) tb_handle_hotplug()
226 sw = get_switch_at_route(tb->root_switch, ev->route); tb_handle_hotplug()
228 tb_warn(tb, tb_handle_hotplug()
234 tb_warn(tb, tb_handle_hotplug()
241 tb_warn(tb, tb_handle_hotplug()
250 tb_free_invalid_tunnels(tb); tb_handle_hotplug()
271 tb_activate_pcie_devices(tb); tb_handle_hotplug()
275 mutex_unlock(&tb->lock); tb_handle_hotplug()
287 struct tb *tb = data; tb_schedule_hotplug_handler() local
292 ev->tb = tb; tb_schedule_hotplug_handler()
296 queue_work(tb->wq, &ev->work); tb_schedule_hotplug_handler()
306 void thunderbolt_shutdown_and_free(struct tb *tb) thunderbolt_shutdown_and_free() argument
311 mutex_lock(&tb->lock); thunderbolt_shutdown_and_free()
314 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) { thunderbolt_shutdown_and_free()
319 if (tb->root_switch) thunderbolt_shutdown_and_free()
320 tb_switch_free(tb->root_switch); thunderbolt_shutdown_and_free()
321 tb->root_switch = NULL; thunderbolt_shutdown_and_free()
323 if (tb->ctl) { thunderbolt_shutdown_and_free()
324 tb_ctl_stop(tb->ctl); thunderbolt_shutdown_and_free()
325 tb_ctl_free(tb->ctl); thunderbolt_shutdown_and_free()
327 tb->ctl = NULL; thunderbolt_shutdown_and_free()
328 tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */ thunderbolt_shutdown_and_free()
331 mutex_unlock(&tb->lock); thunderbolt_shutdown_and_free()
332 if (tb->wq) { thunderbolt_shutdown_and_free()
333 flush_workqueue(tb->wq); thunderbolt_shutdown_and_free()
334 destroy_workqueue(tb->wq); thunderbolt_shutdown_and_free()
335 tb->wq = NULL; thunderbolt_shutdown_and_free()
337 mutex_destroy(&tb->lock); thunderbolt_shutdown_and_free()
338 kfree(tb); thunderbolt_shutdown_and_free()
349 struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi) thunderbolt_alloc_and_start()
351 struct tb *tb; thunderbolt_alloc_and_start() local
357 tb = kzalloc(sizeof(*tb), GFP_KERNEL); thunderbolt_alloc_and_start()
358 if (!tb) thunderbolt_alloc_and_start()
361 tb->nhi = nhi; thunderbolt_alloc_and_start()
362 mutex_init(&tb->lock); thunderbolt_alloc_and_start()
363 mutex_lock(&tb->lock); thunderbolt_alloc_and_start()
364 INIT_LIST_HEAD(&tb->tunnel_list); thunderbolt_alloc_and_start()
366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0); thunderbolt_alloc_and_start()
367 if (!tb->wq) thunderbolt_alloc_and_start()
370 tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb); thunderbolt_alloc_and_start()
371 if (!tb->ctl) thunderbolt_alloc_and_start()
377 tb_ctl_start(tb->ctl); thunderbolt_alloc_and_start()
379 tb->root_switch = tb_switch_alloc(tb, 0); thunderbolt_alloc_and_start()
380 if (!tb->root_switch) thunderbolt_alloc_and_start()
384 tb_scan_switch(tb->root_switch); thunderbolt_alloc_and_start()
385 tb_activate_pcie_devices(tb); thunderbolt_alloc_and_start()
388 tb->hotplug_active = true; thunderbolt_alloc_and_start()
389 mutex_unlock(&tb->lock); thunderbolt_alloc_and_start()
390 return tb; thunderbolt_alloc_and_start()
393 mutex_unlock(&tb->lock); thunderbolt_alloc_and_start()
394 thunderbolt_shutdown_and_free(tb); thunderbolt_alloc_and_start()
398 void thunderbolt_suspend(struct tb *tb) thunderbolt_suspend() argument
400 tb_info(tb, "suspending...\n"); thunderbolt_suspend()
401 mutex_lock(&tb->lock); thunderbolt_suspend()
402 tb_switch_suspend(tb->root_switch); thunderbolt_suspend()
403 tb_ctl_stop(tb->ctl); thunderbolt_suspend()
404 tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */ thunderbolt_suspend()
405 mutex_unlock(&tb->lock); thunderbolt_suspend()
406 tb_info(tb, "suspend finished\n"); thunderbolt_suspend()
409 void thunderbolt_resume(struct tb *tb) thunderbolt_resume() argument
412 tb_info(tb, "resuming...\n"); thunderbolt_resume()
413 mutex_lock(&tb->lock); thunderbolt_resume()
414 tb_ctl_start(tb->ctl); thunderbolt_resume()
417 tb_switch_reset(tb, 0); thunderbolt_resume()
419 tb_switch_resume(tb->root_switch); thunderbolt_resume()
420 tb_free_invalid_tunnels(tb); thunderbolt_resume()
421 tb_free_unplugged_children(tb->root_switch); thunderbolt_resume()
422 list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) thunderbolt_resume()
424 if (!list_empty(&tb->tunnel_list)) { thunderbolt_resume()
429 tb_info(tb, "tunnels restarted, sleeping for 100ms\n"); thunderbolt_resume()
433 tb->hotplug_active = true; thunderbolt_resume()
434 mutex_unlock(&tb->lock); thunderbolt_resume()
435 tb_info(tb, "resume finished\n"); thunderbolt_resume()
H A DMakefile2 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
H A Dtunnel_pci.h10 #include "tb.h"
13 struct tb *tb; member in struct:tb_pci_tunnel
21 struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
H A Dtb.h21 struct tb *tb; member in struct:tb_switch
80 struct tb *tb; member in struct:tb_path
97 * struct tb - main thunderbolt bus structure
99 struct tb { struct
143 return tb_cfg_read(sw->tb->ctl, tb_sw_read()
155 return tb_cfg_write(sw->tb->ctl, tb_sw_write()
167 return tb_cfg_read(port->sw->tb->ctl, tb_port_read()
179 return tb_cfg_write(port->sw->tb->ctl, tb_port_write()
188 #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
189 #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
190 #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
191 #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
197 level(__sw->tb, "%llx: " fmt, \
208 level(__port->sw->tb, "%llx:%x: " fmt, \
219 struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
220 void thunderbolt_shutdown_and_free(struct tb *tb);
221 void thunderbolt_suspend(struct tb *tb);
222 void thunderbolt_resume(struct tb *tb);
224 struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
228 int tb_switch_reset(struct tb *tb, u64 route);
238 struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
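The tb_* logging macros above layer cleanly: the switch and port variants only prepend a topology prefix ("%llx:" for a route, "%llx:%x:" for a port) before forwarding to the bus-level macro, which resolves to the NHI's PCI device. A printf-based model of the same layering; everything here is illustrative rather than the kernel macros:

#include <stdio.h>

struct bus { const char *name; };                 /* ~ tb->nhi->pdev */
struct sw  { struct bus *bus; unsigned long long route; };

#define bus_info(bus, fmt, ...) \
    printf("%s: " fmt, (bus)->name, ##__VA_ARGS__)
#define sw_info(sw, fmt, ...) \
    bus_info((sw)->bus, "%llx: " fmt, (sw)->route, ##__VA_ARGS__)

int main(void)
{
    struct bus b = { "0000:07:00.0" };
    struct sw  s = { &b, 0x301 };

    sw_info(&s, "suspending switch\n"); /* "0000:07:00.0: 301: ..." */
    return 0;
}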
H A Dswitch.c10 #include "tb.h"
43 static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) tb_dump_port() argument
45 tb_info(tb, tb_dump_port()
50 tb_info(tb, " Max hop id (in/out): %d/%d\n", tb_dump_port()
52 tb_info(tb, " Max counters: %d\n", port->max_counters); tb_dump_port()
53 tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits); tb_dump_port()
203 tb_dump_port(port->sw->tb, &port->config); tb_init_port()
212 static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) tb_dump_switch() argument
214 tb_info(tb, tb_dump_switch()
218 tb_info(tb, " Max Port Number: %d\n", sw->max_port_number); tb_dump_switch()
219 tb_info(tb, " Config:\n"); tb_dump_switch()
220 tb_info(tb, tb_dump_switch()
225 tb_info(tb, tb_dump_switch()
235 int tb_switch_reset(struct tb *tb, u64 route) tb_switch_reset() argument
243 tb_info(tb, "resetting switch at %llx\n", route); tb_switch_reset()
244 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, tb_switch_reset()
248 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); tb_switch_reset()
339 struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route) tb_switch_alloc() argument
344 int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); tb_switch_alloc()
352 sw->tb = tb; tb_switch_alloc()
353 if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5)) tb_switch_alloc()
355 tb_info(tb, tb_switch_alloc()
358 tb_info(tb, "old switch config:\n"); tb_switch_alloc()
359 tb_dump_switch(tb, &sw->config); tb_switch_alloc()
433 if (sw == sw->tb->root_switch) { tb_sw_set_unpplugged()
H A Dpath.c10 #include "tb.h"
35 struct tb_path *tb_path_alloc(struct tb *tb, int num_hops) tb_path_alloc() argument
45 path->tb = tb; tb_path_alloc()
56 tb_WARN(path->tb, "trying to free an activated path\n") tb_path_free()
93 tb_WARN(path->tb, "trying to deactivate an inactive path\n"); tb_path_deactivate()
96 tb_info(path->tb, tb_path_deactivate()
120 tb_WARN(path->tb, "trying to activate already activated path\n"); tb_path_activate()
124 tb_info(path->tb, tb_path_activate()
212 tb_info(path->tb, "path activation complete\n"); tb_path_activate()
215 tb_WARN(path->tb, "path activation failed\n"); tb_path_activate()
H A Dtunnel_pci.c11 #include "tb.h"
16 level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
58 struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up, tb_pci_alloc() argument
64 tunnel->tb = tb; tb_pci_alloc()
68 tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2); tb_pci_alloc()
71 tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2); tb_pci_alloc()
208 list_add(&tunnel->list, &tunnel->tb->tunnel_list); tb_pci_activate()
H A Dnhi.c20 #include "tb.h"
499 struct tb *tb = pci_get_drvdata(pdev); nhi_suspend_noirq() local
500 thunderbolt_suspend(tb); nhi_suspend_noirq()
507 struct tb *tb = pci_get_drvdata(pdev); nhi_resume_noirq() local
508 thunderbolt_resume(tb); nhi_resume_noirq()
538 struct tb *tb; nhi_probe() local
596 tb = thunderbolt_alloc_and_start(nhi); nhi_probe()
597 if (!tb) { nhi_probe()
605 pci_set_drvdata(pdev, tb); nhi_probe()
612 struct tb *tb = pci_get_drvdata(pdev); nhi_remove() local
613 struct tb_nhi *nhi = tb->nhi; nhi_remove()
614 thunderbolt_shutdown_and_free(tb); nhi_remove()
H A Dctl.h51 int err; /* negative errors, 0 for success, 1 for tb errors */
H A Dcap.c10 #include "tb.h"
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dtrace.c17 struct trace_buffer *tb; trace_buffer_allocate() local
19 if (size < sizeof(*tb)) { trace_buffer_allocate()
24 tb = mmap(NULL, size, PROT_READ | PROT_WRITE, trace_buffer_allocate()
26 if (tb == MAP_FAILED) { trace_buffer_allocate()
31 tb->size = size; trace_buffer_allocate()
32 tb->tail = tb->data; trace_buffer_allocate()
33 tb->overflow = false; trace_buffer_allocate()
35 return tb; trace_buffer_allocate()
38 static bool trace_check_bounds(struct trace_buffer *tb, void *p) trace_check_bounds() argument
40 return p < ((void *)tb + tb->size); trace_check_bounds()
43 static bool trace_check_alloc(struct trace_buffer *tb, void *p) trace_check_alloc() argument
51 if (tb->overflow) trace_check_alloc()
54 if (!trace_check_bounds(tb, p)) { trace_check_alloc()
55 tb->overflow = true; trace_check_alloc()
62 static void *trace_alloc(struct trace_buffer *tb, int bytes) trace_alloc() argument
66 p = tb->tail; trace_alloc()
67 newtail = tb->tail + bytes; trace_alloc()
68 if (!trace_check_alloc(tb, newtail)) trace_alloc()
71 tb->tail = newtail; trace_alloc()
76 static struct trace_entry *trace_alloc_entry(struct trace_buffer *tb, int payload_size) trace_alloc_entry() argument
80 e = trace_alloc(tb, sizeof(*e) + payload_size); trace_alloc_entry()
87 int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value) trace_log_reg() argument
92 e = trace_alloc_entry(tb, sizeof(reg) + sizeof(value)); trace_log_reg()
104 int trace_log_counter(struct trace_buffer *tb, u64 value) trace_log_counter() argument
109 e = trace_alloc_entry(tb, sizeof(value)); trace_log_counter()
120 int trace_log_string(struct trace_buffer *tb, char *str) trace_log_string() argument
129 e = trace_alloc_entry(tb, len + 1); trace_log_string()
142 int trace_log_indent(struct trace_buffer *tb) trace_log_indent() argument
146 e = trace_alloc_entry(tb, 0); trace_log_indent()
155 int trace_log_outdent(struct trace_buffer *tb) trace_log_outdent() argument
159 e = trace_alloc_entry(tb, 0); trace_log_outdent()
269 void trace_buffer_print(struct trace_buffer *tb) trace_buffer_print() argument
276 printf(" address %p \n", tb); trace_buffer_print()
277 printf(" tail %p\n", tb->tail); trace_buffer_print()
278 printf(" size %llu\n", tb->size); trace_buffer_print()
279 printf(" overflow %s\n", tb->overflow ? "TRUE" : "false"); trace_buffer_print()
282 p = tb->data; trace_buffer_print()
287 while (trace_check_bounds(tb, p) && p < tb->tail) { trace_buffer_print()
297 void trace_print_location(struct trace_buffer *tb) trace_print_location() argument
299 printf("Trace buffer 0x%llx bytes @ %p\n", tb->size, tb); trace_print_location()
H A Dtrace.h33 int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value);
34 int trace_log_counter(struct trace_buffer *tb, u64 value);
35 int trace_log_string(struct trace_buffer *tb, char *str);
36 int trace_log_indent(struct trace_buffer *tb);
37 int trace_log_outdent(struct trace_buffer *tb);
38 void trace_buffer_print(struct trace_buffer *tb);
39 void trace_print_location(struct trace_buffer *tb);
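
trace.c implements a bump allocator over one mmap'd region: trace_alloc() hands out the current tail, trace_check_alloc() sets a sticky overflow flag once a request would run past tb->size, and every trace_log_* call funnels through it. A self-contained sketch of that allocator; the struct layout here is illustrative, not the selftest's exact one:

    /*
     * Sketch of the bump allocator in trace.c: tail advances through a
     * fixed buffer, and a sticky overflow flag stops further allocation
     * once a request would run past the end of the region.
     */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct tbuf {
        size_t size;            /* total region size, header included */
        bool overflow;
        char *tail;
        char data[256];
    };

    static bool check_bounds(struct tbuf *tb, void *p)
    {
        return p < (void *)((char *)tb + tb->size);
    }

    static void *tbuf_alloc(struct tbuf *tb, size_t bytes)
    {
        char *p = tb->tail;

        if (tb->overflow || !check_bounds(tb, tb->tail + bytes)) {
            tb->overflow = true;    /* sticky, as in trace_check_alloc() */
            return NULL;
        }
        tb->tail += bytes;
        return p;
    }

    int main(void)
    {
        struct tbuf tb = { .size = sizeof(tb), .tail = tb.data };

        while (tbuf_alloc(&tb, 64))
            ;
        printf("overflow=%s used=%zu\n", tb.overflow ? "true" : "false",
               (size_t)(tb.tail - tb.data));
        return 0;
    }
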
/linux-4.1.27/drivers/iio/common/st_sensors/
H A Dst_sensors_spi.c29 static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb, st_sensors_spi_read() argument
36 .tx_buf = tb->tx_buf, st_sensors_spi_read()
41 .rx_buf = tb->rx_buf, st_sensors_spi_read()
47 mutex_lock(&tb->buf_lock); st_sensors_spi_read()
49 tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD; st_sensors_spi_read()
51 tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ; st_sensors_spi_read()
57 memcpy(data, tb->rx_buf, len); st_sensors_spi_read()
58 mutex_unlock(&tb->buf_lock); st_sensors_spi_read()
62 mutex_unlock(&tb->buf_lock); st_sensors_spi_read()
66 static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb, st_sensors_spi_read_byte() argument
69 return st_sensors_spi_read(tb, dev, reg_addr, 1, res_byte, false); st_sensors_spi_read_byte()
73 struct st_sensor_transfer_buffer *tb, struct device *dev, st_sensors_spi_read_multiple_byte()
76 return st_sensors_spi_read(tb, dev, reg_addr, len, data, multiread_bit); st_sensors_spi_read_multiple_byte()
79 static int st_sensors_spi_write_byte(struct st_sensor_transfer_buffer *tb, st_sensors_spi_write_byte() argument
85 .tx_buf = tb->tx_buf, st_sensors_spi_write_byte()
90 mutex_lock(&tb->buf_lock); st_sensors_spi_write_byte()
91 tb->tx_buf[0] = reg_addr; st_sensors_spi_write_byte()
92 tb->tx_buf[1] = data; st_sensors_spi_write_byte()
95 mutex_unlock(&tb->buf_lock); st_sensors_spi_write_byte()
72 st_sensors_spi_read_multiple_byte( struct st_sensor_transfer_buffer *tb, struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit) st_sensors_spi_read_multiple_byte() argument
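
The SPI helpers above share one tx/rx scratch buffer across all transfers and serialize it with tb->buf_lock, setting the read bit in tx_buf[0] before the transfer and copying rx_buf out afterwards. A pthread model of that guard, with all names invented and the SPI transfer itself elided:

    /*
     * Pthread model of the buf_lock pattern in st_sensors_spi.c: one
     * scratch buffer shared by all transfers, serialized by a mutex so
     * a concurrent caller cannot clobber tx_buf mid-transfer.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct xfer_buf {
        pthread_mutex_t lock;
        unsigned char tx_buf[8];
        unsigned char rx_buf[8];
    };

    static int read_reg(struct xfer_buf *tb, unsigned char reg,
                        unsigned char *out, size_t len)
    {
        pthread_mutex_lock(&tb->lock);
        tb->tx_buf[0] = reg | 0x80;     /* read bit, like the driver's
                                           ST_SENSORS_SPI_READ OR-in */
        /* a real driver would run the SPI transfer here */
        memcpy(out, tb->rx_buf, len);
        pthread_mutex_unlock(&tb->lock);
        return 0;
    }

    int main(void)
    {
        struct xfer_buf tb = { .lock = PTHREAD_MUTEX_INITIALIZER };
        unsigned char v;

        read_reg(&tb, 0x0f, &v, 1);
        printf("reg value %u\n", v);
        return 0;
    }
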
H A Dst_sensors_buffer.c48 len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev, st_sensors_get_buffer_element()
53 len = sdata->tf->read_multiple_byte(&sdata->tb, st_sensors_get_buffer_element()
65 len = sdata->tf->read_multiple_byte(&sdata->tb, st_sensors_get_buffer_element()
85 len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev, st_sensors_get_buffer_element()
H A Dst_sensors_i2c.c29 static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb, st_sensors_i2c_read_byte() argument
45 struct st_sensor_transfer_buffer *tb, struct device *dev, st_sensors_i2c_read_multiple_byte()
55 static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb, st_sensors_i2c_write_byte() argument
44 st_sensors_i2c_read_multiple_byte( struct st_sensor_transfer_buffer *tb, struct device *dev, u8 reg_addr, int len, u8 *data, bool multiread_bit) st_sensors_i2c_read_multiple_byte() argument
/linux-4.1.27/include/linux/
H A Dtty_flip.h19 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_char() local
22 change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL); tty_insert_flip_char()
23 if (!change && tb->used < tb->size) { tty_insert_flip_char()
24 if (~tb->flags & TTYB_NORMAL) tty_insert_flip_char()
25 *flag_buf_ptr(tb, tb->used) = flag; tty_insert_flip_char()
26 *char_buf_ptr(tb, tb->used++) = ch; tty_insert_flip_char()
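
tty_insert_flip_char()'s fast path appends one character (plus a flag byte when the buffer carries flags) while tb->used < tb->size; anything else falls through to a slow path. A standalone model of just the fast path, with invented types and a plain failure return in place of the slow path:

    /*
     * Model of the fast path in tty_insert_flip_char(): append ch (and
     * a flag byte when the buffer carries flags) while used < size.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define BUFSZ 8

    struct flip_buf {
        bool has_flags;
        int used, size;
        unsigned char chars[BUFSZ];
        unsigned char flags[BUFSZ];
    };

    static int insert_char(struct flip_buf *tb, unsigned char ch,
                           unsigned char flag)
    {
        if (tb->used >= tb->size)
            return 0;               /* slow path in the real code */
        if (tb->has_flags)
            tb->flags[tb->used] = flag;
        tb->chars[tb->used++] = ch;
        return 1;
    }

    int main(void)
    {
        struct flip_buf tb = { .has_flags = true, .size = BUFSZ };

        for (unsigned char c = 'a'; insert_char(&tb, c, 0); c++)
            ;
        printf("stored %d chars\n", tb.used);
        return 0;
    }
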
H A Drtnetlink.h112 struct nlattr *tb[],
118 struct nlattr *tb[],
/linux-4.1.27/net/netfilter/ipset/
H A Dip_set_hash_ipportip.c112 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip4_uadt() argument
123 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_ipportip4_uadt()
124 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipportip4_uadt()
125 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipportip4_uadt()
126 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipportip4_uadt()
127 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipportip4_uadt()
128 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipportip4_uadt()
129 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipportip4_uadt()
130 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipportip4_uadt()
131 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_ipportip4_uadt()
134 if (tb[IPSET_ATTR_LINENO]) hash_ipportip4_uadt()
135 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipportip4_uadt()
137 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) || hash_ipportip4_uadt()
138 ip_set_get_extensions(set, tb, &ext); hash_ipportip4_uadt()
142 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2); hash_ipportip4_uadt()
146 if (tb[IPSET_ATTR_PORT]) hash_ipportip4_uadt()
147 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipportip4_uadt()
151 if (tb[IPSET_ATTR_PROTO]) { hash_ipportip4_uadt()
152 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipportip4_uadt()
164 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || hash_ipportip4_uadt()
165 tb[IPSET_ATTR_PORT_TO])) { hash_ipportip4_uadt()
171 if (tb[IPSET_ATTR_IP_TO]) { hash_ipportip4_uadt()
172 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_ipportip4_uadt()
177 } else if (tb[IPSET_ATTR_CIDR]) { hash_ipportip4_uadt()
178 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_ipportip4_uadt()
186 if (with_ports && tb[IPSET_ATTR_PORT_TO]) { hash_ipportip4_uadt()
187 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipportip4_uadt()
285 hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportip6_uadt() argument
296 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_ipportip6_uadt()
297 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipportip6_uadt()
298 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipportip6_uadt()
299 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipportip6_uadt()
300 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipportip6_uadt()
301 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipportip6_uadt()
302 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipportip6_uadt()
303 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipportip6_uadt()
304 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || hash_ipportip6_uadt()
305 tb[IPSET_ATTR_IP_TO] || hash_ipportip6_uadt()
306 tb[IPSET_ATTR_CIDR])) hash_ipportip6_uadt()
309 if (tb[IPSET_ATTR_LINENO]) hash_ipportip6_uadt()
310 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipportip6_uadt()
312 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_ipportip6_uadt()
313 ip_set_get_extensions(set, tb, &ext); hash_ipportip6_uadt()
317 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); hash_ipportip6_uadt()
321 if (tb[IPSET_ATTR_PORT]) hash_ipportip6_uadt()
322 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipportip6_uadt()
326 if (tb[IPSET_ATTR_PROTO]) { hash_ipportip6_uadt()
327 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipportip6_uadt()
338 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { hash_ipportip6_uadt()
344 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipportip6_uadt()
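
Every *_uadt() above follows the same netlink convention: a parse step fills tb[], an array of attribute pointers indexed by attribute type, and the handler tests tb[ATTR] for presence before extracting the payload with nla_get_*(). A minimal userspace model of that table; the attribute set and "wire" struct are invented, and parse() plays the role of nla_parse_nested():

    /*
     * Userspace model of the tb[] attribute-table convention: index
     * attributes by type, then presence-test before extracting.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { ATTR_UNSPEC, ATTR_IP, ATTR_PORT, __ATTR_MAX };
    #define ATTR_MAX (__ATTR_MAX - 1)

    struct attr {
        uint16_t type;
        uint16_t len;
        const void *data;
    };

    static void parse(const struct attr *msg, int n, const struct attr **tb)
    {
        memset(tb, 0, sizeof(*tb) * (ATTR_MAX + 1));
        for (int i = 0; i < n; i++)
            if (msg[i].type <= ATTR_MAX)
                tb[msg[i].type] = &msg[i];
    }

    int main(void)
    {
        uint32_t ip = 0x0a000001;           /* 10.0.0.1, host order */
        uint16_t port = 80;
        const struct attr msg[] = {
            { ATTR_IP,   sizeof(ip),   &ip },
            { ATTR_PORT, sizeof(port), &port },
        };
        const struct attr *tb[ATTR_MAX + 1];

        parse(msg, 2, tb);
        if (!tb[ATTR_IP])       /* mandatory, like !tb[IPSET_ATTR_IP] */
            return 1;
        if (tb[ATTR_PORT])      /* optional: test presence, then extract */
            printf("port %u\n",
                   (unsigned)*(const uint16_t *)tb[ATTR_PORT]->data);
        return 0;
    }
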
H A Dip_set_hash_netnet.c159 hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netnet4_uadt() argument
172 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_netnet4_uadt()
173 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netnet4_uadt()
174 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netnet4_uadt()
175 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netnet4_uadt()
176 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netnet4_uadt()
177 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netnet4_uadt()
178 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netnet4_uadt()
179 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netnet4_uadt()
182 if (tb[IPSET_ATTR_LINENO]) hash_netnet4_uadt()
183 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netnet4_uadt()
185 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_netnet4_uadt()
186 ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) || hash_netnet4_uadt()
187 ip_set_get_extensions(set, tb, &ext); hash_netnet4_uadt()
191 if (tb[IPSET_ATTR_CIDR]) { hash_netnet4_uadt()
192 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netnet4_uadt()
198 if (tb[IPSET_ATTR_CIDR2]) { hash_netnet4_uadt()
199 cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_netnet4_uadt()
205 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netnet4_uadt()
206 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netnet4_uadt()
211 if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || hash_netnet4_uadt()
212 tb[IPSET_ATTR_IP2_TO])) { hash_netnet4_uadt()
221 if (tb[IPSET_ATTR_IP_TO]) { hash_netnet4_uadt()
222 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_netnet4_uadt()
233 if (tb[IPSET_ATTR_IP2_TO]) { hash_netnet4_uadt()
234 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); hash_netnet4_uadt()
390 hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netnet6_uadt() argument
399 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_netnet6_uadt()
400 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netnet6_uadt()
401 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netnet6_uadt()
402 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netnet6_uadt()
403 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netnet6_uadt()
404 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netnet6_uadt()
405 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netnet6_uadt()
406 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netnet6_uadt()
408 if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) hash_netnet6_uadt()
411 if (tb[IPSET_ATTR_LINENO]) hash_netnet6_uadt()
412 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netnet6_uadt()
414 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) || hash_netnet6_uadt()
415 ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) || hash_netnet6_uadt()
416 ip_set_get_extensions(set, tb, &ext); hash_netnet6_uadt()
420 if (tb[IPSET_ATTR_CIDR]) hash_netnet6_uadt()
421 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netnet6_uadt()
423 if (tb[IPSET_ATTR_CIDR2]) hash_netnet6_uadt()
424 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_netnet6_uadt()
433 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netnet6_uadt()
434 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netnet6_uadt()
H A Dip_set_hash_ipportnet.c164 hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportnet4_uadt() argument
177 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_ipportnet4_uadt()
178 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipportnet4_uadt()
179 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipportnet4_uadt()
180 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipportnet4_uadt()
181 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_ipportnet4_uadt()
182 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipportnet4_uadt()
183 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipportnet4_uadt()
184 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipportnet4_uadt()
185 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipportnet4_uadt()
186 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_ipportnet4_uadt()
189 if (tb[IPSET_ATTR_LINENO]) hash_ipportnet4_uadt()
190 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipportnet4_uadt()
192 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_ipportnet4_uadt()
193 ip_set_get_extensions(set, tb, &ext); hash_ipportnet4_uadt()
197 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); hash_ipportnet4_uadt()
201 if (tb[IPSET_ATTR_CIDR2]) { hash_ipportnet4_uadt()
202 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_ipportnet4_uadt()
208 if (tb[IPSET_ATTR_PORT]) hash_ipportnet4_uadt()
209 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipportnet4_uadt()
213 if (tb[IPSET_ATTR_PROTO]) { hash_ipportnet4_uadt()
214 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipportnet4_uadt()
225 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_ipportnet4_uadt()
226 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_ipportnet4_uadt()
231 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; hash_ipportnet4_uadt()
233 !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || hash_ipportnet4_uadt()
234 tb[IPSET_ATTR_IP2_TO])) { hash_ipportnet4_uadt()
243 if (tb[IPSET_ATTR_IP_TO]) { hash_ipportnet4_uadt()
244 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_ipportnet4_uadt()
249 } else if (tb[IPSET_ATTR_CIDR]) { hash_ipportnet4_uadt()
250 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_ipportnet4_uadt()
258 if (tb[IPSET_ATTR_PORT_TO]) { hash_ipportnet4_uadt()
259 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipportnet4_uadt()
265 if (tb[IPSET_ATTR_IP2_TO]) { hash_ipportnet4_uadt()
266 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); hash_ipportnet4_uadt()
420 hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipportnet6_uadt() argument
432 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_ipportnet6_uadt()
433 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipportnet6_uadt()
434 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipportnet6_uadt()
435 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipportnet6_uadt()
436 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_ipportnet6_uadt()
437 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipportnet6_uadt()
438 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipportnet6_uadt()
439 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipportnet6_uadt()
440 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipportnet6_uadt()
441 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || hash_ipportnet6_uadt()
442 tb[IPSET_ATTR_IP_TO] || hash_ipportnet6_uadt()
443 tb[IPSET_ATTR_CIDR])) hash_ipportnet6_uadt()
445 if (unlikely(tb[IPSET_ATTR_IP_TO])) hash_ipportnet6_uadt()
448 if (tb[IPSET_ATTR_LINENO]) hash_ipportnet6_uadt()
449 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipportnet6_uadt()
451 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_ipportnet6_uadt()
452 ip_set_get_extensions(set, tb, &ext); hash_ipportnet6_uadt()
456 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); hash_ipportnet6_uadt()
460 if (tb[IPSET_ATTR_CIDR2]) { hash_ipportnet6_uadt()
461 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_ipportnet6_uadt()
469 if (tb[IPSET_ATTR_PORT]) hash_ipportnet6_uadt()
470 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipportnet6_uadt()
474 if (tb[IPSET_ATTR_PROTO]) { hash_ipportnet6_uadt()
475 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipportnet6_uadt()
486 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_ipportnet6_uadt()
487 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_ipportnet6_uadt()
492 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { hash_ipportnet6_uadt()
499 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipportnet6_uadt()
H A Dip_set_hash_netportnet.c174 hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netportnet4_uadt() argument
188 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_netportnet4_uadt()
189 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_netportnet4_uadt()
190 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_netportnet4_uadt()
191 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netportnet4_uadt()
192 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netportnet4_uadt()
193 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netportnet4_uadt()
194 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netportnet4_uadt()
195 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netportnet4_uadt()
196 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netportnet4_uadt()
197 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netportnet4_uadt()
200 if (tb[IPSET_ATTR_LINENO]) hash_netportnet4_uadt()
201 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netportnet4_uadt()
203 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_netportnet4_uadt()
204 ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) || hash_netportnet4_uadt()
205 ip_set_get_extensions(set, tb, &ext); hash_netportnet4_uadt()
209 if (tb[IPSET_ATTR_CIDR]) { hash_netportnet4_uadt()
210 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netportnet4_uadt()
216 if (tb[IPSET_ATTR_CIDR2]) { hash_netportnet4_uadt()
217 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_netportnet4_uadt()
223 if (tb[IPSET_ATTR_PORT]) hash_netportnet4_uadt()
224 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_netportnet4_uadt()
228 if (tb[IPSET_ATTR_PROTO]) { hash_netportnet4_uadt()
229 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_netportnet4_uadt()
240 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netportnet4_uadt()
241 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netportnet4_uadt()
246 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; hash_netportnet4_uadt()
248 !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) { hash_netportnet4_uadt()
257 if (tb[IPSET_ATTR_IP_TO]) { hash_netportnet4_uadt()
258 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_netportnet4_uadt()
269 if (tb[IPSET_ATTR_PORT_TO]) { hash_netportnet4_uadt()
270 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_netportnet4_uadt()
276 if (tb[IPSET_ATTR_IP2_TO]) { hash_netportnet4_uadt()
277 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); hash_netportnet4_uadt()
451 hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netportnet6_uadt() argument
463 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || hash_netportnet6_uadt()
464 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_netportnet6_uadt()
465 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_netportnet6_uadt()
466 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netportnet6_uadt()
467 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netportnet6_uadt()
468 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netportnet6_uadt()
469 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netportnet6_uadt()
470 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netportnet6_uadt()
471 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netportnet6_uadt()
472 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netportnet6_uadt()
474 if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) hash_netportnet6_uadt()
477 if (tb[IPSET_ATTR_LINENO]) hash_netportnet6_uadt()
478 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netportnet6_uadt()
480 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) || hash_netportnet6_uadt()
481 ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) || hash_netportnet6_uadt()
482 ip_set_get_extensions(set, tb, &ext); hash_netportnet6_uadt()
486 if (tb[IPSET_ATTR_CIDR]) hash_netportnet6_uadt()
487 e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netportnet6_uadt()
489 if (tb[IPSET_ATTR_CIDR2]) hash_netportnet6_uadt()
490 e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]); hash_netportnet6_uadt()
499 if (tb[IPSET_ATTR_PORT]) hash_netportnet6_uadt()
500 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_netportnet6_uadt()
504 if (tb[IPSET_ATTR_PROTO]) { hash_netportnet6_uadt()
505 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_netportnet6_uadt()
516 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netportnet6_uadt()
517 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netportnet6_uadt()
522 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { hash_netportnet6_uadt()
529 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_netportnet6_uadt()
H A Dip_set_hash_ipport.c110 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport4_uadt() argument
121 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ipport4_uadt()
122 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipport4_uadt()
123 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipport4_uadt()
124 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipport4_uadt()
125 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipport4_uadt()
126 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipport4_uadt()
127 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipport4_uadt()
128 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipport4_uadt()
129 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_ipport4_uadt()
132 if (tb[IPSET_ATTR_LINENO]) hash_ipport4_uadt()
133 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipport4_uadt()
135 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) || hash_ipport4_uadt()
136 ip_set_get_extensions(set, tb, &ext); hash_ipport4_uadt()
140 if (tb[IPSET_ATTR_PORT]) hash_ipport4_uadt()
141 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipport4_uadt()
145 if (tb[IPSET_ATTR_PROTO]) { hash_ipport4_uadt()
146 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipport4_uadt()
158 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || hash_ipport4_uadt()
159 tb[IPSET_ATTR_PORT_TO])) { hash_ipport4_uadt()
165 if (tb[IPSET_ATTR_IP_TO]) { hash_ipport4_uadt()
166 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_ipport4_uadt()
171 } else if (tb[IPSET_ATTR_CIDR]) { hash_ipport4_uadt()
172 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_ipport4_uadt()
180 if (with_ports && tb[IPSET_ATTR_PORT_TO]) { hash_ipport4_uadt()
181 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipport4_uadt()
277 hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipport6_uadt() argument
288 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ipport6_uadt()
289 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_ipport6_uadt()
290 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_ipport6_uadt()
291 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipport6_uadt()
292 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipport6_uadt()
293 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipport6_uadt()
294 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipport6_uadt()
295 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipport6_uadt()
296 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || hash_ipport6_uadt()
297 tb[IPSET_ATTR_IP_TO] || hash_ipport6_uadt()
298 tb[IPSET_ATTR_CIDR])) hash_ipport6_uadt()
301 if (tb[IPSET_ATTR_LINENO]) hash_ipport6_uadt()
302 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipport6_uadt()
304 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_ipport6_uadt()
305 ip_set_get_extensions(set, tb, &ext); hash_ipport6_uadt()
309 if (tb[IPSET_ATTR_PORT]) hash_ipport6_uadt()
310 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_ipport6_uadt()
314 if (tb[IPSET_ATTR_PROTO]) { hash_ipport6_uadt()
315 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_ipport6_uadt()
326 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { hash_ipport6_uadt()
332 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_ipport6_uadt()
H A Dip_set_hash_ipmark.c103 hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipmark4_uadt() argument
113 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ipmark4_uadt()
114 !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) || hash_ipmark4_uadt()
115 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipmark4_uadt()
116 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipmark4_uadt()
117 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipmark4_uadt()
118 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipmark4_uadt()
119 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipmark4_uadt()
120 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_ipmark4_uadt()
123 if (tb[IPSET_ATTR_LINENO]) hash_ipmark4_uadt()
124 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipmark4_uadt()
126 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) || hash_ipmark4_uadt()
127 ip_set_get_extensions(set, tb, &ext); hash_ipmark4_uadt()
131 e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK])); hash_ipmark4_uadt()
135 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) { hash_ipmark4_uadt()
141 if (tb[IPSET_ATTR_IP_TO]) { hash_ipmark4_uadt()
142 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_ipmark4_uadt()
147 } else if (tb[IPSET_ATTR_CIDR]) { hash_ipmark4_uadt()
148 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_ipmark4_uadt()
237 hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ipmark6_uadt() argument
246 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ipmark6_uadt()
247 !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) || hash_ipmark6_uadt()
248 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ipmark6_uadt()
249 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ipmark6_uadt()
250 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ipmark6_uadt()
251 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ipmark6_uadt()
252 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ipmark6_uadt()
253 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || hash_ipmark6_uadt()
254 tb[IPSET_ATTR_IP_TO] || hash_ipmark6_uadt()
255 tb[IPSET_ATTR_CIDR])) hash_ipmark6_uadt()
258 if (tb[IPSET_ATTR_LINENO]) hash_ipmark6_uadt()
259 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ipmark6_uadt()
261 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_ipmark6_uadt()
262 ip_set_get_extensions(set, tb, &ext); hash_ipmark6_uadt()
266 e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK])); hash_ipmark6_uadt()
H A Dip_set_hash_netport.c158 hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netport4_uadt() argument
170 if (unlikely(!tb[IPSET_ATTR_IP] || hash_netport4_uadt()
171 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_netport4_uadt()
172 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_netport4_uadt()
173 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netport4_uadt()
174 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netport4_uadt()
175 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netport4_uadt()
176 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netport4_uadt()
177 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netport4_uadt()
178 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netport4_uadt()
179 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netport4_uadt()
182 if (tb[IPSET_ATTR_LINENO]) hash_netport4_uadt()
183 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netport4_uadt()
185 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_netport4_uadt()
186 ip_set_get_extensions(set, tb, &ext); hash_netport4_uadt()
190 if (tb[IPSET_ATTR_CIDR]) { hash_netport4_uadt()
191 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netport4_uadt()
197 if (tb[IPSET_ATTR_PORT]) hash_netport4_uadt()
198 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_netport4_uadt()
202 if (tb[IPSET_ATTR_PROTO]) { hash_netport4_uadt()
203 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_netport4_uadt()
214 with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; hash_netport4_uadt()
216 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netport4_uadt()
217 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netport4_uadt()
222 if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { hash_netport4_uadt()
230 if (tb[IPSET_ATTR_PORT_TO]) { hash_netport4_uadt()
231 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_netport4_uadt()
235 if (tb[IPSET_ATTR_IP_TO]) { hash_netport4_uadt()
236 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_netport4_uadt()
378 hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netport6_uadt() argument
390 if (unlikely(!tb[IPSET_ATTR_IP] || hash_netport6_uadt()
391 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || hash_netport6_uadt()
392 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || hash_netport6_uadt()
393 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netport6_uadt()
394 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netport6_uadt()
395 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netport6_uadt()
396 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netport6_uadt()
397 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netport6_uadt()
398 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netport6_uadt()
399 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netport6_uadt()
401 if (unlikely(tb[IPSET_ATTR_IP_TO])) hash_netport6_uadt()
404 if (tb[IPSET_ATTR_LINENO]) hash_netport6_uadt()
405 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netport6_uadt()
407 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_netport6_uadt()
408 ip_set_get_extensions(set, tb, &ext); hash_netport6_uadt()
412 if (tb[IPSET_ATTR_CIDR]) { hash_netport6_uadt()
413 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netport6_uadt()
420 if (tb[IPSET_ATTR_PORT]) hash_netport6_uadt()
421 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); hash_netport6_uadt()
425 if (tb[IPSET_ATTR_PROTO]) { hash_netport6_uadt()
426 e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); hash_netport6_uadt()
437 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netport6_uadt()
438 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netport6_uadt()
443 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { hash_netport6_uadt()
450 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); hash_netport6_uadt()
H A Dip_set_bitmap_ip.c130 bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_ip_uadt() argument
140 if (unlikely(!tb[IPSET_ATTR_IP] || bitmap_ip_uadt()
141 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_ip_uadt()
142 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || bitmap_ip_uadt()
143 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || bitmap_ip_uadt()
144 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || bitmap_ip_uadt()
145 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || bitmap_ip_uadt()
146 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) bitmap_ip_uadt()
149 if (tb[IPSET_ATTR_LINENO]) bitmap_ip_uadt()
150 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); bitmap_ip_uadt()
152 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || bitmap_ip_uadt()
153 ip_set_get_extensions(set, tb, &ext); bitmap_ip_uadt()
165 if (tb[IPSET_ATTR_IP_TO]) { bitmap_ip_uadt()
166 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); bitmap_ip_uadt()
174 } else if (tb[IPSET_ATTR_CIDR]) { bitmap_ip_uadt()
175 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); bitmap_ip_uadt()
249 bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_ip_create() argument
258 if (unlikely(!tb[IPSET_ATTR_IP] || bitmap_ip_create()
259 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_ip_create()
260 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) bitmap_ip_create()
263 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip); bitmap_ip_create()
267 if (tb[IPSET_ATTR_IP_TO]) { bitmap_ip_create()
268 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip); bitmap_ip_create()
277 } else if (tb[IPSET_ATTR_CIDR]) { bitmap_ip_create()
278 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); bitmap_ip_create()
286 if (tb[IPSET_ATTR_NETMASK]) { bitmap_ip_create()
287 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); bitmap_ip_create()
325 set->dsize = ip_set_elem_len(set, tb, 0); bitmap_ip_create()
331 if (tb[IPSET_ATTR_TIMEOUT]) { bitmap_ip_create()
332 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_ip_create()
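
bitmap_ip_create() and the uadt paths above derive the covered range either from an explicit IPSET_ATTR_IP_TO or from a CIDR prefix. A sketch of the range-from-CIDR arithmetic on host-order IPv4 addresses, roughly what the kernel's ip_set_mask_from_to() helpers compute:

    /*
     * Given a host-order IPv4 address and a prefix length, compute the
     * first and last addresses of the covered network.
     */
    #include <stdint.h>
    #include <stdio.h>

    static void range_from_cidr(uint32_t ip, unsigned int cidr,
                                uint32_t *first, uint32_t *last)
    {
        uint32_t mask = cidr ? ~((1u << (32 - cidr)) - 1) : 0;

        *first = ip & mask;
        *last = *first | ~mask;
    }

    int main(void)
    {
        uint32_t first, last;

        range_from_cidr(0x0a000042, 24, &first, &last); /* 10.0.0.66/24 */
        printf("%08x-%08x\n", first, last);             /* 0a000000-0a0000ff */
        return 0;
    }
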
H A Dip_set_hash_ip.c102 hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip4_uadt() argument
112 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ip4_uadt()
113 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ip4_uadt()
114 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ip4_uadt()
115 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ip4_uadt()
116 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ip4_uadt()
117 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ip4_uadt()
118 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_ip4_uadt()
121 if (tb[IPSET_ATTR_LINENO]) hash_ip4_uadt()
122 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ip4_uadt()
124 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_ip4_uadt()
125 ip_set_get_extensions(set, tb, &ext); hash_ip4_uadt()
139 if (tb[IPSET_ATTR_IP_TO]) { hash_ip4_uadt()
140 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_ip4_uadt()
145 } else if (tb[IPSET_ATTR_CIDR]) { hash_ip4_uadt()
146 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_ip4_uadt()
241 hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], hash_ip6_uadt() argument
250 if (unlikely(!tb[IPSET_ATTR_IP] || hash_ip6_uadt()
251 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_ip6_uadt()
252 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_ip6_uadt()
253 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_ip6_uadt()
254 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_ip6_uadt()
255 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_ip6_uadt()
256 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || hash_ip6_uadt()
257 tb[IPSET_ATTR_IP_TO] || hash_ip6_uadt()
258 tb[IPSET_ATTR_CIDR])) hash_ip6_uadt()
261 if (tb[IPSET_ATTR_LINENO]) hash_ip6_uadt()
262 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_ip6_uadt()
264 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_ip6_uadt()
265 ip_set_get_extensions(set, tb, &ext); hash_ip6_uadt()
H A Dip_set_hash_net.c140 hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], hash_net4_uadt() argument
150 if (unlikely(!tb[IPSET_ATTR_IP] || hash_net4_uadt()
151 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_net4_uadt()
152 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_net4_uadt()
153 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_net4_uadt()
154 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_net4_uadt()
155 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_net4_uadt()
156 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_net4_uadt()
157 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_net4_uadt()
160 if (tb[IPSET_ATTR_LINENO]) hash_net4_uadt()
161 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_net4_uadt()
163 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_net4_uadt()
164 ip_set_get_extensions(set, tb, &ext); hash_net4_uadt()
168 if (tb[IPSET_ATTR_CIDR]) { hash_net4_uadt()
169 e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_net4_uadt()
174 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_net4_uadt()
175 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_net4_uadt()
180 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { hash_net4_uadt()
188 if (tb[IPSET_ATTR_IP_TO]) { hash_net4_uadt()
189 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_net4_uadt()
313 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[], hash_net6_uadt() argument
321 if (unlikely(!tb[IPSET_ATTR_IP] || hash_net6_uadt()
322 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_net6_uadt()
323 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_net6_uadt()
324 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_net6_uadt()
325 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_net6_uadt()
326 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_net6_uadt()
327 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_net6_uadt()
328 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_net6_uadt()
330 if (unlikely(tb[IPSET_ATTR_IP_TO])) hash_net6_uadt()
333 if (tb[IPSET_ATTR_LINENO]) hash_net6_uadt()
334 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_net6_uadt()
336 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_net6_uadt()
337 ip_set_get_extensions(set, tb, &ext); hash_net6_uadt()
341 if (tb[IPSET_ATTR_CIDR]) hash_net6_uadt()
342 e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_net6_uadt()
349 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_net6_uadt()
350 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_net6_uadt()
H A Dip_set_bitmap_port.c128 bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_port_uadt() argument
139 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || bitmap_port_uadt()
140 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || bitmap_port_uadt()
141 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_port_uadt()
142 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || bitmap_port_uadt()
143 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || bitmap_port_uadt()
144 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || bitmap_port_uadt()
145 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || bitmap_port_uadt()
146 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) bitmap_port_uadt()
149 if (tb[IPSET_ATTR_LINENO]) bitmap_port_uadt()
150 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); bitmap_port_uadt()
152 port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); bitmap_port_uadt()
155 ret = ip_set_get_extensions(set, tb, &ext); bitmap_port_uadt()
164 if (tb[IPSET_ATTR_PORT_TO]) { bitmap_port_uadt()
165 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); bitmap_port_uadt()
235 bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_port_create() argument
241 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || bitmap_port_create()
242 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) || bitmap_port_create()
243 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_port_create()
244 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) bitmap_port_create()
247 first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); bitmap_port_create()
248 last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); bitmap_port_create()
263 set->dsize = ip_set_elem_len(set, tb, 0); bitmap_port_create()
268 if (tb[IPSET_ATTR_TIMEOUT]) { bitmap_port_create()
269 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_port_create()
H A Dip_set_hash_netiface.c286 hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], hash_netiface4_uadt() argument
297 if (unlikely(!tb[IPSET_ATTR_IP] || hash_netiface4_uadt()
298 !tb[IPSET_ATTR_IFACE] || hash_netiface4_uadt()
299 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netiface4_uadt()
300 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netiface4_uadt()
301 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netiface4_uadt()
302 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netiface4_uadt()
303 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netiface4_uadt()
304 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netiface4_uadt()
305 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netiface4_uadt()
308 if (tb[IPSET_ATTR_LINENO]) hash_netiface4_uadt()
309 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netiface4_uadt()
311 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || hash_netiface4_uadt()
312 ip_set_get_extensions(set, tb, &ext); hash_netiface4_uadt()
316 if (tb[IPSET_ATTR_CIDR]) { hash_netiface4_uadt()
317 e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netiface4_uadt()
322 strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); hash_netiface4_uadt()
334 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netiface4_uadt()
335 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netiface4_uadt()
341 if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { hash_netiface4_uadt()
348 if (tb[IPSET_ATTR_IP_TO]) { hash_netiface4_uadt()
349 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); hash_netiface4_uadt()
523 hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], hash_netiface6_uadt() argument
533 if (unlikely(!tb[IPSET_ATTR_IP] || hash_netiface6_uadt()
534 !tb[IPSET_ATTR_IFACE] || hash_netiface6_uadt()
535 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_netiface6_uadt()
536 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || hash_netiface6_uadt()
537 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_netiface6_uadt()
538 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_netiface6_uadt()
539 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_netiface6_uadt()
540 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_netiface6_uadt()
541 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_netiface6_uadt()
543 if (unlikely(tb[IPSET_ATTR_IP_TO])) hash_netiface6_uadt()
546 if (tb[IPSET_ATTR_LINENO]) hash_netiface6_uadt()
547 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_netiface6_uadt()
549 ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || hash_netiface6_uadt()
550 ip_set_get_extensions(set, tb, &ext); hash_netiface6_uadt()
554 if (tb[IPSET_ATTR_CIDR]) hash_netiface6_uadt()
555 e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); hash_netiface6_uadt()
560 strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE])); hash_netiface6_uadt()
572 if (tb[IPSET_ATTR_CADT_FLAGS]) { hash_netiface6_uadt()
573 u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); hash_netiface6_uadt()
H A Dip_set_bitmap_ipmac.c231 bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], bitmap_ipmac_uadt() argument
241 if (unlikely(!tb[IPSET_ATTR_IP] || bitmap_ipmac_uadt()
242 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_ipmac_uadt()
243 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || bitmap_ipmac_uadt()
244 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || bitmap_ipmac_uadt()
245 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || bitmap_ipmac_uadt()
246 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || bitmap_ipmac_uadt()
247 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) bitmap_ipmac_uadt()
250 if (tb[IPSET_ATTR_LINENO]) bitmap_ipmac_uadt()
251 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); bitmap_ipmac_uadt()
253 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) || bitmap_ipmac_uadt()
254 ip_set_get_extensions(set, tb, &ext); bitmap_ipmac_uadt()
262 if (tb[IPSET_ATTR_ETHER]) bitmap_ipmac_uadt()
263 e.ether = nla_data(tb[IPSET_ATTR_ETHER]); bitmap_ipmac_uadt()
316 bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], bitmap_ipmac_create() argument
324 if (unlikely(!tb[IPSET_ATTR_IP] || bitmap_ipmac_create()
325 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || bitmap_ipmac_create()
326 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) bitmap_ipmac_create()
329 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip); bitmap_ipmac_create()
333 if (tb[IPSET_ATTR_IP_TO]) { bitmap_ipmac_create()
334 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip); bitmap_ipmac_create()
343 } else if (tb[IPSET_ATTR_CIDR]) { bitmap_ipmac_create()
344 u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); bitmap_ipmac_create()
363 set->dsize = ip_set_elem_len(set, tb, bitmap_ipmac_create()
369 if (tb[IPSET_ATTR_TIMEOUT]) { bitmap_ipmac_create()
370 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); bitmap_ipmac_create()
H A Dip_set_hash_mac.c98 hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[], hash_mac4_uadt() argument
106 if (unlikely(!tb[IPSET_ATTR_ETHER] || hash_mac4_uadt()
107 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || hash_mac4_uadt()
108 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || hash_mac4_uadt()
109 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || hash_mac4_uadt()
110 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || hash_mac4_uadt()
111 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || hash_mac4_uadt()
112 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) hash_mac4_uadt()
115 if (tb[IPSET_ATTR_LINENO]) hash_mac4_uadt()
116 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); hash_mac4_uadt()
118 ret = ip_set_get_extensions(set, tb, &ext); hash_mac4_uadt()
121 memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN); hash_mac4_uadt()
H A Dip_set_list_set.c376 list_set_uadt(struct ip_set *set, struct nlattr *tb[], list_set_uadt() argument
386 if (unlikely(!tb[IPSET_ATTR_NAME] || list_set_uadt()
387 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || list_set_uadt()
388 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) || list_set_uadt()
389 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || list_set_uadt()
390 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || list_set_uadt()
391 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || list_set_uadt()
392 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || list_set_uadt()
393 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) list_set_uadt()
396 if (tb[IPSET_ATTR_LINENO]) list_set_uadt()
397 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); list_set_uadt()
399 ret = ip_set_get_extensions(set, tb, &ext); list_set_uadt()
402 e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s); list_set_uadt()
411 if (tb[IPSET_ATTR_CADT_FLAGS]) { list_set_uadt()
412 u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); list_set_uadt()
416 if (e.before && !tb[IPSET_ATTR_NAMEREF]) { list_set_uadt()
421 if (tb[IPSET_ATTR_NAMEREF]) { list_set_uadt()
423 nla_data(tb[IPSET_ATTR_NAMEREF]), list_set_uadt()
631 list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], list_set_create() argument
636 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) || list_set_create()
637 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || list_set_create()
638 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) list_set_create()
641 if (tb[IPSET_ATTR_SIZE]) list_set_create()
642 size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]); list_set_create()
647 set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem)); list_set_create()
650 if (tb[IPSET_ATTR_TIMEOUT]) { list_set_create()
651 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); list_set_create()
H A Dip_set_core.c292 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1]; ip_set_get_ipaddr4() local
296 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy)) ip_set_get_ipaddr4()
298 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) ip_set_get_ipaddr4()
301 *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]); ip_set_get_ipaddr4()
309 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1]; ip_set_get_ipaddr6() local
314 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy)) ip_set_get_ipaddr6()
316 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) ip_set_get_ipaddr6()
319 memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]), ip_set_get_ipaddr6()
357 add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[]) add_extension() argument
361 !!tb[IPSET_ATTR_TIMEOUT]; add_extension()
365 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len) ip_set_elem_len() argument
371 if (tb[IPSET_ATTR_CADT_FLAGS]) ip_set_elem_len()
372 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); ip_set_elem_len()
376 if (!add_extension(id, cadt_flags, tb)) ip_set_elem_len()
388 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], ip_set_get_extensions() argument
392 if (tb[IPSET_ATTR_TIMEOUT]) { ip_set_get_extensions()
395 ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); ip_set_get_extensions()
397 if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) { ip_set_get_extensions()
400 if (tb[IPSET_ATTR_BYTES]) ip_set_get_extensions()
402 tb[IPSET_ATTR_BYTES])); ip_set_get_extensions()
403 if (tb[IPSET_ATTR_PACKETS]) ip_set_get_extensions()
405 tb[IPSET_ATTR_PACKETS])); ip_set_get_extensions()
407 if (tb[IPSET_ATTR_COMMENT]) { ip_set_get_extensions()
410 ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]); ip_set_get_extensions()
412 if (tb[IPSET_ATTR_SKBMARK]) { ip_set_get_extensions()
415 fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK])); ip_set_get_extensions()
419 if (tb[IPSET_ATTR_SKBPRIO]) { ip_set_get_extensions()
423 tb[IPSET_ATTR_SKBPRIO])); ip_set_get_extensions()
425 if (tb[IPSET_ATTR_SKBQUEUE]) { ip_set_get_extensions()
429 tb[IPSET_ATTR_SKBQUEUE])); ip_set_get_extensions()
707 protocol_failed(const struct nlattr * const tb[]) protocol_failed() argument
709 return !tb[IPSET_ATTR_PROTOCOL] || protocol_failed()
710 nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL; protocol_failed()
819 struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {}; ip_set_create() local
869 nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], ip_set_create()
875 ret = set->type->create(net, set, tb, flags); ip_set_create()
1402 struct nlattr *tb[], enum ipset_adt adt, call_ad()
1411 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); call_ad()
1465 struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {}; ip_set_uadd() local
1488 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, ip_set_uadd()
1492 ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags, ip_set_uadd()
1498 memset(tb, 0, sizeof(tb)); nla_for_each_nested()
1501 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, nla_for_each_nested()
1504 ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, nla_for_each_nested()
1520 struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {}; ip_set_udel() local
1543 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, ip_set_udel()
1547 ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags, ip_set_udel()
1553 memset(tb, 0, sizeof(*tb)); nla_for_each_nested()
1556 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla, nla_for_each_nested()
1559 ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, nla_for_each_nested()
1575 struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {}; ip_set_utest() local
1588 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], ip_set_utest()
1593 ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); ip_set_utest()
1401 call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 flags, bool use_lineno) call_ad() argument
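
Note the asymmetry between the two retry loops above: ip_set_uadd clears the attribute table with memset(tb, 0, sizeof(tb)), the whole array, while ip_set_udel uses sizeof(*tb), which is the size of a single pointer and so leaves the rest of the table stale between iterations. A two-line demonstration of the difference; 14 stands in for IPSET_ATTR_ADT_MAX + 1, whose real value is beside the point:

    #include <stdio.h>

    int main(void)
    {
        void *tb[14];

        printf("sizeof(tb)=%zu sizeof(*tb)=%zu\n", sizeof(tb), sizeof(*tb));
        /* e.g. 112 vs 8 on LP64: memset(tb, 0, sizeof(*tb)) zeroes
         * tb[0] only */
        return 0;
    }
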
H A Dip_set_hash_gen.h1015 IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
1037 struct nlattr *tb[], u32 flags) IPSET_TOKEN()
1065 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) || IPSET_TOKEN()
1066 !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) || IPSET_TOKEN()
1068 !ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK) || IPSET_TOKEN()
1070 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || IPSET_TOKEN()
1071 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) IPSET_TOKEN()
1074 if (tb[IPSET_ATTR_HASHSIZE]) { IPSET_TOKEN()
1075 hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]); IPSET_TOKEN()
1080 if (tb[IPSET_ATTR_MAXELEM]) IPSET_TOKEN()
1081 maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]); IPSET_TOKEN()
1084 if (tb[IPSET_ATTR_NETMASK]) { IPSET_TOKEN()
1085 netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]); IPSET_TOKEN()
1094 if (tb[IPSET_ATTR_MARKMASK]) { IPSET_TOKEN()
1095 markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK])); IPSET_TOKEN()
1139 set->dsize = ip_set_elem_len(set, tb, IPSET_TOKEN()
1144 set->dsize = ip_set_elem_len(set, tb, IPSET_TOKEN()
1148 if (tb[IPSET_ATTR_TIMEOUT]) { IPSET_TOKEN()
1149 set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); IPSET_TOKEN()
/linux-4.1.27/arch/sparc/mm/
H A Dtlb.c25 struct tlb_batch *tb = &get_cpu_var(tlb_batch); flush_tlb_pending() local
26 struct mm_struct *mm = tb->mm; flush_tlb_pending()
28 if (!tb->tlb_nr) flush_tlb_pending()
31 flush_tsb_user(tb); flush_tlb_pending()
34 if (tb->tlb_nr == 1) { flush_tlb_pending()
35 global_flush_tlb_page(mm, tb->vaddrs[0]); flush_tlb_pending()
38 smp_flush_tlb_pending(tb->mm, tb->tlb_nr, flush_tlb_pending()
39 &tb->vaddrs[0]); flush_tlb_pending()
41 __flush_tlb_pending(CTX_HWBITS(tb->mm->context), flush_tlb_pending()
42 tb->tlb_nr, &tb->vaddrs[0]); flush_tlb_pending()
47 tb->tlb_nr = 0; flush_tlb_pending()
55 struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); arch_enter_lazy_mmu_mode() local
57 tb->active = 1; arch_enter_lazy_mmu_mode()
62 struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); arch_leave_lazy_mmu_mode() local
64 if (tb->tlb_nr) arch_leave_lazy_mmu_mode()
66 tb->active = 0; arch_leave_lazy_mmu_mode()
72 struct tlb_batch *tb = &get_cpu_var(tlb_batch); tlb_batch_add_one() local
79 nr = tb->tlb_nr; tlb_batch_add_one()
81 if (unlikely(nr != 0 && mm != tb->mm)) { tlb_batch_add_one()
86 if (!tb->active) { tlb_batch_add_one()
93 tb->mm = mm; tlb_batch_add_one()
95 tb->vaddrs[nr] = vaddr; tlb_batch_add_one()
96 tb->tlb_nr = ++nr; tlb_batch_add_one()
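
tlb.c batches flushes: tlb_batch_add_one() queues virtual addresses per mm and flush_tlb_pending() drains them in one shot, either when the batch fills or when the mm changes under it. A userspace model of that batching, with an int ctx standing in for tb->mm and a printf standing in for the flush:

    /*
     * Model of the deferred-flush batching in tlb.c: addresses queue
     * per context and drain in one call on overflow or context switch.
     */
    #include <stdio.h>

    #define TLB_BATCH_NR 4

    struct tlb_batch {
        int ctx;                        /* stands in for tb->mm */
        unsigned int nr;
        unsigned long vaddrs[TLB_BATCH_NR];
    };

    static void flush_pending(struct tlb_batch *tb)
    {
        if (!tb->nr)
            return;
        printf("flush ctx=%d nr=%u\n", tb->ctx, tb->nr);
        tb->nr = 0;
    }

    static void batch_add(struct tlb_batch *tb, int ctx, unsigned long vaddr)
    {
        if (tb->nr && tb->ctx != ctx)   /* context switch: drain first */
            flush_pending(tb);
        tb->ctx = ctx;
        tb->vaddrs[tb->nr++] = vaddr;
        if (tb->nr == TLB_BATCH_NR)     /* batch full: drain */
            flush_pending(tb);
    }

    int main(void)
    {
        struct tlb_batch tb = { 0 };

        for (unsigned long v = 0; v < 6; v++)
            batch_add(&tb, 1, v << 12);
        batch_add(&tb, 2, 0x7000);
        flush_pending(&tb);
        return 0;
    }
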
H A Dtsb.c63 static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, __flush_tsb_one() argument
68 for (i = 0; i < tb->tlb_nr; i++) __flush_tsb_one()
69 __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); __flush_tsb_one()
72 void flush_tsb_user(struct tlb_batch *tb) flush_tsb_user() argument
74 struct mm_struct *mm = tb->mm; flush_tsb_user()
83 __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); flush_tsb_user()
91 __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries); flush_tsb_user()
/linux-4.1.27/arch/powerpc/kernel/
H A Dsmp-tbsync.c24 volatile u64 tb; member in struct:__anon2361
47 u64 tb; smp_generic_take_timebase() local
62 tb = tbsync->tb; smp_generic_take_timebase()
71 set_tb(tb >> 32, tb & 0xfffffffful); smp_generic_take_timebase()
80 u64 tb; start_contest() local
87 tb = get_tb() + 400; start_contest()
88 tbsync->tb = tb + offset; start_contest()
89 tbsync->mark = mark = tb + 400; start_contest()
97 while (get_tb() <= tb) start_contest()
H A Dswsusp_asm64.S196 /* load saved tb */
200 /* clear tb lower to avoid wrap */
203 /* set tb upper */
205 /* set tb lower */
/linux-4.1.27/net/netfilter/
H A Dnfnetlink_cthelper.c71 struct nlattr *tb[NFCTH_TUPLE_MAX+1]; nfnl_cthelper_parse_tuple() local
73 err = nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol); nfnl_cthelper_parse_tuple()
77 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) nfnl_cthelper_parse_tuple()
83 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); nfnl_cthelper_parse_tuple()
84 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); nfnl_cthelper_parse_tuple()
131 struct nlattr *tb[NFCTH_POLICY_MAX+1]; nfnl_cthelper_expect_policy() local
133 err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol); nfnl_cthelper_expect_policy()
137 if (!tb[NFCTH_POLICY_NAME] || nfnl_cthelper_expect_policy()
138 !tb[NFCTH_POLICY_EXPECT_MAX] || nfnl_cthelper_expect_policy()
139 !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) nfnl_cthelper_expect_policy()
143 nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); nfnl_cthelper_expect_policy()
145 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); nfnl_cthelper_expect_policy()
147 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); nfnl_cthelper_expect_policy()
163 struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; nfnl_cthelper_parse_expect_policy() local
165 ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, nfnl_cthelper_parse_expect_policy()
170 if (!tb[NFCTH_POLICY_SET_NUM]) nfnl_cthelper_parse_expect_policy()
174 ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); nfnl_cthelper_parse_expect_policy()
186 if (!tb[NFCTH_POLICY_SET+i]) nfnl_cthelper_parse_expect_policy()
190 tb[NFCTH_POLICY_SET+i]); nfnl_cthelper_parse_expect_policy()
202 nfnl_cthelper_create(const struct nlattr * const tb[], nfnl_cthelper_create() argument
208 if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) nfnl_cthelper_create()
215 ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); nfnl_cthelper_create()
219 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); nfnl_cthelper_create()
220 helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); nfnl_cthelper_create()
230 if (tb[NFCTH_QUEUE_NUM]) nfnl_cthelper_create()
231 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); nfnl_cthelper_create()
233 if (tb[NFCTH_STATUS]) { nfnl_cthelper_create()
234 int status = ntohl(nla_get_be32(tb[NFCTH_STATUS])); nfnl_cthelper_create()
257 nfnl_cthelper_update(const struct nlattr * const tb[], nfnl_cthelper_update() argument
262 if (tb[NFCTH_PRIV_DATA_LEN]) nfnl_cthelper_update()
265 if (tb[NFCTH_POLICY]) { nfnl_cthelper_update()
267 tb[NFCTH_POLICY]); nfnl_cthelper_update()
271 if (tb[NFCTH_QUEUE_NUM]) nfnl_cthelper_update()
272 helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM])); nfnl_cthelper_update()
274 if (tb[NFCTH_STATUS]) { nfnl_cthelper_update()
275 int status = ntohl(nla_get_be32(tb[NFCTH_STATUS])); nfnl_cthelper_update()
291 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_new()
298 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) nfnl_cthelper_new()
301 helper_name = nla_data(tb[NFCTH_NAME]); nfnl_cthelper_new()
303 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); nfnl_cthelper_new()
334 ret = nfnl_cthelper_create(tb, &tuple); nfnl_cthelper_new()
336 ret = nfnl_cthelper_update(tb, helper); nfnl_cthelper_new()
503 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_get()
519 if (tb[NFCTH_NAME]) nfnl_cthelper_get()
520 helper_name = nla_data(tb[NFCTH_NAME]); nfnl_cthelper_get()
522 if (tb[NFCTH_TUPLE]) { nfnl_cthelper_get()
523 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); nfnl_cthelper_get()
575 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_del()
584 if (tb[NFCTH_NAME]) nfnl_cthelper_del()
585 helper_name = nla_data(tb[NFCTH_NAME]); nfnl_cthelper_del()
587 if (tb[NFCTH_TUPLE]) { nfnl_cthelper_del()
588 ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]); nfnl_cthelper_del()
290 nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_new() argument
502 nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_get() argument
574 nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_cthelper_del() argument
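All three nfnl_cthelper parsers above follow the same nested-attribute idiom: declare a tb[] array sized MAX+1 on the stack, run a policy-checked nla_parse_nested(), then test mandatory attributes for presence before dereferencing them. A minimal sketch of that idiom against the 4.1-era four-argument nla_parse_nested() (the EXAMPLE_* names and parse_example() wrapper are hypothetical):

	#include <net/netlink.h>

	enum { EXAMPLE_UNSPEC, EXAMPLE_VALUE, __EXAMPLE_MAX };
	#define EXAMPLE_MAX (__EXAMPLE_MAX - 1)

	static const struct nla_policy example_policy[EXAMPLE_MAX + 1] = {
		[EXAMPLE_VALUE] = { .type = NLA_U32 },
	};

	static int parse_example(const struct nlattr *attr, u32 *out)
	{
		struct nlattr *tb[EXAMPLE_MAX + 1];
		int err;

		err = nla_parse_nested(tb, EXAMPLE_MAX, attr, example_policy);
		if (err < 0)
			return err;

		if (!tb[EXAMPLE_VALUE])		/* mandatory attribute missing */
			return -EINVAL;

		*out = ntohl(nla_get_be32(tb[EXAMPLE_VALUE]));
		return 0;
	}

Absent optional attributes simply stay NULL in tb[], which is why nearly every consumer in these results guards with an if (tb[...]) test before reading.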
H A Dnft_payload.c63 const struct nlattr * const tb[]) nft_payload_init()
67 priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); nft_payload_init()
68 priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); nft_payload_init()
69 priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); nft_payload_init()
70 priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]); nft_payload_init()
110 const struct nlattr * const tb[]) nft_payload_select_ops()
115 if (tb[NFTA_PAYLOAD_DREG] == NULL || nft_payload_select_ops()
116 tb[NFTA_PAYLOAD_BASE] == NULL || nft_payload_select_ops()
117 tb[NFTA_PAYLOAD_OFFSET] == NULL || nft_payload_select_ops()
118 tb[NFTA_PAYLOAD_LEN] == NULL) nft_payload_select_ops()
121 base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); nft_payload_select_ops()
131 offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET])); nft_payload_select_ops()
132 len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN])); nft_payload_select_ops()
61 nft_payload_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_payload_init() argument
109 nft_payload_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_payload_select_ops() argument
H A Dnft_bitwise.c51 const struct nlattr * const tb[]) nft_bitwise_init()
57 if (tb[NFTA_BITWISE_SREG] == NULL || nft_bitwise_init()
58 tb[NFTA_BITWISE_DREG] == NULL || nft_bitwise_init()
59 tb[NFTA_BITWISE_LEN] == NULL || nft_bitwise_init()
60 tb[NFTA_BITWISE_MASK] == NULL || nft_bitwise_init()
61 tb[NFTA_BITWISE_XOR] == NULL) nft_bitwise_init()
64 priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN])); nft_bitwise_init()
65 priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]); nft_bitwise_init()
70 priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]); nft_bitwise_init()
77 tb[NFTA_BITWISE_MASK]); nft_bitwise_init()
84 tb[NFTA_BITWISE_XOR]); nft_bitwise_init()
49 nft_bitwise_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_bitwise_init() argument
H A Dnft_byteorder.c79 const struct nlattr * const tb[]) nft_byteorder_init()
84 if (tb[NFTA_BYTEORDER_SREG] == NULL || nft_byteorder_init()
85 tb[NFTA_BYTEORDER_DREG] == NULL || nft_byteorder_init()
86 tb[NFTA_BYTEORDER_LEN] == NULL || nft_byteorder_init()
87 tb[NFTA_BYTEORDER_SIZE] == NULL || nft_byteorder_init()
88 tb[NFTA_BYTEORDER_OP] == NULL) nft_byteorder_init()
91 priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP])); nft_byteorder_init()
100 priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE])); nft_byteorder_init()
109 priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]); nft_byteorder_init()
110 priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN])); nft_byteorder_init()
115 priv->dreg = nft_parse_register(tb[NFTA_BYTEORDER_DREG]); nft_byteorder_init()
77 nft_byteorder_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_byteorder_init() argument
H A Dnft_log.c51 const struct nlattr * const tb[]) nft_log_init()
58 nla = tb[NFTA_LOG_PREFIX]; nft_log_init()
69 if (tb[NFTA_LOG_LEVEL] != NULL && nft_log_init()
70 tb[NFTA_LOG_GROUP] != NULL) nft_log_init()
72 if (tb[NFTA_LOG_GROUP] != NULL) nft_log_init()
77 if (tb[NFTA_LOG_LEVEL] != NULL) { nft_log_init()
79 ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL])); nft_log_init()
83 if (tb[NFTA_LOG_FLAGS] != NULL) { nft_log_init()
85 ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS])); nft_log_init()
89 li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP])); nft_log_init()
90 if (tb[NFTA_LOG_SNAPLEN] != NULL) { nft_log_init()
92 ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN])); nft_log_init()
94 if (tb[NFTA_LOG_QTHRESHOLD] != NULL) { nft_log_init()
96 ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD])); nft_log_init()
49 nft_log_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_log_init() argument
H A Dnft_redir.c44 const struct nlattr * const tb[]) nft_redir_init()
55 if (tb[NFTA_REDIR_REG_PROTO_MIN]) { nft_redir_init()
57 nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]); nft_redir_init()
63 if (tb[NFTA_REDIR_REG_PROTO_MAX]) { nft_redir_init()
65 nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]); nft_redir_init()
76 if (tb[NFTA_REDIR_FLAGS]) { nft_redir_init()
77 priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS])); nft_redir_init()
42 nft_redir_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_redir_init() argument
H A Dnft_cmp.c72 const struct nlattr * const tb[]) nft_cmp_init()
79 tb[NFTA_CMP_DATA]); nft_cmp_init()
82 priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); nft_cmp_init()
87 priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP])); nft_cmp_init()
121 const struct nlattr * const tb[]) nft_cmp_fast_init()
130 tb[NFTA_CMP_DATA]); nft_cmp_fast_init()
133 priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); nft_cmp_fast_init()
175 nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_cmp_select_ops() argument
182 if (tb[NFTA_CMP_SREG] == NULL || nft_cmp_select_ops()
183 tb[NFTA_CMP_OP] == NULL || nft_cmp_select_ops()
184 tb[NFTA_CMP_DATA] == NULL) nft_cmp_select_ops()
187 op = ntohl(nla_get_be32(tb[NFTA_CMP_OP])); nft_cmp_select_ops()
201 tb[NFTA_CMP_DATA]); nft_cmp_select_ops()
71 nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_cmp_init() argument
119 nft_cmp_fast_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_cmp_fast_init() argument
H A Dnft_exthdr.c59 const struct nlattr * const tb[]) nft_exthdr_init()
63 if (tb[NFTA_EXTHDR_DREG] == NULL || nft_exthdr_init()
64 tb[NFTA_EXTHDR_TYPE] == NULL || nft_exthdr_init()
65 tb[NFTA_EXTHDR_OFFSET] == NULL || nft_exthdr_init()
66 tb[NFTA_EXTHDR_LEN] == NULL) nft_exthdr_init()
69 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); nft_exthdr_init()
70 priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); nft_exthdr_init()
71 priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); nft_exthdr_init()
72 priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); nft_exthdr_init()
57 nft_exthdr_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_exthdr_init() argument
H A Dnft_lookup.c54 const struct nlattr * const tb[]) nft_lookup_init()
60 if (tb[NFTA_LOOKUP_SET] == NULL || nft_lookup_init()
61 tb[NFTA_LOOKUP_SREG] == NULL) nft_lookup_init()
64 set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]); nft_lookup_init()
66 if (tb[NFTA_LOOKUP_SET_ID]) { nft_lookup_init()
68 tb[NFTA_LOOKUP_SET_ID]); nft_lookup_init()
77 priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]); nft_lookup_init()
82 if (tb[NFTA_LOOKUP_DREG] != NULL) { nft_lookup_init()
86 priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]); nft_lookup_init()
52 nft_lookup_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_lookup_init() argument
H A Dnfnetlink_acct.c53 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_new()
60 if (!tb[NFACCT_NAME]) nfnl_acct_new()
63 acct_name = nla_data(tb[NFACCT_NAME]); nfnl_acct_new()
93 if (tb[NFACCT_FLAGS]) { nfnl_acct_new()
94 flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS])); nfnl_acct_new()
112 *quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA])); nfnl_acct_new()
116 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); nfnl_acct_new()
118 if (tb[NFACCT_BYTES]) { nfnl_acct_new()
120 be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES]))); nfnl_acct_new()
122 if (tb[NFACCT_PKTS]) { nfnl_acct_new()
124 be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]))); nfnl_acct_new()
239 struct nlattr *tb[NFACCT_FILTER_MAX + 1]; nfacct_filter_alloc() local
242 err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy); nfacct_filter_alloc()
250 filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK])); nfacct_filter_alloc()
251 filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE])); nfacct_filter_alloc()
258 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_get()
270 if (tb[NFACCT_FILTER]) { nfnl_acct_get()
273 filter = nfacct_filter_alloc(tb[NFACCT_FILTER]); nfnl_acct_get()
282 if (!tb[NFACCT_NAME]) nfnl_acct_get()
284 acct_name = nla_data(tb[NFACCT_NAME]); nfnl_acct_get()
337 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_del()
343 if (!tb[NFACCT_NAME]) { nfnl_acct_del()
349 acct_name = nla_data(tb[NFACCT_NAME]); nfnl_acct_del()
52 nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_new() argument
257 nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_get() argument
336 nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_acct_del() argument
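nfnl_acct_new() above treats NFACCT_BYTES and NFACCT_PKTS as optional seeds and falls back to zeroed counters when they are absent. A tiny sketch of that optional-with-default read (the helper name is hypothetical; be64_to_cpu()/nla_get_be64() as in the listing):

	/* Sketch: optional 64-bit big-endian attribute, defaulting to 0. */
	static u64 nla_get_be64_or_zero(const struct nlattr *attr)
	{
		return attr ? be64_to_cpu(nla_get_be64(attr)) : 0;
	}

With such a helper the two if (tb[NFACCT_...]) blocks collapse into single atomic64_set() calls.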
H A Dnft_nat.c118 const struct nlattr * const tb[]) nft_nat_init()
125 if (tb[NFTA_NAT_TYPE] == NULL || nft_nat_init()
126 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL && nft_nat_init()
127 tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) nft_nat_init()
130 switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { nft_nat_init()
145 if (tb[NFTA_NAT_FAMILY] == NULL) nft_nat_init()
148 family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY])); nft_nat_init()
164 if (tb[NFTA_NAT_REG_ADDR_MIN]) { nft_nat_init()
166 nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]); nft_nat_init()
171 if (tb[NFTA_NAT_REG_ADDR_MAX]) { nft_nat_init()
173 nft_parse_register(tb[NFTA_NAT_REG_ADDR_MAX]); nft_nat_init()
185 if (tb[NFTA_NAT_REG_PROTO_MIN]) { nft_nat_init()
187 nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]); nft_nat_init()
193 if (tb[NFTA_NAT_REG_PROTO_MAX]) { nft_nat_init()
195 nft_parse_register(tb[NFTA_NAT_REG_PROTO_MAX]); nft_nat_init()
206 if (tb[NFTA_NAT_FLAGS]) { nft_nat_init()
207 priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); nft_nat_init()
117 nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_nat_init() argument
H A Dnft_ct.c224 const struct nlattr * const tb[]) nft_ct_get_init()
230 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); nft_ct_get_init()
233 if (tb[NFTA_CT_DIRECTION] != NULL) nft_ct_get_init()
246 if (tb[NFTA_CT_DIRECTION] != NULL) nft_ct_get_init()
252 if (tb[NFTA_CT_DIRECTION] != NULL) nft_ct_get_init()
258 if (tb[NFTA_CT_DIRECTION] != NULL) nft_ct_get_init()
265 if (tb[NFTA_CT_DIRECTION] == NULL) nft_ct_get_init()
271 if (tb[NFTA_CT_DIRECTION] == NULL) nft_ct_get_init()
290 if (tb[NFTA_CT_DIRECTION] == NULL) nft_ct_get_init()
298 if (tb[NFTA_CT_DIRECTION] != NULL) { nft_ct_get_init()
299 priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]); nft_ct_get_init()
309 priv->dreg = nft_parse_register(tb[NFTA_CT_DREG]); nft_ct_get_init()
324 const struct nlattr * const tb[]) nft_ct_set_init()
330 priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY])); nft_ct_set_init()
341 priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]); nft_ct_set_init()
421 const struct nlattr * const tb[]) nft_ct_select_ops()
423 if (tb[NFTA_CT_KEY] == NULL) nft_ct_select_ops()
426 if (tb[NFTA_CT_DREG] && tb[NFTA_CT_SREG]) nft_ct_select_ops()
429 if (tb[NFTA_CT_DREG]) nft_ct_select_ops()
432 if (tb[NFTA_CT_SREG]) nft_ct_select_ops()
222 nft_ct_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_ct_get_init() argument
322 nft_ct_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_ct_set_init() argument
420 nft_ct_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_ct_select_ops() argument
H A Dnft_queue.c65 const struct nlattr * const tb[]) nft_queue_init()
69 if (tb[NFTA_QUEUE_NUM] == NULL) nft_queue_init()
73 priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); nft_queue_init()
75 if (tb[NFTA_QUEUE_TOTAL] != NULL) nft_queue_init()
76 priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL])); nft_queue_init()
77 if (tb[NFTA_QUEUE_FLAGS] != NULL) { nft_queue_init()
78 priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS])); nft_queue_init()
63 nft_queue_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_queue_init() argument
H A Dnf_nat_proto_common.c99 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], nf_nat_l4proto_nlattr_to_range() argument
102 if (tb[CTA_PROTONAT_PORT_MIN]) { nf_nat_l4proto_nlattr_to_range()
103 range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]); nf_nat_l4proto_nlattr_to_range()
107 if (tb[CTA_PROTONAT_PORT_MAX]) { nf_nat_l4proto_nlattr_to_range()
108 range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]); nf_nat_l4proto_nlattr_to_range()
H A Dnft_counter.c68 const struct nlattr * const tb[]) nft_counter_init()
72 if (tb[NFTA_COUNTER_PACKETS]) nft_counter_init()
73 priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); nft_counter_init()
74 if (tb[NFTA_COUNTER_BYTES]) nft_counter_init()
75 priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); nft_counter_init()
66 nft_counter_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_counter_init() argument
H A Dnft_limit.c58 const struct nlattr * const tb[]) nft_limit_init()
62 if (tb[NFTA_LIMIT_RATE] == NULL || nft_limit_init()
63 tb[NFTA_LIMIT_UNIT] == NULL) nft_limit_init()
66 priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE])); nft_limit_init()
67 priv->unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT])); nft_limit_init()
56 nft_limit_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_limit_init() argument
H A Dnft_reject.c31 const struct nlattr * const tb[]) nft_reject_init()
35 if (tb[NFTA_REJECT_TYPE] == NULL) nft_reject_init()
38 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); nft_reject_init()
41 if (tb[NFTA_REJECT_ICMP_CODE] == NULL) nft_reject_init()
43 priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); nft_reject_init()
29 nft_reject_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_reject_init() argument
H A Dnft_dynset.c102 const struct nlattr * const tb[]) nft_dynset_init()
109 if (tb[NFTA_DYNSET_SET_NAME] == NULL || nft_dynset_init()
110 tb[NFTA_DYNSET_OP] == NULL || nft_dynset_init()
111 tb[NFTA_DYNSET_SREG_KEY] == NULL) nft_dynset_init()
114 set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]); nft_dynset_init()
116 if (tb[NFTA_DYNSET_SET_ID]) nft_dynset_init()
118 tb[NFTA_DYNSET_SET_ID]); nft_dynset_init()
126 priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP])); nft_dynset_init()
139 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) { nft_dynset_init()
142 timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT])); nft_dynset_init()
145 priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]); nft_dynset_init()
150 if (tb[NFTA_DYNSET_SREG_DATA] != NULL) { nft_dynset_init()
156 priv->sreg_data = nft_parse_register(tb[NFTA_DYNSET_SREG_DATA]); nft_dynset_init()
163 if (tb[NFTA_DYNSET_EXPR] != NULL) { nft_dynset_init()
169 priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]); nft_dynset_init()
100 nft_dynset_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_dynset_init() argument
H A Dnft_compat.c183 struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1]; nft_parse_compat() local
187 err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr, nft_parse_compat()
192 if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS]) nft_parse_compat()
195 flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS])); nft_parse_compat()
201 *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO])); nft_parse_compat()
207 const struct nlattr * const tb[]) nft_target_init()
212 size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); nft_target_init()
222 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); nft_target_init()
394 const struct nlattr * const tb[]) nft_match_init()
399 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); nft_match_init()
409 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); nft_match_init()
522 const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_compat_get()
531 if (tb[NFTA_COMPAT_NAME] == NULL || nfnl_compat_get()
532 tb[NFTA_COMPAT_REV] == NULL || nfnl_compat_get()
533 tb[NFTA_COMPAT_TYPE] == NULL) nfnl_compat_get()
536 name = nla_data(tb[NFTA_COMPAT_NAME]); nfnl_compat_get()
537 rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV])); nfnl_compat_get()
538 target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE])); nfnl_compat_get()
629 const struct nlattr * const tb[]) nft_match_select_ops()
636 if (tb[NFTA_MATCH_NAME] == NULL || nft_match_select_ops()
637 tb[NFTA_MATCH_REV] == NULL || nft_match_select_ops()
638 tb[NFTA_MATCH_INFO] == NULL) nft_match_select_ops()
641 mt_name = nla_data(tb[NFTA_MATCH_NAME]); nft_match_select_ops()
642 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); nft_match_select_ops()
709 const struct nlattr * const tb[]) nft_target_select_ops()
716 if (tb[NFTA_TARGET_NAME] == NULL || nft_target_select_ops()
717 tb[NFTA_TARGET_REV] == NULL || nft_target_select_ops()
718 tb[NFTA_TARGET_INFO] == NULL) nft_target_select_ops()
721 tg_name = nla_data(tb[NFTA_TARGET_NAME]); nft_target_select_ops()
722 rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV])); nft_target_select_ops()
206 nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_target_init() argument
393 nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_match_init() argument
521 nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) nfnl_compat_get() argument
628 nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_match_select_ops() argument
708 nft_target_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_target_select_ops() argument
H A Dnft_masq.c41 const struct nlattr * const tb[]) nft_masq_init()
50 if (tb[NFTA_MASQ_FLAGS] == NULL) nft_masq_init()
53 priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); nft_masq_init()
39 nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_masq_init() argument
H A Dnft_meta.c218 const struct nlattr * const tb[]) nft_meta_get_init()
223 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); nft_meta_get_init()
260 priv->dreg = nft_parse_register(tb[NFTA_META_DREG]); nft_meta_get_init()
268 const struct nlattr * const tb[]) nft_meta_set_init()
274 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); nft_meta_set_init()
287 priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); nft_meta_set_init()
348 const struct nlattr * const tb[]) nft_meta_select_ops()
350 if (tb[NFTA_META_KEY] == NULL) nft_meta_select_ops()
353 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) nft_meta_select_ops()
356 if (tb[NFTA_META_DREG]) nft_meta_select_ops()
359 if (tb[NFTA_META_SREG]) nft_meta_select_ops()
216 nft_meta_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_meta_get_init() argument
266 nft_meta_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_meta_set_init() argument
347 nft_meta_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_meta_select_ops() argument
H A Dnft_immediate.c42 const struct nlattr * const tb[]) nft_immediate_init()
48 if (tb[NFTA_IMMEDIATE_DREG] == NULL || nft_immediate_init()
49 tb[NFTA_IMMEDIATE_DATA] == NULL) nft_immediate_init()
53 tb[NFTA_IMMEDIATE_DATA]); nft_immediate_init()
58 priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]); nft_immediate_init()
40 nft_immediate_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_immediate_init() argument
H A Dnft_reject_inet.c67 const struct nlattr * const tb[]) nft_reject_inet_init()
72 if (tb[NFTA_REJECT_TYPE] == NULL) nft_reject_inet_init()
75 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); nft_reject_inet_init()
79 if (tb[NFTA_REJECT_ICMP_CODE] == NULL) nft_reject_inet_init()
82 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); nft_reject_inet_init()
65 nft_reject_inet_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_reject_inet_init() argument
H A Dnf_conntrack_proto_tcp.c1232 struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1]; nlattr_to_tcp() local
1240 err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy); nlattr_to_tcp()
1244 if (tb[CTA_PROTOINFO_TCP_STATE] && nlattr_to_tcp()
1245 nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX) nlattr_to_tcp()
1249 if (tb[CTA_PROTOINFO_TCP_STATE]) nlattr_to_tcp()
1250 ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]); nlattr_to_tcp()
1252 if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) { nlattr_to_tcp()
1254 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]); nlattr_to_tcp()
1259 if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) { nlattr_to_tcp()
1261 nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]); nlattr_to_tcp()
1266 if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] && nlattr_to_tcp()
1267 tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] && nlattr_to_tcp()
1271 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]); nlattr_to_tcp()
1273 nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]); nlattr_to_tcp()
1297 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], tcp_timeout_nlattr_to_obj() argument
1308 if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) { tcp_timeout_nlattr_to_obj()
1310 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; tcp_timeout_nlattr_to_obj()
1312 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { tcp_timeout_nlattr_to_obj()
1314 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; tcp_timeout_nlattr_to_obj()
1316 if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) { tcp_timeout_nlattr_to_obj()
1318 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ; tcp_timeout_nlattr_to_obj()
1320 if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) { tcp_timeout_nlattr_to_obj()
1322 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ; tcp_timeout_nlattr_to_obj()
1324 if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) { tcp_timeout_nlattr_to_obj()
1326 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ; tcp_timeout_nlattr_to_obj()
1328 if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) { tcp_timeout_nlattr_to_obj()
1330 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ; tcp_timeout_nlattr_to_obj()
1332 if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) { tcp_timeout_nlattr_to_obj()
1334 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ; tcp_timeout_nlattr_to_obj()
1336 if (tb[CTA_TIMEOUT_TCP_CLOSE]) { tcp_timeout_nlattr_to_obj()
1338 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ; tcp_timeout_nlattr_to_obj()
1340 if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) { tcp_timeout_nlattr_to_obj()
1342 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ; tcp_timeout_nlattr_to_obj()
1344 if (tb[CTA_TIMEOUT_TCP_RETRANS]) { tcp_timeout_nlattr_to_obj()
1346 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ; tcp_timeout_nlattr_to_obj()
1348 if (tb[CTA_TIMEOUT_TCP_UNACK]) { tcp_timeout_nlattr_to_obj()
1350 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; tcp_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_udp.c160 static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], udp_timeout_nlattr_to_obj() argument
170 if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) { udp_timeout_nlattr_to_obj()
172 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ; udp_timeout_nlattr_to_obj()
174 if (tb[CTA_TIMEOUT_UDP_REPLIED]) { udp_timeout_nlattr_to_obj()
176 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ; udp_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_udplite.c175 static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], udplite_timeout_nlattr_to_obj() argument
185 if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) { udplite_timeout_nlattr_to_obj()
187 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ; udplite_timeout_nlattr_to_obj()
189 if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) { udplite_timeout_nlattr_to_obj()
191 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ; udplite_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_generic.c101 static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], generic_timeout_nlattr_to_obj() argument
107 if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT]) generic_timeout_nlattr_to_obj()
109 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ; generic_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_sctp.c526 struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1]; nlattr_to_sctp() local
533 err = nla_parse_nested(tb, nlattr_to_sctp()
540 if (!tb[CTA_PROTOINFO_SCTP_STATE] || nlattr_to_sctp()
541 !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] || nlattr_to_sctp()
542 !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]) nlattr_to_sctp()
546 ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]); nlattr_to_sctp()
548 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]); nlattr_to_sctp()
550 nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]); nlattr_to_sctp()
568 static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], sctp_timeout_nlattr_to_obj() argument
581 if (tb[i]) { sctp_timeout_nlattr_to_obj()
582 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; sctp_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_gre.c307 static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], gre_timeout_nlattr_to_obj() argument
317 if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) { gre_timeout_nlattr_to_obj()
319 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ; gre_timeout_nlattr_to_obj()
321 if (tb[CTA_TIMEOUT_GRE_REPLIED]) { gre_timeout_nlattr_to_obj()
323 ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ; gre_timeout_nlattr_to_obj()
H A Dnf_conntrack_proto_dccp.c668 struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1]; nlattr_to_dccp() local
674 err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr, nlattr_to_dccp()
679 if (!tb[CTA_PROTOINFO_DCCP_STATE] || nlattr_to_dccp()
680 !tb[CTA_PROTOINFO_DCCP_ROLE] || nlattr_to_dccp()
681 nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX || nlattr_to_dccp()
682 nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) { nlattr_to_dccp()
687 ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]); nlattr_to_dccp()
688 if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) { nlattr_to_dccp()
695 if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) { nlattr_to_dccp()
697 be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ])); nlattr_to_dccp()
716 static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], dccp_timeout_nlattr_to_obj() argument
729 if (tb[i]) { dccp_timeout_nlattr_to_obj()
730 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; dccp_timeout_nlattr_to_obj()
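The per-protocol *_timeout_nlattr_to_obj() handlers above convert CTA_TIMEOUT_* attributes from big-endian seconds to jiffies, either with one if-block per state (tcp, udp, gre) or with an index loop over a contiguous attribute range (sctp, dccp). A minimal sketch of the loop form (the first/last range parameters are illustrative):

	/* Sketch: optional per-state timeouts, be32 seconds -> jiffies. */
	static void timeouts_from_nlattrs(struct nlattr *tb[],
					  unsigned int *timeouts,
					  int first, int last)
	{
		int i;

		for (i = first; i <= last; i++) {
			if (tb[i])
				timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
		}
	}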
H A Dnf_nat_core.c728 struct nlattr *tb[CTA_PROTONAT_MAX+1]; nfnetlink_parse_nat_proto() local
732 err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy); nfnetlink_parse_nat_proto()
738 err = l4proto->nlattr_to_range(tb, range); nfnetlink_parse_nat_proto()
756 struct nlattr *tb[CTA_NAT_MAX+1]; nfnetlink_parse_nat() local
761 err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy); nfnetlink_parse_nat()
765 err = l3proto->nlattr_to_range(tb, range); nfnetlink_parse_nat()
769 if (!tb[CTA_NAT_PROTO]) nfnetlink_parse_nat()
772 return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); nfnetlink_parse_nat()
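nfnetlink_parse_nat() above nests the parse one level deeper: the outer CTA_NAT attribute fills one tb[] array, the l3proto callback consumes the address range, and the embedded tb[CTA_NAT_PROTO] attribute is handed to a second nla_parse_nested() inside nfnetlink_parse_nat_proto(). A condensed sketch of that two-stage flow (parse_inner() stands in for the protocol-level parser and is hypothetical, as is outer_policy):

	/* Sketch: two-stage nested parse, outer attribute first. */
	static int parse_outer(const struct nlattr *nat,
			       struct nf_nat_range *range)
	{
		struct nlattr *tb[CTA_NAT_MAX + 1];
		int err;

		err = nla_parse_nested(tb, CTA_NAT_MAX, nat, outer_policy);
		if (err < 0)
			return err;

		if (!tb[CTA_NAT_PROTO])		/* nothing protocol-specific */
			return 0;

		return parse_inner(tb[CTA_NAT_PROTO], range);
	}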
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
H A Dtestmode.c72 static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[]) wl1271_tm_cmd_test() argument
81 if (!tb[WL1271_TM_ATTR_DATA]) wl1271_tm_cmd_test()
84 buf = nla_data(tb[WL1271_TM_ATTR_DATA]); wl1271_tm_cmd_test()
85 buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); wl1271_tm_cmd_test()
87 if (tb[WL1271_TM_ATTR_ANSWER]) wl1271_tm_cmd_test()
88 answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]); wl1271_tm_cmd_test()
151 static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) wl1271_tm_cmd_interrogate() argument
160 if (!tb[WL1271_TM_ATTR_IE_ID]) wl1271_tm_cmd_interrogate()
163 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); wl1271_tm_cmd_interrogate()
215 static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[]) wl1271_tm_cmd_configure() argument
223 if (!tb[WL1271_TM_ATTR_DATA]) wl1271_tm_cmd_configure()
225 if (!tb[WL1271_TM_ATTR_IE_ID]) wl1271_tm_cmd_configure()
228 ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]); wl1271_tm_cmd_configure()
229 buf = nla_data(tb[WL1271_TM_ATTR_DATA]); wl1271_tm_cmd_configure()
230 buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]); wl1271_tm_cmd_configure()
247 static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[]) wl1271_tm_detect_fem() argument
284 static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[]) wl1271_tm_cmd_set_plt_mode() argument
291 if (!tb[WL1271_TM_ATTR_PLT_MODE]) wl1271_tm_cmd_set_plt_mode()
294 val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]); wl1271_tm_cmd_set_plt_mode()
305 ret = wl1271_tm_detect_fem(wl, tb); wl1271_tm_cmd_set_plt_mode()
315 static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[]) wl12xx_tm_cmd_get_mac() argument
365 struct nlattr *tb[WL1271_TM_ATTR_MAX + 1]; wl1271_tm_cmd() local
369 err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy); wl1271_tm_cmd()
373 if (!tb[WL1271_TM_ATTR_CMD_ID]) wl1271_tm_cmd()
376 nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]); wl1271_tm_cmd()
385 return wl1271_tm_cmd_test(wl, tb); wl1271_tm_cmd()
387 return wl1271_tm_cmd_interrogate(wl, tb); wl1271_tm_cmd()
389 return wl1271_tm_cmd_configure(wl, tb); wl1271_tm_cmd()
391 return wl1271_tm_cmd_set_plt_mode(wl, tb); wl1271_tm_cmd()
393 return wl12xx_tm_cmd_get_mac(wl, tb); wl1271_tm_cmd()
H A Dvendor_cmd.c35 struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR]; wlcore_vendor_cmd_smart_config_start() local
43 ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, wlcore_vendor_cmd_smart_config_start()
48 if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID]) wlcore_vendor_cmd_smart_config_start()
63 nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID])); wlcore_vendor_cmd_smart_config_start()
110 struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR]; wlcore_vendor_cmd_smart_config_set_group_key() local
118 ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len, wlcore_vendor_cmd_smart_config_set_group_key()
123 if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] || wlcore_vendor_cmd_smart_config_set_group_key()
124 !tb[WLCORE_VENDOR_ATTR_GROUP_KEY]) wlcore_vendor_cmd_smart_config_set_group_key()
139 nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]), wlcore_vendor_cmd_smart_config_set_group_key()
140 nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]), wlcore_vendor_cmd_smart_config_set_group_key()
141 nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY])); wlcore_vendor_cmd_smart_config_set_group_key()
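Unlike the nested parses above, the testmode and vendor-command handlers receive a raw data/len blob from nl80211 and run the non-nested nla_parse() over it. A minimal sketch against the 4.1-era five-argument nla_parse() (ATTR_MAX, attr_policy and ATTR_CMD_ID are illustrative stand-ins for the WL1271_TM_* definitions):

	/* Sketch: parse a flat attribute stream, then pick out the command. */
	static int vendor_parse(const void *data, int len, u32 *cmd)
	{
		struct nlattr *tb[ATTR_MAX + 1];
		int err;

		err = nla_parse(tb, ATTR_MAX, data, len, attr_policy);
		if (err)
			return err;

		if (!tb[ATTR_CMD_ID])		/* command id is mandatory */
			return -EINVAL;

		*cmd = nla_get_u32(tb[ATTR_CMD_ID]);
		return 0;
	}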
/linux-4.1.27/net/ipv4/
H A Dinet_hashtables.c65 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); inet_bind_bucket_create() local
67 if (tb) { inet_bind_bucket_create()
68 write_pnet(&tb->ib_net, net); inet_bind_bucket_create()
69 tb->port = snum; inet_bind_bucket_create()
70 tb->fastreuse = 0; inet_bind_bucket_create()
71 tb->fastreuseport = 0; inet_bind_bucket_create()
72 tb->num_owners = 0; inet_bind_bucket_create()
73 INIT_HLIST_HEAD(&tb->owners); inet_bind_bucket_create()
74 hlist_add_head(&tb->node, &head->chain); inet_bind_bucket_create()
76 return tb; inet_bind_bucket_create()
80 * Caller must hold hashbucket lock for this tb with local BH disabled
82 void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) inet_bind_bucket_destroy() argument
84 if (hlist_empty(&tb->owners)) { inet_bind_bucket_destroy()
85 __hlist_del(&tb->node); inet_bind_bucket_destroy()
86 kmem_cache_free(cachep, tb); inet_bind_bucket_destroy()
90 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, inet_bind_hash() argument
98 sk_add_bind_node(sk, &tb->owners); inet_bind_hash()
99 tb->num_owners++; inet_bind_hash()
100 inet_csk(sk)->icsk_bind_hash = tb; inet_bind_hash()
112 struct inet_bind_bucket *tb; __inet_put_port() local
117 tb = inet_csk(sk)->icsk_bind_hash; __inet_put_port()
119 tb->num_owners--; __inet_put_port()
122 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); __inet_put_port()
141 struct inet_bind_bucket *tb; __inet_inherit_port() local
144 tb = inet_csk(sk)->icsk_bind_hash; __inet_inherit_port()
145 if (tb->port != port) { __inet_inherit_port()
151 inet_bind_bucket_for_each(tb, &head->chain) { __inet_inherit_port()
152 if (net_eq(ib_net(tb), sock_net(sk)) && __inet_inherit_port()
153 tb->port == port) __inet_inherit_port()
156 if (!tb) { __inet_inherit_port()
157 tb = inet_bind_bucket_create(table->bind_bucket_cachep, __inet_inherit_port()
159 if (!tb) { __inet_inherit_port()
165 inet_bind_hash(child, tb, port); __inet_inherit_port()
496 struct inet_bind_bucket *tb; __inet_hash_connect() local
523 inet_bind_bucket_for_each(tb, &head->chain) { __inet_hash_connect()
524 if (net_eq(ib_net(tb), net) && __inet_hash_connect()
525 tb->port == port) { __inet_hash_connect()
526 if (tb->fastreuse >= 0 || __inet_hash_connect()
527 tb->fastreuseport >= 0) __inet_hash_connect()
529 WARN_ON(hlist_empty(&tb->owners)); __inet_hash_connect()
537 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, __inet_hash_connect()
539 if (!tb) { __inet_hash_connect()
543 tb->fastreuse = -1; __inet_hash_connect()
544 tb->fastreuseport = -1; __inet_hash_connect()
558 inet_bind_hash(sk, tb, port); __inet_hash_connect()
580 tb = inet_csk(sk)->icsk_bind_hash; __inet_hash_connect()
582 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { __inet_hash_connect()
H A Dfib_frontend.c77 struct fib_table *tb, *alias = NULL; fib_new_table() local
82 tb = fib_get_table(net, id); fib_new_table()
83 if (tb) fib_new_table()
84 return tb; fib_new_table()
89 tb = fib_trie_table(id, alias); fib_new_table()
90 if (!tb) fib_new_table()
95 rcu_assign_pointer(net->ipv4.fib_local, tb); fib_new_table()
98 rcu_assign_pointer(net->ipv4.fib_main, tb); fib_new_table()
101 rcu_assign_pointer(net->ipv4.fib_default, tb); fib_new_table()
108 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]); fib_new_table()
109 return tb; fib_new_table()
115 struct fib_table *tb; fib_get_table() local
124 hlist_for_each_entry_rcu(tb, head, tb_hlist) { hlist_for_each_entry_rcu()
125 if (tb->tb_id == id) hlist_for_each_entry_rcu()
126 return tb; hlist_for_each_entry_rcu()
185 struct fib_table *tb; fib_flush() local
187 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) fib_flush()
188 flushed += fib_table_flush(tb); fib_flush()
197 struct fib_table *tb; fib_flush_external() local
203 hlist_for_each_entry(tb, head, tb_hlist) fib_flush_external()
204 fib_table_flush_external(tb); fib_flush_external()
558 struct fib_table *tb; ip_rt_ioctl() local
561 tb = fib_get_table(net, cfg.fc_table); ip_rt_ioctl()
562 if (tb) ip_rt_ioctl()
563 err = fib_table_delete(tb, &cfg); ip_rt_ioctl()
567 tb = fib_new_table(net, cfg.fc_table); ip_rt_ioctl()
568 if (tb) ip_rt_ioctl()
569 err = fib_table_insert(tb, &cfg); ip_rt_ioctl()
671 struct fib_table *tb; inet_rtm_delroute() local
678 tb = fib_get_table(net, cfg.fc_table); inet_rtm_delroute()
679 if (!tb) { inet_rtm_delroute()
684 err = fib_table_delete(tb, &cfg); inet_rtm_delroute()
693 struct fib_table *tb; inet_rtm_newroute() local
700 tb = fib_new_table(net, cfg.fc_table); inet_rtm_newroute()
701 if (!tb) { inet_rtm_newroute()
706 err = fib_table_insert(tb, &cfg); inet_rtm_newroute()
716 struct fib_table *tb; inet_dump_fib() local
732 hlist_for_each_entry_rcu(tb, head, tb_hlist) { hlist_for_each_entry_rcu()
738 if (fib_table_dump(tb, skb, cb) < 0) hlist_for_each_entry_rcu()
763 struct fib_table *tb; fib_magic() local
778 tb = fib_new_table(net, RT_TABLE_MAIN); fib_magic()
780 tb = fib_new_table(net, RT_TABLE_LOCAL); fib_magic()
782 if (!tb) fib_magic()
785 cfg.fc_table = tb->tb_id; fib_magic()
793 fib_table_insert(tb, &cfg); fib_magic()
795 fib_table_delete(tb, &cfg); fib_magic()
994 struct fib_table *tb; nl_fib_lookup() local
998 tb = fib_get_table(net, frn->tb_id_in); nl_fib_lookup()
1001 if (tb) { nl_fib_lookup()
1004 frn->tb_id = tb->tb_id; nl_fib_lookup()
1005 frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); nl_fib_lookup()
1186 struct fib_table *tb; ip_fib_net_exit() local
1188 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { hlist_for_each_entry_safe()
1189 hlist_del(&tb->tb_hlist); hlist_for_each_entry_safe()
1190 fib_table_flush(tb); hlist_for_each_entry_safe()
1191 fib_free_table(tb); hlist_for_each_entry_safe()
H A Dfib_rules.c168 struct nlattr **tb) fib4_rule_configure()
197 rule4->src = nla_get_in_addr(tb[FRA_SRC]); fib4_rule_configure()
200 rule4->dst = nla_get_in_addr(tb[FRA_DST]); fib4_rule_configure()
203 if (tb[FRA_FLOW]) { fib4_rule_configure()
204 rule4->tclassid = nla_get_u32(tb[FRA_FLOW]); fib4_rule_configure()
245 struct nlattr **tb) fib4_rule_compare()
259 if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) fib4_rule_compare()
263 if (frh->src_len && (rule4->src != nla_get_in_addr(tb[FRA_SRC]))) fib4_rule_compare()
266 if (frh->dst_len && (rule4->dst != nla_get_in_addr(tb[FRA_DST]))) fib4_rule_compare()
166 fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) fib4_rule_configure() argument
244 fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) fib4_rule_compare() argument
H A Dinet_connection_sock.c47 const struct inet_bind_bucket *tb, bool relax) inet_csk_bind_conflict()
57 * in tb->owners list belong to the same net - the inet_csk_bind_conflict()
61 sk_for_each_bound(sk2, &tb->owners) { inet_csk_bind_conflict()
97 struct inet_bind_bucket *tb; inet_csk_get_port() local
119 inet_bind_bucket_for_each(tb, &head->chain) inet_csk_get_port()
120 if (net_eq(ib_net(tb), net) && tb->port == rover) { inet_csk_get_port()
121 if (((tb->fastreuse > 0 && inet_csk_get_port()
124 (tb->fastreuseport > 0 && inet_csk_get_port()
126 uid_eq(tb->fastuid, uid))) && inet_csk_get_port()
127 (tb->num_owners < smallest_size || smallest_size == -1)) { inet_csk_get_port()
128 smallest_size = tb->num_owners; inet_csk_get_port()
131 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { inet_csk_get_port()
136 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { inet_csk_get_port()
173 inet_bind_bucket_for_each(tb, &head->chain) inet_csk_get_port()
174 if (net_eq(ib_net(tb), net) && tb->port == snum) inet_csk_get_port()
177 tb = NULL; inet_csk_get_port()
180 if (!hlist_empty(&tb->owners)) { inet_csk_get_port()
184 if (((tb->fastreuse > 0 && inet_csk_get_port()
186 (tb->fastreuseport > 0 && inet_csk_get_port()
187 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && inet_csk_get_port()
192 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { inet_csk_get_port()
194 (tb->fastreuseport > 0 && inet_csk_get_port()
195 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && inet_csk_get_port()
207 if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, inet_csk_get_port()
210 if (hlist_empty(&tb->owners)) { inet_csk_get_port()
212 tb->fastreuse = 1; inet_csk_get_port()
214 tb->fastreuse = 0; inet_csk_get_port()
216 tb->fastreuseport = 1; inet_csk_get_port()
217 tb->fastuid = uid; inet_csk_get_port()
219 tb->fastreuseport = 0; inet_csk_get_port()
221 if (tb->fastreuse && inet_csk_get_port()
223 tb->fastreuse = 0; inet_csk_get_port()
224 if (tb->fastreuseport && inet_csk_get_port()
225 (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))) inet_csk_get_port()
226 tb->fastreuseport = 0; inet_csk_get_port()
230 inet_bind_hash(sk, tb, snum); inet_csk_get_port()
231 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); inet_csk_get_port()
46 inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, bool relax) inet_csk_bind_conflict() argument
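Both inet_csk_get_port() and __inet_hash_connect() above locate the bind bucket for a (net, port) pair by walking one hash chain with inet_bind_bucket_for_each(), creating a fresh bucket only on a miss. A minimal sketch of the lookup half (the bucket_find() wrapper is hypothetical):

	/* Sketch: find the bind bucket for (net, port) in one hash chain. */
	static struct inet_bind_bucket *
	bucket_find(struct inet_bind_hashbucket *head,
		    struct net *net, unsigned short port)
	{
		struct inet_bind_bucket *tb;

		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == port)
				return tb;

		return NULL;	/* caller falls back to inet_bind_bucket_create() */
	}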
H A Dfib_trie.c1075 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) fib_table_insert() argument
1077 struct trie *t = (struct trie *)tb->tb_data; fib_table_insert()
1092 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); fib_table_insert()
1105 tb->tb_id) : NULL; fib_table_insert()
1133 (fa->tb_id != tb->tb_id) || hlist_for_each_entry_from()
1167 new_fa->tb_id = tb->tb_id;
1173 tb->tb_id);
1188 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1216 new_fa->tb_id = tb->tb_id;
1222 tb->tb_id);
1234 tb->tb_num_default++;
1243 netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
1260 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, fib_table_lookup() argument
1263 struct trie *t = (struct trie *) tb->tb_data; fib_table_lookup()
1423 res->table = tb; fib_table_lookup()
1468 int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) fib_table_delete() argument
1470 struct trie *t = (struct trie *) tb->tb_data; fib_table_delete()
1490 fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id); fib_table_delete()
1501 (fa->tb_id != tb->tb_id) || hlist_for_each_entry_from()
1522 cfg->fc_type, tb->tb_id);
1524 rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
1528 tb->tb_num_default--;
1598 static void fib_trie_free(struct fib_table *tb) fib_trie_free() argument
1600 struct trie *t = (struct trie *)tb->tb_data; fib_trie_free()
1653 kfree(tb); fib_trie_free()
1713 void fib_table_flush_external(struct fib_table *tb) fib_table_flush_external() argument
1715 struct trie *t = (struct trie *)tb->tb_data; fib_table_flush_external()
1759 if (tb->tb_id != fa->tb_id) { fib_table_flush_external()
1774 fa->fa_type, tb->tb_id); fib_table_flush_external()
1788 int fib_table_flush(struct fib_table *tb) fib_table_flush() argument
1790 struct trie *t = (struct trie *)tb->tb_data; fib_table_flush()
1840 fa->fa_type, tb->tb_id); fib_table_flush()
1862 struct fib_table *tb = container_of(head, struct fib_table, rcu); __trie_free_rcu() local
1864 struct trie *t = (struct trie *)tb->tb_data; __trie_free_rcu()
1866 if (tb->tb_data == tb->__data) __trie_free_rcu()
1869 kfree(tb); __trie_free_rcu()
1872 void fib_free_table(struct fib_table *tb) fib_free_table() argument
1874 call_rcu(&tb->rcu, __trie_free_rcu); fib_free_table()
1877 static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, fn_trie_dump_leaf() argument
1894 if (tb->tb_id != fa->tb_id) { fn_trie_dump_leaf()
1902 tb->tb_id, fn_trie_dump_leaf()
1919 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, fib_table_dump() argument
1922 struct trie *t = (struct trie *)tb->tb_data; fib_table_dump()
1931 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { fib_table_dump()
1967 struct fib_table *tb; fib_trie_table() local
1969 size_t sz = sizeof(*tb); fib_trie_table()
1974 tb = kzalloc(sz, GFP_KERNEL); fib_trie_table()
1975 if (!tb) fib_trie_table()
1978 tb->tb_id = id; fib_trie_table()
1979 tb->tb_default = -1; fib_trie_table()
1980 tb->tb_num_default = 0; fib_trie_table()
1981 tb->tb_data = (alias ? alias->__data : tb->__data); fib_trie_table()
1984 return tb; fib_trie_table()
1986 t = (struct trie *) tb->tb_data; fib_trie_table()
1992 kfree(tb); fib_trie_table()
1993 tb = NULL; fib_trie_table()
1997 return tb; fib_trie_table()
2004 struct fib_table *tb; member in struct:fib_trie_iter
2180 static void fib_table_print(struct seq_file *seq, struct fib_table *tb) fib_table_print() argument
2182 if (tb->tb_id == RT_TABLE_LOCAL) fib_table_print()
2184 else if (tb->tb_id == RT_TABLE_MAIN) fib_table_print()
2187 seq_printf(seq, "Id %d:\n", tb->tb_id); fib_table_print()
2203 struct fib_table *tb; fib_triestat_seq_show() local
2205 hlist_for_each_entry_rcu(tb, head, tb_hlist) { hlist_for_each_entry_rcu()
2206 struct trie *t = (struct trie *) tb->tb_data; hlist_for_each_entry_rcu()
2212 fib_table_print(seq, tb); hlist_for_each_entry_rcu()
2247 struct fib_table *tb; fib_trie_get_idx() local
2249 hlist_for_each_entry_rcu(tb, head, tb_hlist) { hlist_for_each_entry_rcu()
2253 (struct trie *) tb->tb_data); hlist_for_each_entry_rcu()
2256 iter->tb = tb; hlist_for_each_entry_rcu()
2276 struct fib_table *tb = iter->tb; fib_trie_seq_next() local
2288 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); fib_trie_seq_next()
2289 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) { fib_trie_seq_next()
2290 tb = hlist_entry(tb_node, struct fib_table, tb_hlist); fib_trie_seq_next()
2291 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); fib_trie_seq_next()
2299 hlist_for_each_entry_rcu(tb, head, tb_hlist) { hlist_for_each_entry_rcu()
2300 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); hlist_for_each_entry_rcu()
2308 iter->tb = tb;
2368 fib_table_print(seq, iter->tb); fib_trie_seq_show()
2436 struct fib_table *tb = iter->main_tb; fib_route_get_idx() local
2446 t = (struct trie *)tb->tb_data; fib_route_get_idx()
2478 struct fib_table *tb; __acquires() local
2483 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN); __acquires()
2484 if (!tb) __acquires()
2487 iter->main_tb = tb; __acquires()
2492 t = (struct trie *)tb->tb_data; __acquires()
2551 struct fib_table *tb = iter->main_tb; fib_route_seq_show() local
2574 if (fa->tb_id != tb->tb_id) fib_route_seq_show()
H A Dip_gre.c557 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) ipgre_tunnel_validate() argument
575 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) ipgre_tap_validate() argument
579 if (tb[IFLA_ADDRESS]) { ipgre_tap_validate()
580 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) ipgre_tap_validate()
582 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) ipgre_tap_validate()
596 return ipgre_tunnel_validate(tb, data); ipgre_tap_validate()
599 static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], ipgre_netlink_parms() argument
702 struct nlattr *tb[], struct nlattr *data[]) ipgre_newlink()
715 ipgre_netlink_parms(data, tb, &p); ipgre_newlink()
716 return ip_tunnel_newlink(dev, tb, &p); ipgre_newlink()
719 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], ipgre_changelink() argument
733 ipgre_netlink_parms(data, tb, &p); ipgre_changelink()
734 return ip_tunnel_changelink(dev, tb, &p); ipgre_changelink()
701 ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) ipgre_newlink() argument
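ipgre_tap_validate() above vets the optional IFLA_ADDRESS attribute, rejecting links whose hardware address is not a well-formed Ethernet MAC. A minimal sketch of that rtnl ->validate() check (the function name is hypothetical; return values follow the usual convention for this check):

	static int example_validate(struct nlattr *tb[], struct nlattr *data[])
	{
		if (tb[IFLA_ADDRESS]) {
			if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
				return -EINVAL;
			if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
				return -EADDRNOTAVAIL;
		}
		return 0;
	}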
H A Ddevinet.c574 struct nlattr *tb[IFA_MAX+1]; inet_rtm_deladdr() local
582 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); inet_rtm_deladdr()
595 if (tb[IFA_LOCAL] && inet_rtm_deladdr()
596 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL])) inet_rtm_deladdr()
599 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) inet_rtm_deladdr()
602 if (tb[IFA_ADDRESS] && inet_rtm_deladdr()
604 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa))) inet_rtm_deladdr()
745 struct nlattr *tb[IFA_MAX+1]; rtm_to_ifaddr() local
752 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); rtm_to_ifaddr()
758 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL]) rtm_to_ifaddr()
783 if (!tb[IFA_ADDRESS]) rtm_to_ifaddr()
784 tb[IFA_ADDRESS] = tb[IFA_LOCAL]; rtm_to_ifaddr()
789 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : rtm_to_ifaddr()
794 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]); rtm_to_ifaddr()
795 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]); rtm_to_ifaddr()
797 if (tb[IFA_BROADCAST]) rtm_to_ifaddr()
798 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]); rtm_to_ifaddr()
800 if (tb[IFA_LABEL]) rtm_to_ifaddr()
801 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); rtm_to_ifaddr()
805 if (tb[IFA_CACHEINFO]) { rtm_to_ifaddr()
808 ci = nla_data(tb[IFA_CACHEINFO]); rtm_to_ifaddr()
1684 struct nlattr *a, *tb[IFLA_INET_MAX+1]; inet_validate_link_af() local
1690 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy); inet_validate_link_af()
1694 if (tb[IFLA_INET_CONF]) { nla_for_each_nested()
1695 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) { nla_for_each_nested()
1712 struct nlattr *a, *tb[IFLA_INET_MAX+1]; inet_set_link_af() local
1718 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0) inet_set_link_af()
1721 if (tb[IFLA_INET_CONF]) { inet_set_link_af()
1722 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) inet_set_link_af()
1828 struct nlattr *tb[NETCONFA_MAX+1]; inet_netconf_get_devconf() local
1837 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, inet_netconf_get_devconf()
1843 if (!tb[NETCONFA_IFINDEX]) inet_netconf_get_devconf()
1846 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]); inet_netconf_get_devconf()
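The devinet handlers above start from the message header rather than a nested attribute: nlmsg_parse() skips sizeof(struct ifaddrmsg) of fixed header and fills tb[] from the attribute stream that follows it. A minimal sketch with the 4.1-era five-argument nlmsg_parse() (the example_addr_req() wrapper is hypothetical; ifa_ipv4_policy as in the listing):

	static int example_addr_req(struct nlmsghdr *nlh)
	{
		struct nlattr *tb[IFA_MAX + 1];
		struct ifaddrmsg *ifm = nlmsg_data(nlh);
		int err;

		err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX,
				  ifa_ipv4_policy);
		if (err < 0)
			return err;

		if (!tb[IFA_LOCAL])	/* rtm_to_ifaddr() treats this as fatal */
			return -EINVAL;

		/* ifm->ifa_index and the tb[] attributes are now usable */
		return 0;
	}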
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_nla.h4 extern int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
H A Ddrbd_nla.c30 int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, drbd_nla_parse_nested() argument
37 err = nla_parse_nested(tb, maxtype, nla, policy); drbd_nla_parse_nested()
/linux-4.1.27/net/bridge/netfilter/
H A Dnft_meta_bridge.c53 const struct nlattr * const tb[]) nft_meta_bridge_get_init()
58 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); nft_meta_bridge_get_init()
65 return nft_meta_get_init(ctx, expr, tb); nft_meta_bridge_get_init()
68 priv->dreg = nft_parse_register(tb[NFTA_META_DREG]); nft_meta_bridge_get_init()
92 const struct nlattr * const tb[]) nft_meta_bridge_select_ops()
94 if (tb[NFTA_META_KEY] == NULL) nft_meta_bridge_select_ops()
97 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) nft_meta_bridge_select_ops()
100 if (tb[NFTA_META_DREG]) nft_meta_bridge_select_ops()
103 if (tb[NFTA_META_SREG]) nft_meta_bridge_select_ops()
51 nft_meta_bridge_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_meta_bridge_get_init() argument
91 nft_meta_bridge_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) nft_meta_bridge_select_ops() argument
H A Dnft_reject_bridge.c326 const struct nlattr * const tb[]) nft_reject_bridge_init()
335 if (tb[NFTA_REJECT_TYPE] == NULL) nft_reject_bridge_init()
338 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); nft_reject_bridge_init()
342 if (tb[NFTA_REJECT_ICMP_CODE] == NULL) nft_reject_bridge_init()
345 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); nft_reject_bridge_init()
324 nft_reject_bridge_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) nft_reject_bridge_init() argument
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
H A Dtestmode.c73 struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; ath6kl_tm_cmd() local
77 err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_cmd()
82 if (!tb[ATH6KL_TM_ATTR_CMD]) ath6kl_tm_cmd()
85 switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) { ath6kl_tm_cmd()
87 if (!tb[ATH6KL_TM_ATTR_DATA]) ath6kl_tm_cmd()
90 buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]); ath6kl_tm_cmd()
91 buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]); ath6kl_tm_cmd()
/linux-4.1.27/security/keys/
H A Dtrusted.c393 static int osap(struct tpm_buf *tb, struct osapsess *s, osap() argument
404 INIT_BUF(tb); osap()
405 store16(tb, TPM_TAG_RQU_COMMAND); osap()
406 store32(tb, TPM_OSAP_SIZE); osap()
407 store32(tb, TPM_ORD_OSAP); osap()
408 store16(tb, type); osap()
409 store32(tb, handle); osap()
410 storebytes(tb, ononce, TPM_NONCE_SIZE); osap()
412 ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); osap()
416 s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); osap()
417 memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), osap()
419 memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + osap()
428 static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) oiap() argument
432 INIT_BUF(tb); oiap()
433 store16(tb, TPM_TAG_RQU_COMMAND); oiap()
434 store32(tb, TPM_OIAP_SIZE); oiap()
435 store32(tb, TPM_ORD_OIAP); oiap()
436 ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); oiap()
440 *handle = LOAD32(tb->data, TPM_DATA_OFFSET); oiap()
441 memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], oiap()
458 static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, tpm_seal() argument
483 ret = osap(tb, &sess, keyauth, keytype, keyhandle); tpm_seal()
529 INIT_BUF(tb); tpm_seal()
530 store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); tpm_seal()
531 store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); tpm_seal()
532 store32(tb, TPM_ORD_SEAL); tpm_seal()
533 store32(tb, keyhandle); tpm_seal()
534 storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); tpm_seal()
535 store32(tb, pcrinfosize); tpm_seal()
536 storebytes(tb, pcrinfo, pcrinfosize); tpm_seal()
537 store32(tb, datalen); tpm_seal()
538 storebytes(tb, data, datalen); tpm_seal()
539 store32(tb, sess.handle); tpm_seal()
540 storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); tpm_seal()
541 store8(tb, cont); tpm_seal()
542 storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); tpm_seal()
544 ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); tpm_seal()
549 sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); tpm_seal()
550 encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + tpm_seal()
556 ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, tpm_seal()
562 memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); tpm_seal()
573 static int tpm_unseal(struct tpm_buf *tb, tpm_unseal() argument
592 ret = oiap(tb, &authhandle1, enonce1); tpm_unseal()
597 ret = oiap(tb, &authhandle2, enonce2); tpm_unseal()
622 INIT_BUF(tb); tpm_unseal()
623 store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); tpm_unseal()
624 store32(tb, TPM_UNSEAL_SIZE + bloblen); tpm_unseal()
625 store32(tb, TPM_ORD_UNSEAL); tpm_unseal()
626 store32(tb, keyhandle); tpm_unseal()
627 storebytes(tb, blob, bloblen); tpm_unseal()
628 store32(tb, authhandle1); tpm_unseal()
629 storebytes(tb, nonceodd, TPM_NONCE_SIZE); tpm_unseal()
630 store8(tb, cont); tpm_unseal()
631 storebytes(tb, authdata1, SHA1_DIGEST_SIZE); tpm_unseal()
632 store32(tb, authhandle2); tpm_unseal()
633 storebytes(tb, nonceodd, TPM_NONCE_SIZE); tpm_unseal()
634 store8(tb, cont); tpm_unseal()
635 storebytes(tb, authdata2, SHA1_DIGEST_SIZE); tpm_unseal()
637 ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); tpm_unseal()
643 *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); tpm_unseal()
644 ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, tpm_unseal()
654 memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); tpm_unseal()
664 struct tpm_buf *tb; key_seal() local
667 tb = kzalloc(sizeof *tb, GFP_KERNEL); key_seal()
668 if (!tb) key_seal()
674 ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, key_seal()
680 kfree(tb); key_seal()
690 struct tpm_buf *tb; key_unseal() local
693 tb = kzalloc(sizeof *tb, GFP_KERNEL); key_unseal()
694 if (!tb) key_unseal()
697 ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, key_unseal()
705 kfree(tb); key_unseal()
H A Dtrusted.h25 #define INIT_BUF(tb) (tb->len = 0)
/linux-4.1.27/include/net/netfilter/
H A Dnft_meta.h16 const struct nlattr * const tb[]);
20 const struct nlattr * const tb[]);
H A Dnft_masq.h12 const struct nlattr * const tb[]);
H A Dnft_redir.h14 const struct nlattr * const tb[]);
H A Dnft_reject.h13 const struct nlattr * const tb[]);
H A Dnf_nat_l4proto.h40 int (*nlattr_to_range)(struct nlattr *tb[],
69 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
H A Dnf_conntrack_l4proto.h75 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
81 int (*nlattr_to_tuple)(struct nlattr *tb[],
90 int (*nlattr_to_obj)(struct nlattr *tb[],
145 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
H A Dnf_conntrack_l3proto.h60 int (*nlattr_to_tuple)(struct nlattr *tb[],
/linux-4.1.27/net/sched/
H A Dact_bpf.c170 static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) tcf_bpf_init_from_ops() argument
178 bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]); tcf_bpf_init_from_ops()
183 if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS])) tcf_bpf_init_from_ops()
190 memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size); tcf_bpf_init_from_ops()
209 static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) tcf_bpf_init_from_efd() argument
215 bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]); tcf_bpf_init_from_efd()
226 if (tb[TCA_ACT_BPF_NAME]) { tcf_bpf_init_from_efd()
227 name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]), tcf_bpf_init_from_efd()
228 nla_len(tb[TCA_ACT_BPF_NAME]), tcf_bpf_init_from_efd()
269 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; tcf_bpf_init() local
279 ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy); tcf_bpf_init()
283 is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; tcf_bpf_init()
284 is_ebpf = tb[TCA_ACT_BPF_FD]; tcf_bpf_init()
287 !tb[TCA_ACT_BPF_PARMS]) tcf_bpf_init()
290 parm = nla_data(tb[TCA_ACT_BPF_PARMS]); tcf_bpf_init()
294 ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) : tcf_bpf_init()
295 tcf_bpf_init_from_efd(tb, &cfg); tcf_bpf_init()
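A note on the net/sched results (and the netfilter prototypes above): tb is the stock name for a table of parsed attribute pointers, one slot per attribute type. nla_parse_nested() validates a nested TLV blob against a policy and fills tb[]; afterwards tb[ATTR] is either NULL or a pointer into the original message, which is why every *_init()/*_change() below reduces to a run of if (tb[ATTR]) checks. A minimal sketch of the idiom, using a made-up FOO attribute space rather than any real policy:

    #include <net/netlink.h>

    /* Hypothetical attribute space, for illustration only. */
    enum { FOO_UNSPEC, FOO_LIMIT, FOO_NAME, __FOO_MAX };
    #define FOO_MAX (__FOO_MAX - 1)

    static const struct nla_policy foo_policy[FOO_MAX + 1] = {
        [FOO_LIMIT] = { .type = NLA_U32 },
        [FOO_NAME]  = { .type = NLA_NUL_STRING, .len = 15 },
    };

    static int foo_parse(struct nlattr *nla, u32 *limit)
    {
        struct nlattr *tb[FOO_MAX + 1];
        int err;

        /* Validate the blob and point each tb[] slot at its attribute. */
        err = nla_parse_nested(tb, FOO_MAX, nla, foo_policy);
        if (err < 0)
            return err;

        /* Absent attributes are simply NULL slots. */
        if (!tb[FOO_LIMIT])
            return -EINVAL;
        *limit = nla_get_u32(tb[FOO_LIMIT]);

        if (tb[FOO_NAME])
            pr_debug("foo name: %s\n", (char *)nla_data(tb[FOO_NAME]));
        return 0;
    }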
H A Dcls_bpf.c184 static int cls_bpf_prog_from_ops(struct nlattr **tb, cls_bpf_prog_from_ops() argument
193 bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]); cls_bpf_prog_from_ops()
198 if (bpf_size != nla_len(tb[TCA_BPF_OPS])) cls_bpf_prog_from_ops()
205 memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); cls_bpf_prog_from_ops()
226 static int cls_bpf_prog_from_efd(struct nlattr **tb, cls_bpf_prog_from_efd() argument
233 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); cls_bpf_prog_from_efd()
244 if (tb[TCA_BPF_NAME]) { cls_bpf_prog_from_efd()
245 name = kmemdup(nla_data(tb[TCA_BPF_NAME]), cls_bpf_prog_from_efd()
246 nla_len(tb[TCA_BPF_NAME]), cls_bpf_prog_from_efd()
266 unsigned long base, struct nlattr **tb, cls_bpf_modify_existing()
274 is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS]; cls_bpf_modify_existing()
275 is_ebpf = tb[TCA_BPF_FD]; cls_bpf_modify_existing()
278 !tb[TCA_BPF_CLASSID]) cls_bpf_modify_existing()
282 ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); cls_bpf_modify_existing()
286 classid = nla_get_u32(tb[TCA_BPF_CLASSID]); cls_bpf_modify_existing()
288 ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) : cls_bpf_modify_existing()
289 cls_bpf_prog_from_efd(tb, prog, classid); cls_bpf_modify_existing()
329 struct nlattr *tb[TCA_BPF_MAX + 1]; cls_bpf_change() local
336 ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy); cls_bpf_change()
362 ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr); cls_bpf_change()
264 cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, struct cls_bpf_prog *prog, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) cls_bpf_modify_existing() argument
H A Dcls_flow.c370 struct nlattr *tb[TCA_FLOW_MAX + 1]; flow_change() local
383 err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy); flow_change()
387 if (tb[TCA_FLOW_BASECLASS]) { flow_change()
388 baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]); flow_change()
393 if (tb[TCA_FLOW_KEYS]) { flow_change()
394 keymask = nla_get_u32(tb[TCA_FLOW_KEYS]); flow_change()
409 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); flow_change()
413 err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); flow_change()
445 if (tb[TCA_FLOW_MODE]) flow_change()
446 mode = nla_get_u32(tb[TCA_FLOW_MODE]); flow_change()
452 if (tb[TCA_FLOW_PERTURB]) { flow_change()
455 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; flow_change()
461 if (!tb[TCA_FLOW_KEYS]) flow_change()
465 if (tb[TCA_FLOW_MODE]) flow_change()
466 mode = nla_get_u32(tb[TCA_FLOW_MODE]); flow_change()
470 if (tb[TCA_FLOW_PERTURB]) { flow_change()
473 perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; flow_change()
496 if (tb[TCA_FLOW_KEYS]) { flow_change()
503 if (tb[TCA_FLOW_MASK]) flow_change()
504 fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]); flow_change()
505 if (tb[TCA_FLOW_XOR]) flow_change()
506 fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]); flow_change()
507 if (tb[TCA_FLOW_RSHIFT]) flow_change()
508 fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); flow_change()
509 if (tb[TCA_FLOW_ADDEND]) flow_change()
510 fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); flow_change()
512 if (tb[TCA_FLOW_DIVISOR]) flow_change()
513 fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); flow_change()
H A Dact_skbedit.c64 struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; tcf_skbedit_init() local
74 err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy); tcf_skbedit_init()
78 if (tb[TCA_SKBEDIT_PARMS] == NULL) tcf_skbedit_init()
81 if (tb[TCA_SKBEDIT_PRIORITY] != NULL) { tcf_skbedit_init()
83 priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]); tcf_skbedit_init()
86 if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { tcf_skbedit_init()
88 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); tcf_skbedit_init()
91 if (tb[TCA_SKBEDIT_MARK] != NULL) { tcf_skbedit_init()
93 mark = nla_data(tb[TCA_SKBEDIT_MARK]); tcf_skbedit_init()
99 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); tcf_skbedit_init()
H A Dact_vlan.c71 struct nlattr *tb[TCA_VLAN_MAX + 1]; tcf_vlan_init() local
83 err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy); tcf_vlan_init()
87 if (!tb[TCA_VLAN_PARMS]) tcf_vlan_init()
89 parm = nla_data(tb[TCA_VLAN_PARMS]); tcf_vlan_init()
94 if (!tb[TCA_VLAN_PUSH_VLAN_ID]) tcf_vlan_init()
96 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); tcf_vlan_init()
100 if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { tcf_vlan_init()
101 push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); tcf_vlan_init()
H A Dact_police.c120 struct nlattr *tb[TCA_POLICE_MAX + 1]; tcf_act_police_locate() local
130 err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy); tcf_act_police_locate()
134 if (tb[TCA_POLICE_TBF] == NULL) tcf_act_police_locate()
136 size = nla_len(tb[TCA_POLICE_TBF]); tcf_act_police_locate()
139 parm = nla_data(tb[TCA_POLICE_TBF]); tcf_act_police_locate()
167 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]); tcf_act_police_locate()
173 tb[TCA_POLICE_PEAKRATE]); tcf_act_police_locate()
186 } else if (tb[TCA_POLICE_AVRATE] && tcf_act_police_locate()
216 if (tb[TCA_POLICE_RESULT]) tcf_act_police_locate()
217 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); tcf_act_police_locate()
227 if (tb[TCA_POLICE_AVRATE]) tcf_act_police_locate()
228 police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); tcf_act_police_locate()
H A Dact_ipt.c89 struct nlattr *tb[TCA_IPT_MAX + 1]; tcf_ipt_init() local
100 err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy); tcf_ipt_init()
104 if (tb[TCA_IPT_HOOK] == NULL) tcf_ipt_init()
106 if (tb[TCA_IPT_TARG] == NULL) tcf_ipt_init()
109 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); tcf_ipt_init()
110 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) tcf_ipt_init()
113 if (tb[TCA_IPT_INDEX] != NULL) tcf_ipt_init()
114 index = nla_get_u32(tb[TCA_IPT_INDEX]); tcf_ipt_init()
131 hook = nla_get_u32(tb[TCA_IPT_HOOK]); tcf_ipt_init()
137 if (tb[TCA_IPT_TABLE] == NULL || tcf_ipt_init()
138 nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ) tcf_ipt_init()
H A Dact_api.c493 struct nlattr *tb[TCA_ACT_MAX + 1]; tcf_action_init_1() local
498 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); tcf_action_init_1()
502 kind = tb[TCA_ACT_KIND]; tcf_action_init_1()
546 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind); tcf_action_init_1()
573 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; tcf_action_init() local
578 err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); tcf_action_init()
582 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { tcf_action_init()
583 act = tcf_action_init_1(net, tb[i], est, name, ovr, bind); tcf_action_init()
709 struct nlattr *tb[TCA_ACT_MAX + 1]; tcf_action_get_1() local
714 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); tcf_action_get_1()
719 if (tb[TCA_ACT_INDEX] == NULL || tcf_action_get_1()
720 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) tcf_action_get_1()
722 index = nla_get_u32(tb[TCA_ACT_INDEX]); tcf_action_get_1()
730 a->ops = tc_lookup_action(tb[TCA_ACT_KIND]); tcf_action_get_1()
767 struct nlattr *tb[TCA_ACT_MAX + 1]; tca_action_flush() local
780 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); tca_action_flush()
785 kind = tb[TCA_ACT_KIND]; tca_action_flush()
866 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; tca_action_gd() local
870 ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); tca_action_gd()
875 if (tb[1] != NULL) tca_action_gd()
876 return tca_action_flush(net, tb[1], n, portid); tca_action_gd()
881 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { tca_action_gd()
882 act = tcf_action_get_1(tb[i], n, portid); tca_action_gd()
1002 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; find_dump_kind() local
1012 if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), find_dump_kind()
1016 if (tb[1] == NULL) find_dump_kind()
1018 if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]), find_dump_kind()
1019 nla_len(tb[1]), NULL) < 0) find_dump_kind()
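act_api.c above shows a second tb layout: a batch of actions arrives as nested attributes numbered 1..TCA_ACT_MAX_PRIO, parsed with a NULL policy and walked in slot order until the first empty slot, as in tcf_action_init() and tca_action_gd(). A sketch of that walk (the pr_debug body is illustrative):

    /* Sketch of the numbered-slot walk used for action batches. */
    static int walk_actions(struct nlattr *nla)
    {
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        int err, i;

        err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
        if (err < 0)
            return err;

        /* Slots are filled 1..N in order; the first NULL ends the batch. */
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++)
            pr_debug("action %d, %d bytes\n", i, nla_len(tb[i]));

        return 0;
    }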
H A Dact_gact.c56 struct nlattr *tb[TCA_GACT_MAX + 1]; tcf_gact_init() local
68 err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy); tcf_gact_init()
72 if (tb[TCA_GACT_PARMS] == NULL) tcf_gact_init()
74 parm = nla_data(tb[TCA_GACT_PARMS]); tcf_gact_init()
77 if (tb[TCA_GACT_PROB] != NULL) tcf_gact_init()
80 if (tb[TCA_GACT_PROB]) { tcf_gact_init()
81 p_parm = nla_data(tb[TCA_GACT_PROB]); tcf_gact_init()
H A Dcls_fw.c191 struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr) fw_change_attrs()
199 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); fw_change_attrs()
203 if (tb[TCA_FW_CLASSID]) { fw_change_attrs()
204 f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); fw_change_attrs()
209 if (tb[TCA_FW_INDEV]) { fw_change_attrs()
211 ret = tcf_change_indev(net, tb[TCA_FW_INDEV]); fw_change_attrs()
221 if (tb[TCA_FW_MASK]) { fw_change_attrs()
222 mask = nla_get_u32(tb[TCA_FW_MASK]); fw_change_attrs()
245 struct nlattr *tb[TCA_FW_MAX + 1]; fw_change() local
251 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); fw_change()
275 err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); fw_change()
301 if (tb[TCA_FW_MASK]) fw_change()
302 mask = nla_get_u32(tb[TCA_FW_MASK]); fw_change()
320 err = fw_change_attrs(net, tp, f, tb, tca, base, ovr); fw_change()
190 fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr) fw_change_attrs() argument
H A Dsch_codel.c117 struct nlattr *tb[TCA_CODEL_MAX + 1]; codel_change() local
124 err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); codel_change()
130 if (tb[TCA_CODEL_TARGET]) { codel_change()
131 u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]); codel_change()
136 if (tb[TCA_CODEL_INTERVAL]) { codel_change()
137 u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]); codel_change()
142 if (tb[TCA_CODEL_LIMIT]) codel_change()
143 sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); codel_change()
145 if (tb[TCA_CODEL_ECN]) codel_change()
146 q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]); codel_change()
H A Dsch_dsmark.c117 struct nlattr *tb[TCA_DSMARK_MAX + 1]; dsmark_change() local
132 err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); dsmark_change()
136 if (tb[TCA_DSMARK_MASK]) dsmark_change()
137 mask = nla_get_u8(tb[TCA_DSMARK_MASK]); dsmark_change()
139 if (tb[TCA_DSMARK_VALUE]) dsmark_change()
140 p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); dsmark_change()
142 if (tb[TCA_DSMARK_MASK]) dsmark_change()
345 struct nlattr *tb[TCA_DSMARK_MAX + 1]; dsmark_init() local
356 err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); dsmark_init()
361 indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); dsmark_init()
366 if (tb[TCA_DSMARK_DEFAULT_INDEX]) dsmark_init()
367 default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]); dsmark_init()
383 p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]); dsmark_init()
H A Dcls_route.c383 struct nlattr **tb, struct nlattr *est, int new, route4_set_parms()
394 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); route4_set_parms()
399 if (tb[TCA_ROUTE4_TO]) { route4_set_parms()
402 to = nla_get_u32(tb[TCA_ROUTE4_TO]); route4_set_parms()
408 if (tb[TCA_ROUTE4_FROM]) { route4_set_parms()
409 if (tb[TCA_ROUTE4_IIF]) route4_set_parms()
411 id = nla_get_u32(tb[TCA_ROUTE4_FROM]); route4_set_parms()
415 } else if (tb[TCA_ROUTE4_IIF]) { route4_set_parms()
416 id = nla_get_u32(tb[TCA_ROUTE4_IIF]); route4_set_parms()
449 if (tb[TCA_ROUTE4_TO]) route4_set_parms()
452 if (tb[TCA_ROUTE4_FROM]) route4_set_parms()
454 else if (tb[TCA_ROUTE4_IIF]) route4_set_parms()
461 if (tb[TCA_ROUTE4_CLASSID]) { route4_set_parms()
462 f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]); route4_set_parms()
485 struct nlattr *tb[TCA_ROUTE4_MAX + 1]; route4_change() local
493 err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy); route4_change()
518 err = route4_set_parms(net, tp, base, f, handle, head, tb, route4_change()
380 route4_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct route4_filter *f, u32 handle, struct route4_head *head, struct nlattr **tb, struct nlattr *est, int new, bool ovr) route4_set_parms() argument
H A Dact_simple.c83 struct nlattr *tb[TCA_DEF_MAX + 1]; tcf_simp_init() local
92 err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy); tcf_simp_init()
96 if (tb[TCA_DEF_PARMS] == NULL) tcf_simp_init()
99 if (tb[TCA_DEF_DATA] == NULL) tcf_simp_init()
102 parm = nla_data(tb[TCA_DEF_PARMS]); tcf_simp_init()
103 defdata = nla_data(tb[TCA_DEF_DATA]); tcf_simp_init()
H A Dsch_pie.c185 struct nlattr *tb[TCA_PIE_MAX + 1]; pie_change() local
192 err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy); pie_change()
199 if (tb[TCA_PIE_TARGET]) { pie_change()
201 u32 target = nla_get_u32(tb[TCA_PIE_TARGET]); pie_change()
208 if (tb[TCA_PIE_TUPDATE]) pie_change()
209 q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])); pie_change()
211 if (tb[TCA_PIE_LIMIT]) { pie_change()
212 u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]); pie_change()
218 if (tb[TCA_PIE_ALPHA]) pie_change()
219 q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]); pie_change()
221 if (tb[TCA_PIE_BETA]) pie_change()
222 q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]); pie_change()
224 if (tb[TCA_PIE_ECN]) pie_change()
225 q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]); pie_change()
227 if (tb[TCA_PIE_BYTEMODE]) pie_change()
228 q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]); pie_change()
H A Dsch_tbf.c316 struct nlattr *tb[TCA_TBF_MAX + 1]; tbf_change() local
325 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); tbf_change()
330 if (tb[TCA_TBF_PARMS] == NULL) tbf_change()
333 qopt = nla_data(tb[TCA_TBF_PARMS]); tbf_change()
336 tb[TCA_TBF_RTAB])); tbf_change()
340 tb[TCA_TBF_PTAB])); tbf_change()
345 if (tb[TCA_TBF_RATE64]) tbf_change()
346 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); tbf_change()
349 if (tb[TCA_TBF_BURST]) { tbf_change()
350 max_size = nla_get_u32(tb[TCA_TBF_BURST]); tbf_change()
357 if (tb[TCA_TBF_PRATE64]) tbf_change()
358 prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); tbf_change()
367 if (tb[TCA_TBF_PBURST]) { tbf_change()
368 u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]); tbf_change()
407 if (tb[TCA_TBF_PBURST]) tbf_change()
412 if (tb[TCA_TBF_BURST]) tbf_change()
H A Dsch_fq.c660 struct nlattr *tb[TCA_FQ_MAX + 1]; fq_change() local
667 err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy); fq_change()
675 if (tb[TCA_FQ_BUCKETS_LOG]) { fq_change()
676 u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]); fq_change()
683 if (tb[TCA_FQ_PLIMIT]) fq_change()
684 sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]); fq_change()
686 if (tb[TCA_FQ_FLOW_PLIMIT]) fq_change()
687 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); fq_change()
689 if (tb[TCA_FQ_QUANTUM]) { fq_change()
690 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); fq_change()
698 if (tb[TCA_FQ_INITIAL_QUANTUM]) fq_change()
699 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); fq_change()
701 if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) fq_change()
703 nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE])); fq_change()
705 if (tb[TCA_FQ_FLOW_MAX_RATE]) fq_change()
706 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); fq_change()
708 if (tb[TCA_FQ_RATE_ENABLE]) { fq_change()
709 u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]); fq_change()
717 if (tb[TCA_FQ_FLOW_REFILL_DELAY]) { fq_change()
718 u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ; fq_change()
723 if (tb[TCA_FQ_ORPHAN_MASK]) fq_change()
724 q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]); fq_change()
H A Dcls_tcindex.c218 struct tcindex_filter_result *r, struct nlattr **tb, tcindex_set_parms()
229 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); tcindex_set_parms()
268 if (tb[TCA_TCINDEX_HASH]) tcindex_set_parms()
269 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); tcindex_set_parms()
271 if (tb[TCA_TCINDEX_MASK]) tcindex_set_parms()
272 cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); tcindex_set_parms()
274 if (tb[TCA_TCINDEX_SHIFT]) tcindex_set_parms()
275 cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); tcindex_set_parms()
291 if (tb[TCA_TCINDEX_FALL_THROUGH]) tcindex_set_parms()
292 cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); tcindex_set_parms()
359 if (tb[TCA_TCINDEX_CLASSID]) { tcindex_set_parms()
360 cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); tcindex_set_parms()
412 struct nlattr *tb[TCA_TCINDEX_MAX + 1]; tcindex_change() local
424 err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy); tcindex_change()
428 return tcindex_set_parms(net, tp, base, handle, p, r, tb, tcindex_change()
216 tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, struct tcindex_filter_result *r, struct nlattr **tb, struct nlattr *est, bool ovr) tcindex_set_parms() argument
H A Dsch_gred.c423 struct nlattr *tb[TCA_GRED_MAX + 1]; gred_change() local
432 err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); gred_change()
436 if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) gred_change()
439 if (tb[TCA_GRED_PARMS] == NULL || gred_change()
440 tb[TCA_GRED_STAB] == NULL) gred_change()
443 max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; gred_change()
446 ctl = nla_data(tb[TCA_GRED_PARMS]); gred_change()
447 stab = nla_data(tb[TCA_GRED_STAB]); gred_change()
491 struct nlattr *tb[TCA_GRED_MAX + 1]; gred_init() local
497 err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); gred_init()
501 if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) gred_init()
504 return gred_change_table_def(sch, tb[TCA_GRED_DPS]); gred_init()
H A Dcls_basic.c134 struct nlattr **tb, basic_set_parms()
142 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); basic_set_parms()
146 err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t); basic_set_parms()
150 if (tb[TCA_BASIC_CLASSID]) { basic_set_parms()
151 f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]); basic_set_parms()
171 struct nlattr *tb[TCA_BASIC_MAX + 1]; basic_change() local
178 err = nla_parse_nested(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS], basic_change()
213 err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); basic_change()
132 basic_set_parms(struct net *net, struct tcf_proto *tp, struct basic_filter *f, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) basic_set_parms() argument
H A Dsch_red.c182 struct nlattr *tb[TCA_RED_MAX + 1]; red_change() local
191 err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy); red_change()
195 if (tb[TCA_RED_PARMS] == NULL || red_change()
196 tb[TCA_RED_STAB] == NULL) red_change()
199 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; red_change()
201 ctl = nla_data(tb[TCA_RED_PARMS]); red_change()
221 nla_data(tb[TCA_RED_STAB]), red_change()
H A Dsch_fq_codel.c307 struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; fq_codel_change() local
313 err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy); fq_codel_change()
316 if (tb[TCA_FQ_CODEL_FLOWS]) { fq_codel_change()
319 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); fq_codel_change()
326 if (tb[TCA_FQ_CODEL_TARGET]) { fq_codel_change()
327 u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]); fq_codel_change()
332 if (tb[TCA_FQ_CODEL_INTERVAL]) { fq_codel_change()
333 u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); fq_codel_change()
338 if (tb[TCA_FQ_CODEL_LIMIT]) fq_codel_change()
339 sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]); fq_codel_change()
341 if (tb[TCA_FQ_CODEL_ECN]) fq_codel_change()
342 q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]); fq_codel_change()
344 if (tb[TCA_FQ_CODEL_QUANTUM]) fq_codel_change()
345 q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); fq_codel_change()
H A Dact_pedit.c36 struct nlattr *tb[TCA_PEDIT_MAX + 1]; tcf_pedit_init() local
46 err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy); tcf_pedit_init()
50 if (tb[TCA_PEDIT_PARMS] == NULL) tcf_pedit_init()
52 parm = nla_data(tb[TCA_PEDIT_PARMS]); tcf_pedit_init()
54 if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize) tcf_pedit_init()
H A Dsch_cbq.c1346 struct nlattr *tb[TCA_CBQ_MAX + 1]; cbq_init() local
1350 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); cbq_init()
1354 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL) cbq_init()
1357 r = nla_data(tb[TCA_CBQ_RATE]); cbq_init()
1359 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) cbq_init()
1396 if (tb[TCA_CBQ_LSSOPT]) cbq_init()
1397 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); cbq_init()
1732 struct nlattr *tb[TCA_CBQ_MAX + 1]; cbq_change_class() local
1739 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); cbq_change_class()
1753 if (tb[TCA_CBQ_RATE]) { cbq_change_class()
1754 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), cbq_change_class()
1755 tb[TCA_CBQ_RTAB]); cbq_change_class()
1782 if (tb[TCA_CBQ_LSSOPT]) cbq_change_class()
1783 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); cbq_change_class()
1785 if (tb[TCA_CBQ_WRROPT]) { cbq_change_class()
1787 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); cbq_change_class()
1790 if (tb[TCA_CBQ_OVL_STRATEGY]) cbq_change_class()
1791 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); cbq_change_class()
1794 if (tb[TCA_CBQ_POLICE]) cbq_change_class()
1795 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); cbq_change_class()
1798 if (tb[TCA_CBQ_FOPT]) cbq_change_class()
1799 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); cbq_change_class()
1812 if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL || cbq_change_class()
1813 tb[TCA_CBQ_LSSOPT] == NULL) cbq_change_class()
1816 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]); cbq_change_class()
1884 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); cbq_change_class()
1885 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); cbq_change_class()
1893 if (tb[TCA_CBQ_OVL_STRATEGY]) cbq_change_class()
1894 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY])); cbq_change_class()
1896 if (tb[TCA_CBQ_POLICE]) cbq_change_class()
1897 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE])); cbq_change_class()
1899 if (tb[TCA_CBQ_FOPT]) cbq_change_class()
1900 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT])); cbq_change_class()
H A Dact_mirred.c51 struct nlattr *tb[TCA_MIRRED_MAX + 1]; tcf_mirred_init() local
59 ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); tcf_mirred_init()
62 if (tb[TCA_MIRRED_PARMS] == NULL) tcf_mirred_init()
64 parm = nla_data(tb[TCA_MIRRED_PARMS]); tcf_mirred_init()
H A Dcls_cgroup.c95 struct nlattr *tb[TCA_CGROUP_MAX + 1]; cls_cgroup_change() local
118 err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], cls_cgroup_change()
124 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); cls_cgroup_change()
128 err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); cls_cgroup_change()
H A Dsch_hhf.c537 struct nlattr *tb[TCA_HHF_MAX + 1]; hhf_change() local
547 err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy); hhf_change()
551 if (tb[TCA_HHF_QUANTUM]) hhf_change()
552 new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]); hhf_change()
554 if (tb[TCA_HHF_NON_HH_WEIGHT]) hhf_change()
555 new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]); hhf_change()
563 if (tb[TCA_HHF_BACKLOG_LIMIT]) hhf_change()
564 sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); hhf_change()
569 if (tb[TCA_HHF_HH_FLOWS_LIMIT]) hhf_change()
570 q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]); hhf_change()
572 if (tb[TCA_HHF_RESET_TIMEOUT]) { hhf_change()
573 u32 us = nla_get_u32(tb[TCA_HHF_RESET_TIMEOUT]); hhf_change()
578 if (tb[TCA_HHF_ADMIT_BYTES]) hhf_change()
579 q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]); hhf_change()
581 if (tb[TCA_HHF_EVICT_TIMEOUT]) { hhf_change()
582 u32 us = nla_get_u32(tb[TCA_HHF_EVICT_TIMEOUT]); hhf_change()
H A Dsch_netem.c782 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, parse_attr() argument
793 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), parse_attr()
796 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); parse_attr()
804 struct nlattr *tb[TCA_NETEM_MAX + 1]; netem_change() local
814 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); netem_change()
822 if (tb[TCA_NETEM_LOSS]) { netem_change()
823 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); netem_change()
832 if (tb[TCA_NETEM_DELAY_DIST]) { netem_change()
833 ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); netem_change()
861 if (tb[TCA_NETEM_CORR]) netem_change()
862 get_correlation(q, tb[TCA_NETEM_CORR]); netem_change()
864 if (tb[TCA_NETEM_REORDER]) netem_change()
865 get_reorder(q, tb[TCA_NETEM_REORDER]); netem_change()
867 if (tb[TCA_NETEM_CORRUPT]) netem_change()
868 get_corrupt(q, tb[TCA_NETEM_CORRUPT]); netem_change()
870 if (tb[TCA_NETEM_RATE]) netem_change()
871 get_rate(q, tb[TCA_NETEM_RATE]); netem_change()
873 if (tb[TCA_NETEM_RATE64]) netem_change()
875 nla_get_u64(tb[TCA_NETEM_RATE64])); netem_change()
877 if (tb[TCA_NETEM_ECN]) netem_change()
878 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); netem_change()
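parse_attr() above is netem's twist on the same parse: its options TLV starts with a fixed struct tc_netem_qopt, so the helper skips NLA_ALIGN(len) bytes of struct before handing the tail to nla_parse(), and zeroes tb[] when nothing follows the struct. The shape of the helper, reconstructed from the excerpted lines with the length check in between paraphrased rather than quoted:

    static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
                          const struct nla_policy *policy, int len)
    {
        int nested_len = nla_len(nla) - NLA_ALIGN(len);

        if (nested_len < 0)                     /* blob shorter than struct */
            return -EINVAL;

        if (nested_len >= nla_attr_size(0))     /* attributes follow */
            return nla_parse(tb, maxtype,
                             nla_data(nla) + NLA_ALIGN(len),
                             nested_len, policy);

        /* Struct only: leave tb[] all-NULL so callers see no attributes. */
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
        return 0;
    }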
H A Dcls_rsvp.h467 struct nlattr *tb[TCA_RSVP_MAX + 1]; rsvp_change() local
476 err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy); rsvp_change()
481 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); rsvp_change()
501 if (tb[TCA_RSVP_CLASSID]) { rsvp_change()
502 n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); rsvp_change()
515 if (tb[TCA_RSVP_DST] == NULL) rsvp_change()
525 if (tb[TCA_RSVP_SRC]) { rsvp_change()
526 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); rsvp_change()
529 if (tb[TCA_RSVP_PINFO]) { rsvp_change()
530 pinfo = nla_data(tb[TCA_RSVP_PINFO]); rsvp_change()
534 if (tb[TCA_RSVP_CLASSID]) rsvp_change()
535 f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); rsvp_change()
537 dst = nla_data(tb[TCA_RSVP_DST]); rsvp_change()
H A Dcls_u32.c594 struct tc_u_knode *n, struct nlattr **tb, u32_set_parms()
601 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); u32_set_parms()
606 if (tb[TCA_U32_LINK]) { u32_set_parms()
607 u32 handle = nla_get_u32(tb[TCA_U32_LINK]); u32_set_parms()
627 if (tb[TCA_U32_CLASSID]) { u32_set_parms()
628 n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]); u32_set_parms()
633 if (tb[TCA_U32_INDEV]) { u32_set_parms()
635 ret = tcf_change_indev(net, tb[TCA_U32_INDEV]); u32_set_parms()
735 struct nlattr *tb[TCA_U32_MAX + 1]; u32_change() local
745 err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy); u32_change()
761 rtnl_dereference(n->ht_up), new, tb, u32_change()
775 if (tb[TCA_U32_DIVISOR]) { u32_change()
776 unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); u32_change()
801 if (tb[TCA_U32_HASH]) { u32_change()
802 htid = nla_get_u32(tb[TCA_U32_HASH]); u32_change()
826 if (tb[TCA_U32_SEL] == NULL) u32_change()
829 s = nla_data(tb[TCA_U32_SEL]); u32_change()
858 if (tb[TCA_U32_MARK]) { u32_change()
861 mark = nla_data(tb[TCA_U32_MARK]); u32_change()
867 err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr); u32_change()
592 u32_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tc_u_hnode *ht, struct tc_u_knode *n, struct nlattr **tb, struct nlattr *est, bool ovr) u32_set_parms() argument
H A Dact_connmark.c96 struct nlattr *tb[TCA_CONNMARK_MAX + 1]; tcf_connmark_init() local
104 ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy); tcf_connmark_init()
108 parm = nla_data(tb[TCA_CONNMARK_PARMS]); tcf_connmark_init()
H A Dsch_choke.c412 struct nlattr *tb[TCA_CHOKE_MAX + 1]; choke_change() local
422 err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy); choke_change()
426 if (tb[TCA_CHOKE_PARMS] == NULL || choke_change()
427 tb[TCA_CHOKE_STAB] == NULL) choke_change()
430 max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; choke_change()
432 ctl = nla_data(tb[TCA_CHOKE_PARMS]); choke_change()
482 nla_data(tb[TCA_CHOKE_STAB]), choke_change()
H A Dact_nat.c41 struct nlattr *tb[TCA_NAT_MAX + 1]; tcf_nat_init() local
49 err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy); tcf_nat_init()
53 if (tb[TCA_NAT_PARMS] == NULL) tcf_nat_init()
55 parm = nla_data(tb[TCA_NAT_PARMS]); tcf_nat_init()
H A Dcls_api.c518 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, tcf_exts_validate() argument
526 if (exts->police && tb[exts->police]) { tcf_exts_validate()
527 act = tcf_action_init_1(net, tb[exts->police], rate_tlv, tcf_exts_validate()
535 } else if (exts->action && tb[exts->action]) { tcf_exts_validate()
537 err = tcf_action_init(net, tb[exts->action], rate_tlv, tcf_exts_validate()
545 if ((exts->action && tb[exts->action]) || tcf_exts_validate()
546 (exts->police && tb[exts->police])) tcf_exts_validate()
H A Dsch_drr.c71 struct nlattr *tb[TCA_DRR_MAX + 1]; drr_change_class() local
78 err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy); drr_change_class()
82 if (tb[TCA_DRR_QUANTUM]) { drr_change_class()
83 quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]); drr_change_class()
100 if (tb[TCA_DRR_QUANTUM]) drr_change_class()
H A Dsch_atm.c192 struct nlattr *tb[TCA_ATM_MAX + 1]; atm_tc_change() local
216 error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy); atm_tc_change()
220 if (!tb[TCA_ATM_FD]) atm_tc_change()
222 fd = nla_get_u32(tb[TCA_ATM_FD]); atm_tc_change()
224 if (tb[TCA_ATM_HDR]) { atm_tc_change()
225 hdr_len = nla_len(tb[TCA_ATM_HDR]); atm_tc_change()
226 hdr = nla_data(tb[TCA_ATM_HDR]); atm_tc_change()
231 if (!tb[TCA_ATM_EXCESS]) atm_tc_change()
235 atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS])); atm_tc_change()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
H A Dtestmode.c110 static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_get_version() argument
142 static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_utf_start() argument
263 static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_utf_stop() argument
287 static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) ath10k_tm_cmd_wmi() argument
301 if (!tb[ATH10K_TM_ATTR_DATA]) { ath10k_tm_cmd_wmi()
306 if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) { ath10k_tm_cmd_wmi()
311 buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); ath10k_tm_cmd_wmi()
312 buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); ath10k_tm_cmd_wmi()
313 cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]); ath10k_tm_cmd_wmi()
347 struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; ath10k_tm_cmd() local
350 ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_cmd()
355 if (!tb[ATH10K_TM_ATTR_CMD]) ath10k_tm_cmd()
358 switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) { ath10k_tm_cmd()
360 return ath10k_tm_cmd_get_version(ar, tb); ath10k_tm_cmd()
362 return ath10k_tm_cmd_utf_start(ar, tb); ath10k_tm_cmd()
364 return ath10k_tm_cmd_utf_stop(ar, tb); ath10k_tm_cmd()
366 return ath10k_tm_cmd_wmi(ar, tb); ath10k_tm_cmd()
H A Dwmi-tlv.c123 const void **tb = data; ath10k_wmi_tlv_iter_parse() local
126 tb[tag] = ptr; ath10k_wmi_tlv_iter_parse()
131 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, ath10k_wmi_tlv_parse() argument
135 (void *)tb); ath10k_wmi_tlv_parse()
142 const void **tb; ath10k_wmi_tlv_parse_alloc() local
145 tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp); ath10k_wmi_tlv_parse_alloc()
146 if (!tb) ath10k_wmi_tlv_parse_alloc()
149 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); ath10k_wmi_tlv_parse_alloc()
151 kfree(tb); ath10k_wmi_tlv_parse_alloc()
155 return tb; ath10k_wmi_tlv_parse_alloc()
169 const void **tb; ath10k_wmi_tlv_event_bcn_tx_status() local
174 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_bcn_tx_status()
175 if (IS_ERR(tb)) { ath10k_wmi_tlv_event_bcn_tx_status()
176 ret = PTR_ERR(tb); ath10k_wmi_tlv_event_bcn_tx_status()
181 ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; ath10k_wmi_tlv_event_bcn_tx_status()
183 kfree(tb); ath10k_wmi_tlv_event_bcn_tx_status()
204 kfree(tb); ath10k_wmi_tlv_event_bcn_tx_status()
211 const void **tb; ath10k_wmi_tlv_event_diag_data() local
217 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_diag_data()
218 if (IS_ERR(tb)) { ath10k_wmi_tlv_event_diag_data()
219 ret = PTR_ERR(tb); ath10k_wmi_tlv_event_diag_data()
224 ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]; ath10k_wmi_tlv_event_diag_data()
225 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; ath10k_wmi_tlv_event_diag_data()
227 kfree(tb); ath10k_wmi_tlv_event_diag_data()
267 kfree(tb); ath10k_wmi_tlv_event_diag_data()
274 const void **tb; ath10k_wmi_tlv_event_diag() local
278 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_event_diag()
279 if (IS_ERR(tb)) { ath10k_wmi_tlv_event_diag()
280 ret = PTR_ERR(tb); ath10k_wmi_tlv_event_diag()
285 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; ath10k_wmi_tlv_event_diag()
287 kfree(tb); ath10k_wmi_tlv_event_diag()
295 kfree(tb); ath10k_wmi_tlv_event_diag()
432 const void **tb; ath10k_wmi_tlv_op_pull_scan_ev() local
436 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_scan_ev()
437 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_scan_ev()
438 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_scan_ev()
443 ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT]; ath10k_wmi_tlv_op_pull_scan_ev()
445 kfree(tb); ath10k_wmi_tlv_op_pull_scan_ev()
456 kfree(tb); ath10k_wmi_tlv_op_pull_scan_ev()
464 const void **tb; ath10k_wmi_tlv_op_pull_mgmt_rx_ev() local
470 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
471 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
472 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
477 ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]; ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
478 frame = tb[WMI_TLV_TAG_ARRAY_BYTE]; ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
481 kfree(tb); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
495 kfree(tb); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
505 kfree(tb); ath10k_wmi_tlv_op_pull_mgmt_rx_ev()
513 const void **tb; ath10k_wmi_tlv_op_pull_ch_info_ev() local
517 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_ch_info_ev()
518 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_ch_info_ev()
519 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_ch_info_ev()
524 ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]; ath10k_wmi_tlv_op_pull_ch_info_ev()
526 kfree(tb); ath10k_wmi_tlv_op_pull_ch_info_ev()
537 kfree(tb); ath10k_wmi_tlv_op_pull_ch_info_ev()
545 const void **tb; ath10k_wmi_tlv_op_pull_vdev_start_ev() local
549 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_vdev_start_ev()
550 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_vdev_start_ev()
551 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_vdev_start_ev()
556 ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]; ath10k_wmi_tlv_op_pull_vdev_start_ev()
558 kfree(tb); ath10k_wmi_tlv_op_pull_vdev_start_ev()
568 kfree(tb); ath10k_wmi_tlv_op_pull_vdev_start_ev()
576 const void **tb; ath10k_wmi_tlv_op_pull_peer_kick_ev() local
580 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_peer_kick_ev()
581 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_peer_kick_ev()
582 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_peer_kick_ev()
587 ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]; ath10k_wmi_tlv_op_pull_peer_kick_ev()
589 kfree(tb); ath10k_wmi_tlv_op_pull_peer_kick_ev()
595 kfree(tb); ath10k_wmi_tlv_op_pull_peer_kick_ev()
707 const void **tb; ath10k_wmi_tlv_op_pull_phyerr_ev() local
712 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_phyerr_ev()
713 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_phyerr_ev()
714 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_phyerr_ev()
719 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR]; ath10k_wmi_tlv_op_pull_phyerr_ev()
720 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE]; ath10k_wmi_tlv_op_pull_phyerr_ev()
723 kfree(tb); ath10k_wmi_tlv_op_pull_phyerr_ev()
733 kfree(tb); ath10k_wmi_tlv_op_pull_phyerr_ev()
772 const void **tb; ath10k_wmi_tlv_op_pull_svc_rdy_ev() local
779 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
780 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_svc_rdy_ev()
781 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
786 ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]; ath10k_wmi_tlv_op_pull_svc_rdy_ev()
787 reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]; ath10k_wmi_tlv_op_pull_svc_rdy_ev()
788 svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32]; ath10k_wmi_tlv_op_pull_svc_rdy_ev()
789 mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT]; ath10k_wmi_tlv_op_pull_svc_rdy_ev()
792 kfree(tb); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
812 kfree(tb); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
833 kfree(tb); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
838 kfree(tb); ath10k_wmi_tlv_op_pull_svc_rdy_ev()
846 const void **tb; ath10k_wmi_tlv_op_pull_rdy_ev() local
850 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_rdy_ev()
851 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_rdy_ev()
852 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_rdy_ev()
857 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT]; ath10k_wmi_tlv_op_pull_rdy_ev()
859 kfree(tb); ath10k_wmi_tlv_op_pull_rdy_ev()
868 kfree(tb); ath10k_wmi_tlv_op_pull_rdy_ev()
912 const void **tb; ath10k_wmi_tlv_op_pull_fw_stats() local
924 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); ath10k_wmi_tlv_op_pull_fw_stats()
925 if (IS_ERR(tb)) { ath10k_wmi_tlv_op_pull_fw_stats()
926 ret = PTR_ERR(tb); ath10k_wmi_tlv_op_pull_fw_stats()
931 ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT]; ath10k_wmi_tlv_op_pull_fw_stats()
932 data = tb[WMI_TLV_TAG_ARRAY_BYTE]; ath10k_wmi_tlv_op_pull_fw_stats()
935 kfree(tb); ath10k_wmi_tlv_op_pull_fw_stats()
1011 kfree(tb); ath10k_wmi_tlv_op_pull_fw_stats()
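ath10k uses two different tb shapes: testmode.c above keeps the ordinary netlink struct nlattr *tb[], while wmi-tlv.c's tb is a kmalloc'ed array of const void * indexed by WMI TLV tag. Every event handler in the wmi-tlv.c results repeats the same pull pattern, roughly as below (the event struct and tag names are placeholders, not real WMI definitions):

    struct wmi_tlv_foo_event { __le32 status; };   /* placeholder type */

    /* Sketch of the wmi-tlv event-pull pattern, not a real handler. */
    static int pull_foo_event(struct ath10k *ar, struct sk_buff *skb)
    {
        const struct wmi_tlv_foo_event *ev;
        const void **tb;
        u32 status;

        tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
        if (IS_ERR(tb))
            return PTR_ERR(tb);         /* parse failed, nothing to free */

        ev = tb[WMI_TLV_TAG_STRUCT_FOO_EVENT];  /* placeholder tag */
        if (!ev) {
            kfree(tb);                  /* tag absent from this event */
            return -EPROTO;
        }

        /* tb[] entries point into skb->data, so they are only valid
         * while the skb is; copy what is needed before returning. */
        status = __le32_to_cpu(ev->status);

        kfree(tb);      /* frees the tag index, not the skb payload */
        return status ? -EINVAL : 0;
    }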
/linux-4.1.27/net/core/
H A Dfib_rules.c243 static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, validate_rulemsg() argument
249 if (tb[FRA_SRC] == NULL || validate_rulemsg()
251 nla_len(tb[FRA_SRC]) != ops->addr_size) validate_rulemsg()
255 if (tb[FRA_DST] == NULL || validate_rulemsg()
257 nla_len(tb[FRA_DST]) != ops->addr_size) validate_rulemsg()
271 struct nlattr *tb[FRA_MAX+1]; fib_nl_newrule() local
283 err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); fib_nl_newrule()
287 err = validate_rulemsg(frh, tb, ops); fib_nl_newrule()
298 if (tb[FRA_PRIORITY]) fib_nl_newrule()
299 rule->pref = nla_get_u32(tb[FRA_PRIORITY]); fib_nl_newrule()
301 if (tb[FRA_IIFNAME]) { fib_nl_newrule()
305 nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ); fib_nl_newrule()
311 if (tb[FRA_OIFNAME]) { fib_nl_newrule()
315 nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ); fib_nl_newrule()
321 if (tb[FRA_FWMARK]) { fib_nl_newrule()
322 rule->mark = nla_get_u32(tb[FRA_FWMARK]); fib_nl_newrule()
330 if (tb[FRA_FWMASK]) fib_nl_newrule()
331 rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]); fib_nl_newrule()
335 rule->table = frh_get_table(frh, tb); fib_nl_newrule()
336 if (tb[FRA_SUPPRESS_PREFIXLEN]) fib_nl_newrule()
337 rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]); fib_nl_newrule()
341 if (tb[FRA_SUPPRESS_IFGROUP]) fib_nl_newrule()
342 rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]); fib_nl_newrule()
346 if (!tb[FRA_PRIORITY] && ops->default_pref) fib_nl_newrule()
350 if (tb[FRA_GOTO]) { fib_nl_newrule()
354 rule->target = nla_get_u32(tb[FRA_GOTO]); fib_nl_newrule()
371 err = ops->configure(rule, skb, frh, tb); fib_nl_newrule()
428 struct nlattr *tb[FRA_MAX+1]; fib_nl_delrule() local
440 err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy); fib_nl_delrule()
444 err = validate_rulemsg(frh, tb, ops); fib_nl_delrule()
452 if (frh_get_table(frh, tb) && fib_nl_delrule()
453 (frh_get_table(frh, tb) != rule->table)) fib_nl_delrule()
456 if (tb[FRA_PRIORITY] && fib_nl_delrule()
457 (rule->pref != nla_get_u32(tb[FRA_PRIORITY]))) fib_nl_delrule()
460 if (tb[FRA_IIFNAME] && fib_nl_delrule()
461 nla_strcmp(tb[FRA_IIFNAME], rule->iifname)) fib_nl_delrule()
464 if (tb[FRA_OIFNAME] && fib_nl_delrule()
465 nla_strcmp(tb[FRA_OIFNAME], rule->oifname)) fib_nl_delrule()
468 if (tb[FRA_FWMARK] && fib_nl_delrule()
469 (rule->mark != nla_get_u32(tb[FRA_FWMARK]))) fib_nl_delrule()
472 if (tb[FRA_FWMASK] && fib_nl_delrule()
473 (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK]))) fib_nl_delrule()
476 if (!ops->compare(rule, frh, tb)) fib_nl_delrule()
H A Drtnetlink.c1321 struct nlattr *tb[IFLA_MAX+1]; rtnl_dump_ifinfo() local
1341 if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { rtnl_dump_ifinfo()
1343 if (tb[IFLA_EXT_MASK]) rtnl_dump_ifinfo()
1344 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); rtnl_dump_ifinfo()
1378 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len) rtnl_nla_parse_ifla() argument
1380 return nla_parse(tb, IFLA_MAX, head, len, ifla_policy); rtnl_nla_parse_ifla()
1384 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) rtnl_link_get_net() argument
1390 if (tb[IFLA_NET_NS_PID]) rtnl_link_get_net()
1391 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); rtnl_link_get_net()
1392 else if (tb[IFLA_NET_NS_FD]) rtnl_link_get_net()
1393 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD])); rtnl_link_get_net()
1400 static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) validate_linkmsg() argument
1403 if (tb[IFLA_ADDRESS] && validate_linkmsg()
1404 nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) validate_linkmsg()
1407 if (tb[IFLA_BROADCAST] && validate_linkmsg()
1408 nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) validate_linkmsg()
1412 if (tb[IFLA_AF_SPEC]) { validate_linkmsg()
1416 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { nla_for_each_nested()
1436 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) do_setvfinfo() argument
1441 if (tb[IFLA_VF_MAC]) { do_setvfinfo()
1442 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); do_setvfinfo()
1452 if (tb[IFLA_VF_VLAN]) { do_setvfinfo()
1453 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); do_setvfinfo()
1463 if (tb[IFLA_VF_TX_RATE]) { do_setvfinfo()
1464 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); do_setvfinfo()
1482 if (tb[IFLA_VF_RATE]) { do_setvfinfo()
1483 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); do_setvfinfo()
1494 if (tb[IFLA_VF_SPOOFCHK]) { do_setvfinfo()
1495 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); do_setvfinfo()
1505 if (tb[IFLA_VF_LINK_STATE]) { do_setvfinfo()
1506 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); do_setvfinfo()
1516 if (tb[IFLA_VF_RSS_QUERY_EN]) { do_setvfinfo()
1520 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); do_setvfinfo()
1571 struct nlattr **tb, char *ifname, int status) do_setlink()
1576 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) { do_setlink()
1577 struct net *net = rtnl_link_get_net(dev_net(dev), tb); do_setlink()
1594 if (tb[IFLA_MAP]) { do_setlink()
1608 u_map = nla_data(tb[IFLA_MAP]); do_setlink()
1623 if (tb[IFLA_ADDRESS]) { do_setlink()
1634 memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), do_setlink()
1643 if (tb[IFLA_MTU]) { do_setlink()
1644 err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); do_setlink()
1650 if (tb[IFLA_GROUP]) { do_setlink()
1651 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); do_setlink()
1667 if (tb[IFLA_IFALIAS]) { do_setlink()
1668 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), do_setlink()
1669 nla_len(tb[IFLA_IFALIAS])); do_setlink()
1675 if (tb[IFLA_BROADCAST]) { do_setlink()
1676 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); do_setlink()
1686 if (tb[IFLA_MASTER]) { do_setlink()
1687 err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER])); do_setlink()
1693 if (tb[IFLA_CARRIER]) { do_setlink()
1694 err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); do_setlink()
1700 if (tb[IFLA_TXQLEN]) { do_setlink()
1701 unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); do_setlink()
1709 if (tb[IFLA_OPERSTATE]) do_setlink()
1710 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); do_setlink()
1712 if (tb[IFLA_LINKMODE]) { do_setlink()
1713 unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); do_setlink()
1722 if (tb[IFLA_VFINFO_LIST]) { do_setlink()
1727 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { nla_for_each_nested()
1745 if (tb[IFLA_VF_PORTS]) {
1755 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { nla_for_each_nested()
1775 if (tb[IFLA_PORT_SELF]) {
1779 tb[IFLA_PORT_SELF], ifla_port_policy);
1791 if (tb[IFLA_AF_SPEC]) {
1795 nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { nla_for_each_nested()
1829 struct nlattr *tb[IFLA_MAX+1]; rtnl_setlink() local
1832 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); rtnl_setlink()
1836 if (tb[IFLA_IFNAME]) rtnl_setlink()
1837 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); rtnl_setlink()
1845 else if (tb[IFLA_IFNAME]) rtnl_setlink()
1855 err = validate_linkmsg(dev, tb); rtnl_setlink()
1859 err = do_setlink(skb, dev, ifm, tb, ifname, 0); rtnl_setlink()
1907 struct nlattr *tb[IFLA_MAX+1]; rtnl_dellink() local
1911 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); rtnl_dellink()
1915 if (tb[IFLA_IFNAME]) rtnl_dellink()
1916 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); rtnl_dellink()
1921 else if (tb[IFLA_IFNAME]) rtnl_dellink()
1923 else if (tb[IFLA_GROUP]) rtnl_dellink()
1924 return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP])); rtnl_dellink()
1961 const struct rtnl_link_ops *ops, struct nlattr *tb[]) rtnl_create_link()
1968 if (tb[IFLA_NUM_TX_QUEUES]) rtnl_create_link()
1969 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); rtnl_create_link()
1973 if (tb[IFLA_NUM_RX_QUEUES]) rtnl_create_link()
1974 num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]); rtnl_create_link()
1988 if (tb[IFLA_MTU]) rtnl_create_link()
1989 dev->mtu = nla_get_u32(tb[IFLA_MTU]); rtnl_create_link()
1990 if (tb[IFLA_ADDRESS]) { rtnl_create_link()
1991 memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), rtnl_create_link()
1992 nla_len(tb[IFLA_ADDRESS])); rtnl_create_link()
1995 if (tb[IFLA_BROADCAST]) rtnl_create_link()
1996 memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), rtnl_create_link()
1997 nla_len(tb[IFLA_BROADCAST])); rtnl_create_link()
1998 if (tb[IFLA_TXQLEN]) rtnl_create_link()
1999 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); rtnl_create_link()
2000 if (tb[IFLA_OPERSTATE]) rtnl_create_link()
2001 set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); rtnl_create_link()
2002 if (tb[IFLA_LINKMODE]) rtnl_create_link()
2003 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); rtnl_create_link()
2004 if (tb[IFLA_GROUP]) rtnl_create_link()
2005 dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); rtnl_create_link()
2017 struct nlattr **tb) rtnl_group_changelink()
2024 err = do_setlink(skb, dev, ifm, tb, NULL, 0); for_each_netdev_safe()
2043 struct nlattr *tb[IFLA_MAX+1]; rtnl_newlink() local
2051 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); rtnl_newlink()
2055 if (tb[IFLA_IFNAME]) rtnl_newlink()
2056 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); rtnl_newlink()
2076 err = validate_linkmsg(dev, tb); rtnl_newlink()
2080 if (tb[IFLA_LINKINFO]) { rtnl_newlink()
2082 tb[IFLA_LINKINFO], ifla_info_policy); rtnl_newlink()
2113 err = ops->validate(tb, data); rtnl_newlink()
2131 err = m_ops->slave_validate(tb, slave_data); rtnl_newlink()
2150 err = ops->changelink(dev, tb, data); rtnl_newlink()
2161 tb, slave_data); rtnl_newlink()
2167 return do_setlink(skb, dev, ifm, tb, ifname, status); rtnl_newlink()
2171 if (ifm->ifi_index == 0 && tb[IFLA_GROUP]) rtnl_newlink()
2173 nla_get_u32(tb[IFLA_GROUP]), rtnl_newlink()
2174 ifm, tb); rtnl_newlink()
2178 if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO]) rtnl_newlink()
2203 dest_net = rtnl_link_get_net(net, tb); rtnl_newlink()
2211 if (tb[IFLA_LINK_NETNSID]) { rtnl_newlink()
2212 int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); rtnl_newlink()
2225 name_assign_type, ops, tb); rtnl_newlink()
2234 err = ops->newlink(link_net ? : net, dev, tb, data); rtnl_newlink()
2283 struct nlattr *tb[IFLA_MAX+1]; rtnl_getlink() local
2289 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); rtnl_getlink()
2293 if (tb[IFLA_IFNAME]) rtnl_getlink()
2294 nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); rtnl_getlink()
2296 if (tb[IFLA_EXT_MASK]) rtnl_getlink()
2297 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); rtnl_getlink()
2302 else if (tb[IFLA_IFNAME]) rtnl_getlink()
2330 struct nlattr *tb[IFLA_MAX+1]; rtnl_calcit() local
2339 if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) { rtnl_calcit()
2340 if (tb[IFLA_EXT_MASK]) rtnl_calcit()
2341 ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); rtnl_calcit()
2501 struct nlattr *tb[], ndo_dflt_fdb_add()
2560 struct nlattr *tb[NDA_MAX+1]; rtnl_fdb_add() local
2566 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); rtnl_fdb_add()
2582 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { rtnl_fdb_add()
2587 addr = nla_data(tb[NDA_LLADDR]); rtnl_fdb_add()
2589 err = fdb_vid_parse(tb[NDA_VLAN], &vid); rtnl_fdb_add()
2601 err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid, rtnl_fdb_add()
2612 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr, rtnl_fdb_add()
2616 err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, rtnl_fdb_add()
2632 struct nlattr *tb[], ndo_dflt_fdb_del()
2659 struct nlattr *tb[NDA_MAX+1]; rtnl_fdb_del() local
2668 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); rtnl_fdb_del()
2684 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) { rtnl_fdb_del()
2689 addr = nla_data(tb[NDA_LLADDR]); rtnl_fdb_del()
2691 err = fdb_vid_parse(tb[NDA_VLAN], &vid); rtnl_fdb_del()
2704 err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid); rtnl_fdb_del()
2715 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr, rtnl_fdb_del()
2718 err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid); rtnl_fdb_del()
2788 struct nlattr *tb[IFLA_MAX+1]; rtnl_fdb_dump() local
2798 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, rtnl_fdb_dump()
2800 if (tb[IFLA_MASTER]) rtnl_fdb_dump()
2801 br_idx = nla_get_u32(tb[IFLA_MASTER]); rtnl_fdb_dump()
1569 do_setlink(const struct sk_buff *skb, struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int status) do_setlink() argument
1959 rtnl_create_link(struct net *net, const char *ifname, unsigned char name_assign_type, const struct rtnl_link_ops *ops, struct nlattr *tb[]) rtnl_create_link() argument
2014 rtnl_group_changelink(const struct sk_buff *skb, struct net *net, int group, struct ifinfomsg *ifm, struct nlattr **tb) rtnl_group_changelink() argument
2500 ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 flags) ndo_dflt_fdb_add() argument
2631 ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid) ndo_dflt_fdb_del() argument
H A Dnet_namespace.c499 struct nlattr *tb[NETNSA_MAX + 1]; rtnl_net_newid() local
503 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_newid()
507 if (!tb[NETNSA_NSID]) rtnl_net_newid()
509 nsid = nla_get_s32(tb[NETNSA_NSID]); rtnl_net_newid()
511 if (tb[NETNSA_PID]) rtnl_net_newid()
512 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); rtnl_net_newid()
513 else if (tb[NETNSA_FD]) rtnl_net_newid()
514 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); rtnl_net_newid()
578 struct nlattr *tb[NETNSA_MAX + 1]; rtnl_net_getid() local
583 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_getid()
587 if (tb[NETNSA_PID]) rtnl_net_getid()
588 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); rtnl_net_getid()
589 else if (tb[NETNSA_FD]) rtnl_net_getid()
590 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); rtnl_net_getid()
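In net/core the table starts one level up: nlmsg_parse() skips the fixed per-family header (sizeof(*ifm) in rtnetlink.c, sizeof(struct rtgenmsg) in net_namespace.c) before filling tb[], after which the tb[IFLA_*]/tb[NETNSA_*] tests look exactly like the nested case. A compressed sketch of the rtnl_setlink()-style parse (hypothetical handler, real IFLA attribute names):

    static int handle_ifinfomsg(struct nlmsghdr *nlh)
    {
        struct ifinfomsg *ifm = nlmsg_data(nlh);
        struct nlattr *tb[IFLA_MAX + 1];
        char ifname[IFNAMSIZ] = "";
        int err;

        /* Skip sizeof(*ifm) of fixed header, then parse the attributes. */
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
        if (err < 0)
            return err;

        if (tb[IFLA_IFNAME])
            nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
        if (tb[IFLA_MTU])
            pr_debug("mtu %u\n", nla_get_u32(tb[IFLA_MTU]));

        return ifm->ifi_index;  /* caller resolves the device */
    }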
/linux-4.1.27/crypto/
H A Dzlib.c86 struct nlattr *tb[ZLIB_COMP_MAX + 1]; zlib_compress_setup() local
91 ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL); zlib_compress_setup()
97 window_bits = tb[ZLIB_COMP_WINDOWBITS] zlib_compress_setup()
98 ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) zlib_compress_setup()
100 mem_level = tb[ZLIB_COMP_MEMLEVEL] zlib_compress_setup()
101 ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) zlib_compress_setup()
110 tb[ZLIB_COMP_LEVEL] zlib_compress_setup()
111 ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) zlib_compress_setup()
113 tb[ZLIB_COMP_METHOD] zlib_compress_setup()
114 ? nla_get_u32(tb[ZLIB_COMP_METHOD]) zlib_compress_setup()
118 tb[ZLIB_COMP_STRATEGY] zlib_compress_setup()
119 ? nla_get_u32(tb[ZLIB_COMP_STRATEGY]) zlib_compress_setup()
217 struct nlattr *tb[ZLIB_DECOMP_MAX + 1]; zlib_decompress_setup() local
220 ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL); zlib_decompress_setup()
226 ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS] zlib_decompress_setup()
227 ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS]) zlib_decompress_setup()
H A Darc4.c58 u32 ty, ta, tb; arc4_crypt() local
77 tb = S[ty]; arc4_crypt()
83 b = tb; arc4_crypt()
H A Dseqiv.c261 static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb) seqiv_ablkcipher_alloc() argument
265 inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0); seqiv_ablkcipher_alloc()
287 static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb) seqiv_aead_alloc() argument
291 inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0); seqiv_aead_alloc()
313 static struct crypto_instance *seqiv_alloc(struct rtattr **tb) seqiv_alloc() argument
319 algt = crypto_get_attr_type(tb); seqiv_alloc()
328 inst = seqiv_ablkcipher_alloc(tb); seqiv_alloc()
330 inst = seqiv_aead_alloc(tb); seqiv_alloc()
H A Dcryptd.c171 static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, cryptd_check_internal() argument
176 algt = crypto_get_attr_type(tb); cryptd_check_internal()
332 struct rtattr **tb, cryptd_create_blkcipher()
342 cryptd_check_internal(tb, &type, &mask); cryptd_create_blkcipher()
344 alg = crypto_get_attr_alg(tb, type, mask); cryptd_create_blkcipher()
593 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, cryptd_create_hash() argument
604 cryptd_check_internal(tb, &type, &mask); cryptd_create_hash()
606 salg = shash_attr_alg(tb[1], type, mask); cryptd_create_hash()
743 struct rtattr **tb, cryptd_create_aead()
753 cryptd_check_internal(tb, &type, &mask); cryptd_create_aead()
755 alg = crypto_get_attr_alg(tb, type, mask); cryptd_create_aead()
803 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) cryptd_create() argument
807 algt = crypto_get_attr_type(tb); cryptd_create()
813 return cryptd_create_blkcipher(tmpl, tb, &queue); cryptd_create()
815 return cryptd_create_hash(tmpl, tb, &queue); cryptd_create()
817 return cryptd_create_aead(tmpl, tb, &queue); cryptd_create()
331 cryptd_create_blkcipher(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) cryptd_create_blkcipher() argument
742 cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) cryptd_create_aead() argument
H A Dalgboss.c29 struct rtattr *tb[CRYPTO_MAX_ATTRS + 2]; member in struct:cryptomgr_param
75 err = tmpl->create(tmpl, param->tb); cryptomgr_probe()
79 inst = tmpl->alloc(param->tb); cryptomgr_probe()
163 param->tb[i + 1] = &param->attrs[i].attr; cryptomgr_schedule_probe()
179 param->tb[i + 1] = NULL; cryptomgr_schedule_probe()
185 param->tb[0] = &param->type.attr; cryptomgr_schedule_probe()
H A Dccm.c475 static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, crypto_ccm_alloc_common() argument
487 algt = crypto_get_attr_type(tb); crypto_ccm_alloc_common()
573 static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb) crypto_ccm_alloc() argument
579 cipher_name = crypto_attr_alg_name(tb[1]); crypto_ccm_alloc()
591 return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); crypto_ccm_alloc()
610 static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb) crypto_ccm_base_alloc() argument
616 ctr_name = crypto_attr_alg_name(tb[1]); crypto_ccm_base_alloc()
620 cipher_name = crypto_attr_alg_name(tb[2]); crypto_ccm_base_alloc()
628 return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name); crypto_ccm_base_alloc()
747 static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb) crypto_rfc4309_alloc() argument
756 algt = crypto_get_attr_type(tb); crypto_rfc4309_alloc()
763 ccm_name = crypto_attr_alg_name(tb[1]); crypto_rfc4309_alloc()
H A Decb.c118 static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) crypto_ecb_alloc() argument
124 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); crypto_ecb_alloc()
128 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, crypto_ecb_alloc()
H A Dctr.c181 static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) crypto_ctr_alloc() argument
187 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); crypto_ctr_alloc()
191 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, crypto_ctr_alloc()
336 static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) crypto_rfc3686_alloc() argument
345 algt = crypto_get_attr_type(tb); crypto_rfc3686_alloc()
352 cipher_name = crypto_attr_alg_name(tb[1]); crypto_rfc3686_alloc()
H A Daead.c273 struct rtattr **tb, u32 type, aead_geniv_alloc()
283 algt = crypto_get_attr_type(tb); aead_geniv_alloc()
291 name = crypto_attr_alg_name(tb[1]); aead_geniv_alloc()
398 struct rtattr *tb[3]; crypto_nivaead_default() local
429 tb[0] = &ptype.attr; crypto_nivaead_default()
435 tb[1] = &palg.attr; crypto_nivaead_default()
437 tb[2] = NULL; crypto_nivaead_default()
446 inst = tmpl->alloc(tb); crypto_nivaead_default()
272 aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb, u32 type, u32 mask) aead_geniv_alloc() argument
H A Dmcryptd.c261 static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type, mcryptd_check_internal() argument
266 algt = crypto_get_attr_type(tb); mcryptd_check_internal()
490 static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, mcryptd_create_hash() argument
501 mcryptd_check_internal(tb, &type, &mask); mcryptd_create_hash()
503 salg = shash_attr_alg(tb[1], type, mask); mcryptd_create_hash()
557 static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) mcryptd_create() argument
561 algt = crypto_get_attr_type(tb); mcryptd_create()
567 return mcryptd_create_hash(tmpl, tb, &mqueue); mcryptd_create()
H A Dgcm.c697 static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, crypto_gcm_alloc_common() argument
710 algt = crypto_get_attr_type(tb); crypto_gcm_alloc_common()
793 static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb) crypto_gcm_alloc() argument
799 cipher_name = crypto_attr_alg_name(tb[1]); crypto_gcm_alloc()
811 return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash"); crypto_gcm_alloc()
830 static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb) crypto_gcm_base_alloc() argument
836 ctr_name = crypto_attr_alg_name(tb[1]); crypto_gcm_base_alloc()
840 ghash_name = crypto_attr_alg_name(tb[2]); crypto_gcm_base_alloc()
848 return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name); crypto_gcm_base_alloc()
964 static struct crypto_instance *crypto_rfc4106_alloc(struct rtattr **tb) crypto_rfc4106_alloc() argument
973 algt = crypto_get_attr_type(tb); crypto_rfc4106_alloc()
980 ccm_name = crypto_attr_alg_name(tb[1]); crypto_rfc4106_alloc()
1279 static struct crypto_instance *crypto_rfc4543_alloc(struct rtattr **tb) crypto_rfc4543_alloc() argument
1289 algt = crypto_get_attr_type(tb); crypto_rfc4543_alloc()
1296 ccm_name = crypto_attr_alg_name(tb[1]); crypto_rfc4543_alloc()
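Note: the ccm.c and gcm.c allocators above mostly use tb to pull algorithm names out of the template arguments and compose instance names from them, e.g. "gcm(aes)" being built from ctr(aes) plus ghash. Roughly (a standalone illustration; the buffer sizes and format strings are mine, the kernel bounds names by CRYPTO_MAX_ALG_NAME and checks for truncation):

#include <stdio.h>

int main(void)
{
	const char *cipher_name = "aes";	/* would come from tb[1] */
	char full_name[64], ctr_name[64];

	snprintf(full_name, sizeof(full_name), "gcm(%s)", cipher_name);
	snprintf(ctr_name, sizeof(ctr_name), "ctr(%s)", cipher_name);
	printf("%s = %s + ghash\n", full_name, ctr_name);
	return 0;
}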
/linux-4.1.27/net/decnet/
H A Ddn_rules.c123 struct nlattr **tb) dn_fib_rule_configure()
146 r->src = nla_get_le16(tb[FRA_SRC]); dn_fib_rule_configure()
149 r->dst = nla_get_le16(tb[FRA_DST]); dn_fib_rule_configure()
161 struct nlattr **tb) dn_fib_rule_compare()
171 if (frh->src_len && (r->src != nla_get_le16(tb[FRA_SRC]))) dn_fib_rule_compare()
174 if (frh->dst_len && (r->dst != nla_get_le16(tb[FRA_DST]))) dn_fib_rule_compare()
185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); dnet_addr_type() local
189 if (tb) { dnet_addr_type()
190 if (!tb->lookup(tb, &fld, &res)) { dnet_addr_type()
121 dn_fib_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) dn_fib_rule_configure() argument
160 dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) dn_fib_rule_compare() argument
H A Ddn_table.c408 struct dn_fib_table *tb, dn_hash_dump_bucket()
423 tb->n, dn_hash_dump_bucket()
437 struct dn_fib_table *tb, dn_hash_dump_zone()
450 if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) { dn_hash_dump_zone()
459 static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb, dn_fib_table_dump() argument
464 struct dn_hash *table = (struct dn_hash *)tb->data; dn_fib_table_dump()
474 if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) { dn_fib_table_dump()
491 struct dn_fib_table *tb; dn_fib_dump() local
506 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { dn_fib_dump()
512 if (tb->dump(tb, skb, cb) < 0) dn_fib_dump()
526 static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[], dn_fib_table_insert() argument
529 struct dn_hash *table = (struct dn_hash *)tb->data; dn_fib_table_insert()
645 dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
654 dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);
663 static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[], dn_fib_table_delete() argument
666 struct dn_hash *table = (struct dn_hash*)tb->data; dn_fib_table_delete()
718 dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
768 static int dn_fib_table_flush(struct dn_fib_table *tb) dn_fib_table_flush() argument
770 struct dn_hash *table = (struct dn_hash *)tb->data; dn_fib_table_flush()
787 static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res) dn_fib_table_lookup() argument
791 struct dn_hash *t = (struct dn_hash *)tb->data; dn_fib_table_lookup()
891 struct dn_fib_table *tb; dn_fib_flush() local
895 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) dn_fib_flush()
896 flushed += tb->flush(tb); dn_fib_flush()
406 dn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb, struct dn_fib_table *tb, struct dn_zone *dz, struct dn_fib_node *f) dn_hash_dump_bucket() argument
435 dn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb, struct dn_fib_table *tb, struct dn_zone *dz) dn_hash_dump_zone() argument
H A Ddn_fib.c504 struct dn_fib_table *tb; dn_fib_rtm_delroute() local
519 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0); dn_fib_rtm_delroute()
520 if (!tb) dn_fib_rtm_delroute()
523 return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb)); dn_fib_rtm_delroute()
529 struct dn_fib_table *tb; dn_fib_rtm_newroute() local
544 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1); dn_fib_rtm_newroute()
545 if (!tb) dn_fib_rtm_newroute()
548 return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb)); dn_fib_rtm_newroute()
553 struct dn_fib_table *tb; fib_magic() local
585 tb = dn_fib_get_table(RT_MIN_TABLE, 1); fib_magic()
587 tb = dn_fib_get_table(RT_TABLE_LOCAL, 1); fib_magic()
589 if (tb == NULL) fib_magic()
599 req.rtm.rtm_table = tb->n; fib_magic()
605 tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL); fib_magic()
607 tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL); fib_magic()
H A Ddn_dev.c571 struct nlattr *tb[IFA_MAX+1]; dn_nl_deladdr() local
584 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); dn_nl_deladdr()
597 if (tb[IFA_LOCAL] && dn_nl_deladdr()
598 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) dn_nl_deladdr()
601 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) dn_nl_deladdr()
615 struct nlattr *tb[IFA_MAX+1]; dn_nl_newaddr() local
628 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); dn_nl_newaddr()
632 if (tb[IFA_LOCAL] == NULL) dn_nl_newaddr()
648 if (tb[IFA_ADDRESS] == NULL) dn_nl_newaddr()
649 tb[IFA_ADDRESS] = tb[IFA_LOCAL]; dn_nl_newaddr()
651 ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); dn_nl_newaddr()
652 ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]); dn_nl_newaddr()
653 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : dn_nl_newaddr()
658 if (tb[IFA_LABEL]) dn_nl_newaddr()
659 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); dn_nl_newaddr()
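Note: dn_nl_newaddr() also demonstrates attribute aliasing: if IFA_ADDRESS was omitted, its slot is pointed at IFA_LOCAL before either is read, so later code never special-cases the absence. Modeled standalone (enum and struct invented):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

enum { A_UNSPEC, A_LOCAL, A_ADDRESS, A_MAX };

struct attr { uint16_t v; };

int main(void)
{
	struct attr local = { .v = 0x02aa };
	struct attr *tb[A_MAX + 1] = { [A_LOCAL] = &local };

	if (tb[A_ADDRESS] == NULL)	/* mirrors dn_nl_newaddr() */
		tb[A_ADDRESS] = tb[A_LOCAL];

	assert(tb[A_ADDRESS] == tb[A_LOCAL]);
	return 0;
}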
/linux-4.1.27/net/8021q/
H A Dvlan_netlink.c41 static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) vlan_validate() argument
47 if (tb[IFLA_ADDRESS]) { vlan_validate()
48 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) vlan_validate()
50 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) vlan_validate()
90 struct nlattr *tb[], struct nlattr *data[]) vlan_changelink()
117 struct nlattr *tb[], struct nlattr *data[]) vlan_newlink()
127 if (!tb[IFLA_LINK]) vlan_newlink()
129 real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); vlan_newlink()
147 if (!tb[IFLA_MTU]) vlan_newlink()
152 err = vlan_changelink(dev, tb, data); vlan_newlink()
89 vlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) vlan_changelink() argument
116 vlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) vlan_newlink() argument
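Note: vlan_validate() uses the two-step MAC check that recurs in every ->validate() hook below (veth, dummy, ifb, macvlan): first the attribute length, then the "valid ethernet address" test. A standalone re-implementation of that check (error codes stand in for the kernel's -EINVAL/-EADDRNOTAVAIL):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* Unicast and not all-zero, as the kernel helper requires. */
static bool valid_ether_addr(const uint8_t *a)
{
	bool all_zero = true;

	for (int i = 0; i < ETH_ALEN; i++)
		if (a[i])
			all_zero = false;
	return !(a[0] & 1) && !all_zero;
}

static int validate_mac(const uint8_t *data, size_t len)
{
	if (len != ETH_ALEN)
		return -1;
	if (!valid_ether_addr(data))
		return -2;
	return 0;
}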
/linux-4.1.27/net/dcb/
H A Ddcbnl.c225 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getstate()
227 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ dcbnl_getstate()
236 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getpfccfg()
244 if (!tb[DCB_ATTR_PFC_CFG]) dcbnl_getpfccfg()
251 tb[DCB_ATTR_PFC_CFG], dcbnl_getpfccfg()
281 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getperm_hwaddr()
295 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getcap()
303 if (!tb[DCB_ATTR_CAP]) dcbnl_getcap()
309 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_getcap()
339 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getnumtcs()
347 if (!tb[DCB_ATTR_NUMTCS]) dcbnl_getnumtcs()
353 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_getnumtcs()
385 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setnumtcs()
392 if (!tb[DCB_ATTR_NUMTCS]) dcbnl_setnumtcs()
398 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_setnumtcs()
418 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getpfcstate()
428 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setpfcstate()
432 if (!tb[DCB_ATTR_PFC_STATE]) dcbnl_setpfcstate()
438 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); dcbnl_setpfcstate()
446 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getapp()
454 if (!tb[DCB_ATTR_APP]) dcbnl_getapp()
457 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_getapp()
515 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setapp()
522 if (!tb[DCB_ATTR_APP]) dcbnl_setapp()
525 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_setapp()
564 struct nlattr **tb, struct sk_buff *skb, int dir) __dcbnl_pg_getcfg()
574 if (!tb[DCB_ATTR_PG_CFG]) __dcbnl_pg_getcfg()
584 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); __dcbnl_pg_getcfg()
698 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgtx_getcfg()
700 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); dcbnl_pgtx_getcfg()
704 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgrx_getcfg()
706 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); dcbnl_pgrx_getcfg()
710 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setstate()
714 if (!tb[DCB_ATTR_STATE]) dcbnl_setstate()
720 value = nla_get_u8(tb[DCB_ATTR_STATE]); dcbnl_setstate()
727 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setpfccfg()
734 if (!tb[DCB_ATTR_PFC_CFG]) dcbnl_setpfccfg()
741 tb[DCB_ATTR_PFC_CFG], dcbnl_setpfccfg()
758 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setall()
762 if (!tb[DCB_ATTR_SET_ALL]) dcbnl_setall()
776 u32 seq, struct nlattr **tb, struct sk_buff *skb, __dcbnl_pg_setcfg()
788 if (!tb[DCB_ATTR_PG_CFG]) __dcbnl_pg_setcfg()
798 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); __dcbnl_pg_setcfg()
866 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgtx_setcfg()
868 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); dcbnl_pgtx_setcfg()
872 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgrx_setcfg()
874 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); dcbnl_pgrx_setcfg()
878 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_bcn_getcfg()
888 if (!tb[DCB_ATTR_BCN]) dcbnl_bcn_getcfg()
896 tb[DCB_ATTR_BCN], dcbnl_bcn_nest); dcbnl_bcn_getcfg()
939 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_bcn_setcfg()
947 if (!tb[DCB_ATTR_BCN]) dcbnl_bcn_setcfg()
955 tb[DCB_ATTR_BCN], dcbnl_bcn_setcfg()
1417 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_set()
1426 if (!tb[DCB_ATTR_IEEE]) dcbnl_ieee_set()
1430 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); dcbnl_ieee_set()
1490 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_get()
1501 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_del()
1510 if (!tb[DCB_ATTR_IEEE]) dcbnl_ieee_del()
1514 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); dcbnl_ieee_del()
1546 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getdcbx()
1556 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setdcbx()
1563 if (!tb[DCB_ATTR_DCBX]) dcbnl_setdcbx()
1566 value = nla_get_u8(tb[DCB_ATTR_DCBX]); dcbnl_setdcbx()
1573 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getfeatcfg()
1583 if (!tb[DCB_ATTR_FEATCFG]) dcbnl_getfeatcfg()
1586 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_getfeatcfg()
1618 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setfeatcfg()
1627 if (!tb[DCB_ATTR_FEATCFG]) dcbnl_setfeatcfg()
1630 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], dcbnl_setfeatcfg()
1655 u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_cee_get()
1709 struct nlattr *tb[DCB_ATTR_MAX + 1]; dcb_doit() local
1719 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, dcb_doit()
1732 if (!tb[DCB_ATTR_IFNAME]) dcb_doit()
1735 netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME])); dcb_doit()
1747 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); dcb_doit()
224 dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getstate() argument
235 dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getpfccfg() argument
280 dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getperm_hwaddr() argument
294 dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getcap() argument
338 dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getnumtcs() argument
384 dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setnumtcs() argument
417 dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getpfcstate() argument
427 dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setpfcstate() argument
445 dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getapp() argument
514 dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setapp() argument
563 __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) __dcbnl_pg_getcfg() argument
697 dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgtx_getcfg() argument
703 dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgrx_getcfg() argument
709 dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setstate() argument
726 dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setpfccfg() argument
757 dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setall() argument
775 __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) __dcbnl_pg_setcfg() argument
865 dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgtx_setcfg() argument
871 dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_pgrx_setcfg() argument
877 dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_bcn_getcfg() argument
938 dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_bcn_setcfg() argument
1416 dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_set() argument
1489 dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_get() argument
1500 dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_ieee_del() argument
1545 dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getdcbx() argument
1555 dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setdcbx() argument
1572 dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_getfeatcfg() argument
1617 dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_setfeatcfg() argument
1654 dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) dcbnl_cee_get() argument
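Note: every dcbnl handler above takes the same (netdev, nlh, seq, tb, skb) arguments precisely so that dcb_doit() can dispatch through a command-indexed table of function pointers. The shape of that dispatch, minimized (the types and commands are placeholders):

#include <stdio.h>

struct attrtab;		/* stand-in for struct nlattr *tb[] */

typedef int (*dcb_handler)(struct attrtab *tb, unsigned int seq);

static int do_getstate(struct attrtab *tb, unsigned int seq)
{ (void)tb; printf("getstate seq=%u\n", seq); return 0; }

static int do_setstate(struct attrtab *tb, unsigned int seq)
{ (void)tb; printf("setstate seq=%u\n", seq); return 0; }

enum { CMD_GETSTATE, CMD_SETSTATE, CMD_MAX };

static const dcb_handler handlers[CMD_MAX] = {
	[CMD_GETSTATE] = do_getstate,
	[CMD_SETSTATE] = do_setstate,
};

int main(void)
{
	return handlers[CMD_GETSTATE](NULL, 1);
}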
/linux-4.1.27/include/net/
H A Dip_fib.h194 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
203 void fib_free_table(struct fib_table *tb);
232 struct fib_table *tb; fib_lookup() local
237 tb = fib_get_table(net, RT_TABLE_MAIN); fib_lookup()
238 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) fib_lookup()
258 struct fib_table *tb; fib_lookup() local
269 tb = rcu_dereference_rtnl(net->ipv4.fib_main); fib_lookup()
270 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) fib_lookup()
273 tb = rcu_dereference_rtnl(net->ipv4.fib_default); fib_lookup()
274 if (tb && !fib_table_lookup(tb, flp, res, FIB_LOOKUP_NOREF)) fib_lookup()
H A Drtnetlink.h61 int (*validate)(struct nlattr *tb[],
66 struct nlattr *tb[],
69 struct nlattr *tb[],
86 int (*slave_validate)(struct nlattr *tb[],
90 struct nlattr *tb[],
139 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
143 struct nlattr *tb[]);
146 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
H A Dinet6_connection_sock.h26 const struct inet_bind_bucket *tb, bool relax);
/linux-4.1.27/include/linux/netfilter/ipset/
H A Dip_set_comment.h14 ip_set_comment_uget(struct nlattr *tb) ip_set_comment_uget() argument
16 return nla_data(tb); ip_set_comment_uget()
H A Dip_set_timeout.h30 ip_set_timeout_uget(struct nlattr *tb) ip_set_timeout_uget() argument
32 unsigned int timeout = ip_set_get_h32(tb); ip_set_timeout_uget()
H A Dip_set.h160 int (*uadt)(struct ip_set *set, struct nlattr *tb[],
207 struct nlattr *tb[], u32 flags);
416 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
418 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
452 ip_set_attr_netorder(struct nlattr *tb[], int type) ip_set_attr_netorder() argument
454 return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER); ip_set_attr_netorder()
458 ip_set_optattr_netorder(struct nlattr *tb[], int type) ip_set_optattr_netorder() argument
460 return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER); ip_set_optattr_netorder()
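Note: the two ip_set predicates above differ only in how a missing attribute is treated: ip_set_attr_netorder() demands the attribute present and byte-order flagged, while ip_set_optattr_netorder() accepts absence. A sketch of both (the flag constant models NLA_F_NET_BYTEORDER):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define NET_BYTEORDER_FLAG 0x4000	/* models NLA_F_NET_BYTEORDER */

struct attr { uint16_t nla_type; };

static bool attr_netorder(struct attr *tb[], int type)
{
	return tb[type] && (tb[type]->nla_type & NET_BYTEORDER_FLAG);
}

static bool optattr_netorder(struct attr *tb[], int type)
{
	return !tb[type] || (tb[type]->nla_type & NET_BYTEORDER_FLAG);
}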
/linux-4.1.27/net/bridge/
H A Dbr_netlink.c537 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], br_set_port_flag() argument
540 if (tb[attrtype]) { br_set_port_flag()
541 u8 flag = nla_get_u8(tb[attrtype]); br_set_port_flag()
550 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_setport() argument
555 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); br_setport()
556 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); br_setport()
557 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); br_setport()
558 br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); br_setport()
559 br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); br_setport()
560 br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); br_setport()
561 br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); br_setport()
562 br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI); br_setport()
564 if (tb[IFLA_BRPORT_COST]) { br_setport()
565 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); br_setport()
570 if (tb[IFLA_BRPORT_PRIORITY]) { br_setport()
571 err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY])); br_setport()
576 if (tb[IFLA_BRPORT_STATE]) { br_setport()
577 err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE])); br_setport()
592 struct nlattr *tb[IFLA_BRPORT_MAX + 1]; br_setlink() local
609 err = nla_parse_nested(tb, IFLA_BRPORT_MAX, br_setlink()
615 err = br_setport(p, tb); br_setlink()
687 static int br_validate(struct nlattr *tb[], struct nlattr *data[]) br_validate() argument
689 if (tb[IFLA_ADDRESS]) { br_validate()
690 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) br_validate()
692 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) br_validate()
700 struct nlattr *tb[], struct nlattr *data[]) br_dev_newlink()
704 if (tb[IFLA_ADDRESS]) { br_dev_newlink()
706 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS])); br_dev_newlink()
715 struct nlattr *tb[], br_port_slave_changelink()
753 static int br_changelink(struct net_device *brdev, struct nlattr *tb[], br_changelink() argument
699 br_dev_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) br_dev_newlink() argument
713 br_port_slave_changelink(struct net_device *brdev, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) br_port_slave_changelink() argument
H A Dbr_mdb.c276 struct nlattr *tb[MDBA_SET_ENTRY_MAX+1]; br_mdb_parse() local
280 err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL); br_mdb_parse()
303 if (!tb[MDBA_SET_ENTRY] || br_mdb_parse()
304 nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) { br_mdb_parse()
309 entry = nla_data(tb[MDBA_SET_ENTRY]); br_mdb_parse()
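Note: br_set_port_flag() above folds an optional boolean attribute into a flags word: set the bit if the attribute carries 1, clear it if 0, and leave it alone when absent, which is why br_setport() can call it once per flag unconditionally. A minimal model:

#include <stdint.h>
#include <stddef.h>

struct attr { uint8_t v; };

static void set_flag(uint32_t *flags, const struct attr *a, uint32_t bit)
{
	if (!a)
		return;		/* attribute absent: keep current setting */
	if (a->v)
		*flags |= bit;
	else
		*flags &= ~bit;
}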
/linux-4.1.27/drivers/tty/
H A Dtty_buffer.c307 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_string_fixed_flag() local
310 memcpy(char_buf_ptr(tb, tb->used), chars, space); tty_insert_flip_string_fixed_flag()
311 if (~tb->flags & TTYB_NORMAL) tty_insert_flip_string_fixed_flag()
312 memset(flag_buf_ptr(tb, tb->used), flag, space); tty_insert_flip_string_fixed_flag()
313 tb->used += space; tty_insert_flip_string_fixed_flag()
342 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_string_flags() local
345 memcpy(char_buf_ptr(tb, tb->used), chars, space); tty_insert_flip_string_flags()
346 memcpy(flag_buf_ptr(tb, tb->used), flags, space); tty_insert_flip_string_flags()
347 tb->used += space; tty_insert_flip_string_flags()
394 struct tty_buffer *tb = port->buf.tail; tty_prepare_flip_string() local
395 *chars = char_buf_ptr(tb, tb->used); tty_prepare_flip_string()
396 if (~tb->flags & TTYB_NORMAL) tty_prepare_flip_string()
397 memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); tty_prepare_flip_string()
398 tb->used += space; tty_prepare_flip_string()
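Note: in tty_buffer.c, tb is the tail flip buffer, and every hit is a variation on one move: copy into the tail at offset used, then advance used. Reduced to its core (a fixed-size buffer stands in for the kernel's allocated buffer chain):

#include <string.h>
#include <stddef.h>

struct flip_buf {
	char data[256];
	size_t used;
};

static size_t flip_append(struct flip_buf *tb, const char *chars, size_t n)
{
	size_t space = sizeof(tb->data) - tb->used;

	if (n > space)
		n = space;	/* the kernel instead loops, growing the chain */
	memcpy(tb->data + tb->used, chars, n);
	tb->used += n;
	return n;
}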
/linux-4.1.27/arch/m68k/coldfire/
H A Dintc-2.c148 u16 pa, tb; intc_irq_set_type() local
152 tb = 0x1; intc_irq_set_type()
155 tb = 0x2; intc_irq_set_type()
158 tb = 0x3; intc_irq_set_type()
162 tb = 0; intc_irq_set_type()
166 if (tb) intc_irq_set_type()
171 pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); intc_irq_set_type()
H A Dintc-simr.c132 u16 pa, tb; intc_irq_set_type() local
136 tb = 0x1; intc_irq_set_type()
139 tb = 0x2; intc_irq_set_type()
142 tb = 0x3; intc_irq_set_type()
146 tb = 0; intc_irq_set_type()
150 if (tb) intc_irq_set_type()
155 pa = (pa & ~(0x3 << ebit)) | (tb << ebit); intc_irq_set_type()
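Note: both ColdFire handlers above encode the IRQ trigger type as a 2-bit code in tb and splice it into a packed register: clear the 2-bit lane for this IRQ, then OR in the new code. Verified standalone:

#include <stdint.h>
#include <assert.h>

static uint16_t set_trigger(uint16_t pa, unsigned int ebit, uint16_t tb)
{
	return (uint16_t)((pa & ~(0x3u << ebit)) | (tb << ebit));
}

int main(void)
{
	uint16_t pa = 0xffff;

	pa = set_trigger(pa, 4, 0x2);	/* e.g. falling edge, code 0x2 */
	assert(((pa >> 4) & 0x3) == 0x2);
	return 0;
}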
/linux-4.1.27/net/phonet/
H A Dpn_netlink.c67 struct nlattr *tb[IFA_MAX+1]; addr_doit() local
81 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); addr_doit()
86 if (tb[IFA_LOCAL] == NULL) addr_doit()
88 pnaddr = nla_get_u8(tb[IFA_LOCAL]); addr_doit()
232 struct nlattr *tb[RTA_MAX+1]; route_doit() local
246 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); route_doit()
253 if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) route_doit()
255 dst = nla_get_u8(tb[RTA_DST]); route_doit()
259 dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); route_doit()
/linux-4.1.27/drivers/net/
H A Dveth.c322 static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) veth_validate() argument
324 if (tb[IFLA_ADDRESS]) { veth_validate()
325 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) veth_validate()
327 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) veth_validate()
330 if (tb[IFLA_MTU]) { veth_validate()
331 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) veth_validate()
340 struct nlattr *tb[], struct nlattr *data[]) veth_newlink()
372 tbp = tb; veth_newlink()
419 if (tb[IFLA_ADDRESS] == NULL) veth_newlink()
422 if (tb[IFLA_IFNAME]) veth_newlink()
423 nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); veth_newlink()
339 veth_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) veth_newlink() argument
H A Ddummy.c156 static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) dummy_validate() argument
158 if (tb[IFLA_ADDRESS]) { dummy_validate()
159 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) dummy_validate()
161 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) dummy_validate()
H A Dnlmon.c150 static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[]) nlmon_validate() argument
152 if (tb[IFLA_ADDRESS]) nlmon_validate()
H A Difb.c245 static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) ifb_validate() argument
247 if (tb[IFLA_ADDRESS]) { ifb_validate()
248 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) ifb_validate()
250 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) ifb_validate()
H A Dmacvlan.c888 static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], macvlan_fdb_add() argument
913 static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], macvlan_fdb_del() argument
1123 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) macvlan_validate() argument
1125 if (tb[IFLA_ADDRESS]) { macvlan_validate()
1126 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) macvlan_validate()
1128 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) macvlan_validate()
1242 struct nlattr *tb[], struct nlattr *data[]) macvlan_common_newlink()
1250 if (!tb[IFLA_LINK]) macvlan_common_newlink()
1253 lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); macvlan_common_newlink()
1263 if (!tb[IFLA_MTU]) macvlan_common_newlink()
1268 if (!tb[IFLA_ADDRESS]) macvlan_common_newlink()
1338 struct nlattr *tb[], struct nlattr *data[]) macvlan_newlink()
1340 return macvlan_common_newlink(src_net, dev, tb, data); macvlan_newlink()
1356 struct nlattr *tb[], struct nlattr *data[]) macvlan_changelink()
1241 macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) macvlan_common_newlink() argument
1337 macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) macvlan_newlink() argument
1355 macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) macvlan_changelink() argument
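Note: macvlan_common_newlink(), and the 6lowpan and ipoib variants further down, share one shape: require IFLA_LINK, resolve it to the lower device by ifindex, then validate MTU and address against that device. A sketch of the require-then-resolve step (structures and error values illustrative):

#include <stdint.h>
#include <stddef.h>

struct netdev { uint32_t ifindex; };

static struct netdev *dev_by_index(struct netdev *devs, int n, uint32_t idx)
{
	for (int i = 0; i < n; i++)
		if (devs[i].ifindex == idx)
			return &devs[i];
	return NULL;
}

static int newlink(struct netdev *devs, int n, const uint32_t *link_attr)
{
	if (!link_attr)
		return -1;	/* no IFLA_LINK: kernel uses -EINVAL */
	if (!dev_by_index(devs, n, *link_attr))
		return -2;	/* lower device gone: kernel uses -ENODEV */
	return 0;
}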
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
H A Dipoib_netlink.c68 struct nlattr *tb[], struct nlattr *data[]) ipoib_changelink()
96 struct nlattr *tb[], struct nlattr *data[]) ipoib_new_child_link()
103 if (!tb[IFLA_LINK]) ipoib_new_child_link()
106 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); ipoib_new_child_link()
135 err = ipoib_changelink(dev, tb, data); ipoib_new_child_link()
67 ipoib_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) ipoib_changelink() argument
95 ipoib_new_child_link(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) ipoib_new_child_link() argument
/linux-4.1.27/net/ipv4/netfilter/
H A Dnf_conntrack_proto_icmp.c253 static int icmp_nlattr_to_tuple(struct nlattr *tb[], icmp_nlattr_to_tuple() argument
256 if (!tb[CTA_PROTO_ICMP_TYPE] || icmp_nlattr_to_tuple()
257 !tb[CTA_PROTO_ICMP_CODE] || icmp_nlattr_to_tuple()
258 !tb[CTA_PROTO_ICMP_ID]) icmp_nlattr_to_tuple()
261 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]); icmp_nlattr_to_tuple()
262 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]); icmp_nlattr_to_tuple()
263 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]); icmp_nlattr_to_tuple()
283 static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], icmp_timeout_nlattr_to_obj() argument
289 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { icmp_timeout_nlattr_to_obj()
291 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; icmp_timeout_nlattr_to_obj()
/linux-4.1.27/net/ipv6/netfilter/
H A Dnf_conntrack_proto_icmpv6.c259 static int icmpv6_nlattr_to_tuple(struct nlattr *tb[], icmpv6_nlattr_to_tuple() argument
262 if (!tb[CTA_PROTO_ICMPV6_TYPE] || icmpv6_nlattr_to_tuple()
263 !tb[CTA_PROTO_ICMPV6_CODE] || icmpv6_nlattr_to_tuple()
264 !tb[CTA_PROTO_ICMPV6_ID]) icmpv6_nlattr_to_tuple()
267 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]); icmpv6_nlattr_to_tuple()
268 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]); icmpv6_nlattr_to_tuple()
269 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]); icmpv6_nlattr_to_tuple()
290 static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], icmpv6_timeout_nlattr_to_obj() argument
296 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { icmpv6_timeout_nlattr_to_obj()
298 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; icmpv6_timeout_nlattr_to_obj()
H A Dnf_conntrack_l3proto_ipv6.c299 static int ipv6_nlattr_to_tuple(struct nlattr *tb[], ipv6_nlattr_to_tuple() argument
302 if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) ipv6_nlattr_to_tuple()
305 t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]); ipv6_nlattr_to_tuple()
306 t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]); ipv6_nlattr_to_tuple()
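Note: the nlattr_to_tuple() helpers above require the complete attribute set before touching any of it: all of type, code, and id must be present or the conversion fails without partial writes. A compact model of that all-or-nothing gate:

#include <stddef.h>

struct attr { unsigned char v; };

static int tuple_from_attrs(const struct attr *type, const struct attr *code,
			    const struct attr *id, unsigned char out[3])
{
	if (!type || !code || !id)
		return -1;	/* kernel returns -EINVAL */
	out[0] = type->v;
	out[1] = code->v;
	out[2] = id->v;
	return 0;
}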
/linux-4.1.27/arch/sparc/kernel/
H A Dirq_64.c997 struct trap_per_cpu *tb = &trap_block[this_cpu]; sun4v_register_mondo_queues() local
999 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, sun4v_register_mondo_queues()
1000 tb->cpu_mondo_qmask); sun4v_register_mondo_queues()
1001 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, sun4v_register_mondo_queues()
1002 tb->dev_mondo_qmask); sun4v_register_mondo_queues()
1003 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, sun4v_register_mondo_queues()
1004 tb->resum_qmask); sun4v_register_mondo_queues()
1005 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, sun4v_register_mondo_queues()
1006 tb->nonresum_qmask); sun4v_register_mondo_queues()
1028 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) init_cpu_send_mondo_info() argument
1041 tb->cpu_mondo_block_pa = __pa(page); init_cpu_send_mondo_info()
1042 tb->cpu_list_pa = __pa(page + 64); init_cpu_send_mondo_info()
1052 struct trap_per_cpu *tb = &trap_block[cpu]; for_each_possible_cpu() local
1054 alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask); for_each_possible_cpu()
1055 alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask); for_each_possible_cpu()
1056 alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask); for_each_possible_cpu()
1057 alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask); for_each_possible_cpu()
1058 alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask); for_each_possible_cpu()
1059 alloc_one_queue(&tb->nonresum_kernel_buf_pa, for_each_possible_cpu()
1060 tb->nonresum_qmask); for_each_possible_cpu()
1069 struct trap_per_cpu *tb = &trap_block[cpu]; for_each_possible_cpu() local
1071 init_cpu_send_mondo_info(tb); for_each_possible_cpu()
H A Dsmp_64.c289 struct trap_per_cpu *tb; ldom_startcpu_cpuid() local
308 tb = &trap_block[cpu]; ldom_startcpu_cpuid()
310 hdesc->fault_info_va = (unsigned long) &tb->fault_info; ldom_startcpu_cpuid()
311 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); ldom_startcpu_cpuid()
456 static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) spitfire_xcall_deliver() argument
464 cpu_list = __va(tb->cpu_list_pa); spitfire_xcall_deliver()
465 mondo = __va(tb->cpu_mondo_block_pa); spitfire_xcall_deliver()
477 static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) cheetah_xcall_deliver() argument
483 cpu_list = __va(tb->cpu_list_pa); cheetah_xcall_deliver()
484 mondo = __va(tb->cpu_mondo_block_pa); cheetah_xcall_deliver()
621 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) hypervisor_xcall_deliver() argument
629 cpu_list = __va(tb->cpu_list_pa); hypervisor_xcall_deliver()
638 tb->cpu_list_pa, hypervisor_xcall_deliver()
639 tb->cpu_mondo_block_pa); hypervisor_xcall_deliver()
724 this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); hypervisor_xcall_deliver()
737 struct trap_per_cpu *tb; xcall_deliver() local
756 tb = &trap_block[this_cpu]; xcall_deliver()
758 mondo = __va(tb->cpu_mondo_block_pa); xcall_deliver()
764 cpu_list = __va(tb->cpu_list_pa); xcall_deliver()
775 xcall_deliver_impl(tb, cnt);
1306 struct trap_per_cpu *tb = &trap_block[cpu]; cpu_play_dead() local
1309 tb->cpu_mondo_pa, 0); cpu_play_dead()
1311 tb->dev_mondo_pa, 0); cpu_play_dead()
1313 tb->resum_mondo_pa, 0); cpu_play_dead()
1315 tb->nonresum_mondo_pa, 0); cpu_play_dead()
/linux-4.1.27/arch/x86/crypto/
H A Dfpu.c102 static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) crypto_fpu_alloc() argument
108 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); crypto_fpu_alloc()
112 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, crypto_fpu_alloc()
/linux-4.1.27/arch/powerpc/boot/
H A D4xx.c339 u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; ibm440gp_fixup_clocks() local
363 tb = sys_clk; ibm440gp_fixup_clocks()
366 tb = cpu; ibm440gp_fixup_clocks()
385 dt_fixup_cpu_clocks(cpu, tb, 0); ibm440gp_fixup_clocks()
426 u32 ccr1, tb = tmr_clk; __ibm440eplike_fixup_clocks() local
469 if (tb == 0) { __ibm440eplike_fixup_clocks()
474 tb = cpu; __ibm440eplike_fixup_clocks()
476 dt_fixup_cpu_clocks(cpu, tb, 0); __ibm440eplike_fixup_clocks()
558 u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; ibm405gp_fixup_clocks() local
614 tb = cpu; ibm405gp_fixup_clocks()
616 dt_fixup_cpu_clocks(cpu, tb, 0); ibm405gp_fixup_clocks()
632 u32 pllmr0_ccdv, tb, m; ibm405ep_fixup_clocks() local
655 tb = cpu; ibm405ep_fixup_clocks()
659 dt_fixup_cpu_clocks(cpu, tb, 0); ibm405ep_fixup_clocks()
753 u32 cpu, plb, opb, ebc, vco, tb, uart0, uart1; ibm405ex_fixup_clocks() local
794 tb = cpu; ibm405ex_fixup_clocks()
797 dt_fixup_cpu_clocks(cpu, tb, 0); ibm405ex_fixup_clocks()
H A Ddevtree.c62 void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus) dt_fixup_cpu_clocks() argument
67 printf("CPU timebase-frequency <- 0x%x (%dMHz)\n\r", tb, MHZ(tb)); dt_fixup_cpu_clocks()
73 setprop_val(devp, "timebase-frequency", tb); dt_fixup_cpu_clocks()
78 timebase_period_ns = 1000000000 / tb; dt_fixup_cpu_clocks()
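Note: here tb is a timebase frequency in Hz, and dt_fixup_cpu_clocks() derives the tick period by integer division, so sub-nanosecond precision is dropped. For example, at 400 MHz the period comes out as 2 ns:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tb = 400000000;	/* timebase in Hz */
	uint32_t timebase_period_ns = 1000000000u / tb;

	printf("%u MHz -> %u ns/tick\n",
	       (unsigned)(tb / 1000000), (unsigned)timebase_period_ns);
	return 0;
}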
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
H A Dconfig.c912 struct lnet_text_buf_t *tb; lnet_splitnets() local
922 tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list); lnet_splitnets()
925 sep = strchr(tb->ltb_text, ','); lnet_splitnets()
926 bracket = strchr(tb->ltb_text, '('); lnet_splitnets()
933 offset2 = offset + (int)(bracket - tb->ltb_text); lnet_splitnets()
950 net = lnet_netspec2net(tb->ltb_text); lnet_splitnets()
953 strlen(tb->ltb_text)); lnet_splitnets()
960 if (tb2 == tb) list_for_each()
966 strlen(tb->ltb_text)); list_for_each()
974 offset += (int)(sep - tb->ltb_text);
982 tb = tb2;
997 struct lnet_text_buf_t *tb; lnet_match_networks() local
1021 tb = list_entry(raw_entries.next, struct lnet_text_buf_t, lnet_match_networks()
1024 strncpy(source, tb->ltb_text, sizeof(source)-1); lnet_match_networks()
1028 rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip); lnet_match_networks()
1032 list_del(&tb->ltb_list); lnet_match_networks()
1035 lnet_free_text_buf(tb); lnet_match_networks()
1041 list_add(&tb->ltb_list, &current_nets); lnet_match_networks()
1048 tb = list_entry(t, struct lnet_text_buf_t, ltb_list); lnet_match_networks()
1049 net1 = lnet_netspec2net(tb->ltb_text); lnet_match_networks()
1074 tb = list_entry(t, struct lnet_text_buf_t, ltb_list); lnet_match_networks()
1076 list_del(&tb->ltb_list); lnet_match_networks()
1077 list_add_tail(&tb->ltb_list, &matched_nets); lnet_match_networks()
1081 tb->ltb_text); lnet_match_networks()
/linux-4.1.27/net/can/
H A Dgw.c628 struct nlattr *tb[CGW_MAX+1]; cgw_parse_attr() local
636 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, cgw_parse_attr()
641 if (tb[CGW_LIM_HOPS]) { cgw_parse_attr()
642 *limhops = nla_get_u8(tb[CGW_LIM_HOPS]); cgw_parse_attr()
650 if (tb[CGW_MOD_AND]) { cgw_parse_attr()
651 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); cgw_parse_attr()
666 if (tb[CGW_MOD_OR]) { cgw_parse_attr()
667 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); cgw_parse_attr()
682 if (tb[CGW_MOD_XOR]) { cgw_parse_attr()
683 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); cgw_parse_attr()
698 if (tb[CGW_MOD_SET]) { cgw_parse_attr()
699 nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); cgw_parse_attr()
717 if (tb[CGW_CS_CRC8]) { cgw_parse_attr()
718 struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); cgw_parse_attr()
725 nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8], cgw_parse_attr()
741 if (tb[CGW_CS_XOR]) { cgw_parse_attr()
742 struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); cgw_parse_attr()
749 nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR], cgw_parse_attr()
774 if (tb[CGW_FILTER]) cgw_parse_attr()
775 nla_memcpy(&ccgw->filter, tb[CGW_FILTER], cgw_parse_attr()
781 if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF]) cgw_parse_attr()
784 ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]); cgw_parse_attr()
785 ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]); cgw_parse_attr()
/linux-4.1.27/net/ieee802154/6lowpan/
H A Dcore.c132 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) lowpan_validate() argument
134 if (tb[IFLA_ADDRESS]) { lowpan_validate()
135 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) lowpan_validate()
142 struct nlattr *tb[], struct nlattr *data[]) lowpan_newlink()
152 if (!tb[IFLA_LINK] || lowpan_newlink()
156 real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); lowpan_newlink()
141 lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) lowpan_newlink() argument
/linux-4.1.27/net/ipv6/
H A Dfib6_rules.c185 struct nlattr **tb) fib6_rule_configure()
202 rule6->src.addr = nla_get_in6_addr(tb[FRA_SRC]); fib6_rule_configure()
205 rule6->dst.addr = nla_get_in6_addr(tb[FRA_DST]); fib6_rule_configure()
217 struct nlattr **tb) fib6_rule_compare()
231 nla_memcmp(tb[FRA_SRC], &rule6->src.addr, sizeof(struct in6_addr))) fib6_rule_compare()
235 nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr))) fib6_rule_compare()
183 fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) fib6_rule_configure() argument
216 fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) fib6_rule_compare() argument
H A Daddrlabel.c411 struct nlattr *tb[IFAL_MAX+1]; ip6addrlbl_newdel() local
416 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); ip6addrlbl_newdel()
426 if (!tb[IFAL_ADDRESS]) ip6addrlbl_newdel()
428 pfx = nla_data(tb[IFAL_ADDRESS]); ip6addrlbl_newdel()
430 if (!tb[IFAL_LABEL]) ip6addrlbl_newdel()
432 label = nla_get_u32(tb[IFAL_LABEL]); ip6addrlbl_newdel()
528 struct nlattr *tb[IFAL_MAX+1]; ip6addrlbl_get() local
535 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); ip6addrlbl_get()
549 if (!tb[IFAL_ADDRESS]) ip6addrlbl_get()
551 addr = nla_data(tb[IFAL_ADDRESS]); ip6addrlbl_get()
H A Droute.c2430 struct nlattr *tb[RTA_MAX+1]; rtm_to_fib6_config() local
2434 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); rtm_to_fib6_config()
2462 if (tb[RTA_GATEWAY]) { rtm_to_fib6_config()
2463 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); rtm_to_fib6_config()
2467 if (tb[RTA_DST]) { rtm_to_fib6_config()
2470 if (nla_len(tb[RTA_DST]) < plen) rtm_to_fib6_config()
2473 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen); rtm_to_fib6_config()
2476 if (tb[RTA_SRC]) { rtm_to_fib6_config()
2479 if (nla_len(tb[RTA_SRC]) < plen) rtm_to_fib6_config()
2482 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); rtm_to_fib6_config()
2485 if (tb[RTA_PREFSRC]) rtm_to_fib6_config()
2486 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]); rtm_to_fib6_config()
2488 if (tb[RTA_OIF]) rtm_to_fib6_config()
2489 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); rtm_to_fib6_config()
2491 if (tb[RTA_PRIORITY]) rtm_to_fib6_config()
2492 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]); rtm_to_fib6_config()
2494 if (tb[RTA_METRICS]) { rtm_to_fib6_config()
2495 cfg->fc_mx = nla_data(tb[RTA_METRICS]); rtm_to_fib6_config()
2496 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]); rtm_to_fib6_config()
2499 if (tb[RTA_TABLE]) rtm_to_fib6_config()
2500 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); rtm_to_fib6_config()
2502 if (tb[RTA_MULTIPATH]) { rtm_to_fib6_config()
2503 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); rtm_to_fib6_config()
2504 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); rtm_to_fib6_config()
2507 if (tb[RTA_PREF]) { rtm_to_fib6_config()
2508 pref = nla_get_u8(tb[RTA_PREF]); rtm_to_fib6_config()
2921 struct nlattr *tb[RTA_MAX+1]; inet6_rtm_getroute() local
2928 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy); inet6_rtm_getroute()
2935 if (tb[RTA_SRC]) { inet6_rtm_getroute()
2936 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) inet6_rtm_getroute()
2939 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); inet6_rtm_getroute()
2942 if (tb[RTA_DST]) { inet6_rtm_getroute()
2943 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) inet6_rtm_getroute()
2946 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); inet6_rtm_getroute()
2949 if (tb[RTA_IIF]) inet6_rtm_getroute()
2950 iif = nla_get_u32(tb[RTA_IIF]); inet6_rtm_getroute()
2952 if (tb[RTA_OIF]) inet6_rtm_getroute()
2953 oif = nla_get_u32(tb[RTA_OIF]); inet6_rtm_getroute()
2955 if (tb[RTA_MARK]) inet6_rtm_getroute()
2956 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); inet6_rtm_getroute()
H A Dip6_fib.c163 static void fib6_link_table(struct net *net, struct fib6_table *tb) fib6_link_table() argument
171 rwlock_init(&tb->tb6_lock); fib6_link_table()
173 h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1); fib6_link_table()
179 hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]); fib6_link_table()
201 struct fib6_table *tb; fib6_new_table() local
205 tb = fib6_get_table(net, id); fib6_new_table()
206 if (tb) fib6_new_table()
207 return tb; fib6_new_table()
209 tb = fib6_alloc_table(net, id); fib6_new_table()
210 if (tb) fib6_new_table()
211 fib6_link_table(net, tb); fib6_new_table()
213 return tb; fib6_new_table()
218 struct fib6_table *tb; fib6_get_table() local
227 hlist_for_each_entry_rcu(tb, head, tb6_hlist) { hlist_for_each_entry_rcu()
228 if (tb->tb6_id == id) { hlist_for_each_entry_rcu()
230 return tb; hlist_for_each_entry_rcu()
356 struct fib6_table *tb; inet6_dump_fib() local
391 hlist_for_each_entry_rcu(tb, head, tb6_hlist) { hlist_for_each_entry_rcu()
394 res = fib6_dump_table(tb, skb, cb); hlist_for_each_entry_rcu()
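Note: fib6_new_table() above is a get-or-create over a hash of tables: probe fib6_get_table() first, allocate and link only on a miss, with the table id masked by a power-of-two bucket count just as tb6_id is masked by FIB6_TABLE_HASHSZ - 1. An unlocked single-threaded sketch (the kernel version takes rtnl/RCU into account):

#include <stdlib.h>
#include <stdint.h>

#define HASHSZ 256		/* must stay a power of two */

struct table {
	uint32_t id;
	struct table *next;
};

static struct table *buckets[HASHSZ];

static struct table *get_table(uint32_t id)
{
	for (struct table *t = buckets[id & (HASHSZ - 1)]; t; t = t->next)
		if (t->id == id)
			return t;
	return NULL;
}

static struct table *new_table(uint32_t id)
{
	struct table *t = get_table(id);

	if (t)
		return t;	/* already linked */
	t = calloc(1, sizeof(*t));
	if (t) {
		t->id = id;
		t->next = buckets[id & (HASHSZ - 1)];
		buckets[id & (HASHSZ - 1)] = t;
	}
	return t;
}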
/linux-4.1.27/drivers/net/fddi/skfp/
H A Dfplustm.c1065 struct s_fpmc *tb ; mac_get_mc_table() local
1080 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ mac_get_mc_table()
1081 if (!tb->n) { /* not used */ mac_get_mc_table()
1083 slot = tb ; mac_get_mc_table()
1086 if (!ether_addr_equal((char *)&tb->a, (char *)own)) mac_get_mc_table()
1088 return tb; mac_get_mc_table()
1106 struct s_fpmc *tb ; mac_clear_multicast() local
1111 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ mac_clear_multicast()
1112 if (!tb->perm) { mac_clear_multicast()
1113 tb->n = 0 ; mac_clear_multicast()
1149 struct s_fpmc *tb ; mac_add_multicast() local
1168 if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80))) mac_add_multicast()
1170 tb->n++ ; mac_add_multicast()
1171 tb->a = own ; mac_add_multicast()
1172 tb->perm = (can & 0x80) ? 1 : 0 ; mac_add_multicast()
1202 struct s_fpmc *tb ; mac_update_multicast() local
1234 for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) { mac_update_multicast()
1235 if (tb->n) { mac_update_multicast()
1242 (u_short)((tb->a.a[0]<<8)+tb->a.a[1])) ; mac_update_multicast()
1244 (u_short)((tb->a.a[2]<<8)+tb->a.a[3])) ; mac_update_multicast()
1246 (u_short)((tb->a.a[4]<<8)+tb->a.a[5])) ; mac_update_multicast()
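Note: mac_get_mc_table() above scans the multicast table for an existing entry while remembering the first free slot, so one pass serves both "already present" and "where to insert". A minimal version of that single-pass find-or-slot search:

#include <stddef.h>
#include <string.h>

struct mc_entry { unsigned char addr[6]; int n; };

static struct mc_entry *find_or_slot(struct mc_entry *tab, int len,
				     const unsigned char *addr)
{
	struct mc_entry *slot = NULL;

	for (int i = 0; i < len; i++) {
		if (!tab[i].n) {		/* unused entry */
			if (!slot)
				slot = &tab[i];
			continue;
		}
		if (!memcmp(tab[i].addr, addr, 6))
			return &tab[i];		/* already present */
	}
	return slot;				/* NULL if table is full */
}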
/linux-4.1.27/net/wireless/
H A Dnl80211.c675 struct nlattr *tb[NL80211_KEY_MAX + 1]; nl80211_parse_key_new() local
676 int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, nl80211_parse_key_new()
681 k->def = !!tb[NL80211_KEY_DEFAULT]; nl80211_parse_key_new()
682 k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; nl80211_parse_key_new()
691 if (tb[NL80211_KEY_IDX]) nl80211_parse_key_new()
692 k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); nl80211_parse_key_new()
694 if (tb[NL80211_KEY_DATA]) { nl80211_parse_key_new()
695 k->p.key = nla_data(tb[NL80211_KEY_DATA]); nl80211_parse_key_new()
696 k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); nl80211_parse_key_new()
699 if (tb[NL80211_KEY_SEQ]) { nl80211_parse_key_new()
700 k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); nl80211_parse_key_new()
701 k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); nl80211_parse_key_new()
704 if (tb[NL80211_KEY_CIPHER]) nl80211_parse_key_new()
705 k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); nl80211_parse_key_new()
707 if (tb[NL80211_KEY_TYPE]) { nl80211_parse_key_new()
708 k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); nl80211_parse_key_new()
713 if (tb[NL80211_KEY_DEFAULT_TYPES]) { nl80211_parse_key_new()
716 tb[NL80211_KEY_DEFAULT_TYPES], nl80211_parse_key_new()
906 struct nlattr *tb) nl80211_get_valid_chan()
910 if (tb == NULL) nl80211_get_valid_chan()
912 chan = ieee80211_get_channel(wiphy, nla_get_u32(tb)); nl80211_get_valid_chan()
1737 struct nlattr **tb = nl80211_fam.attrbuf; nl80211_dump_wiphy_parse() local
1739 tb, nl80211_fam.maxattr, nl80211_policy); nl80211_dump_wiphy_parse()
1744 state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; nl80211_dump_wiphy_parse()
1745 if (tb[NL80211_ATTR_WIPHY]) nl80211_dump_wiphy_parse()
1746 state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]); nl80211_dump_wiphy_parse()
1747 if (tb[NL80211_ATTR_WDEV]) nl80211_dump_wiphy_parse()
1748 state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32; nl80211_dump_wiphy_parse()
1749 if (tb[NL80211_ATTR_IFINDEX]) { nl80211_dump_wiphy_parse()
1752 int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]); nl80211_dump_wiphy_parse()
1874 static int parse_txq_params(struct nlattr *tb[], parse_txq_params() argument
1877 if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] || parse_txq_params()
1878 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || parse_txq_params()
1879 !tb[NL80211_TXQ_ATTR_AIFS]) parse_txq_params()
1882 txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]); parse_txq_params()
1883 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); parse_txq_params()
1884 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); parse_txq_params()
1885 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); parse_txq_params()
1886 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); parse_txq_params()
2134 struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; nl80211_set_wiphy() local
2152 result = nla_parse(tb, NL80211_TXQ_ATTR_MAX, nl80211_set_wiphy()
2158 result = parse_txq_params(tb, &txq_params); nl80211_set_wiphy()
4122 struct nlattr *tb[NL80211_STA_WME_MAX + 1]; nl80211_parse_sta_wme() local
4131 err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla, nl80211_parse_sta_wme()
4136 if (tb[NL80211_STA_WME_UAPSD_QUEUES]) nl80211_parse_sta_wme()
4138 tb[NL80211_STA_WME_UAPSD_QUEUES]); nl80211_parse_sta_wme()
4142 if (tb[NL80211_STA_WME_MAX_SP]) nl80211_parse_sta_wme()
4143 params->max_sp = nla_get_u8(tb[NL80211_STA_WME_MAX_SP]); nl80211_parse_sta_wme()
4928 static int parse_reg_rule(struct nlattr *tb[], parse_reg_rule() argument
4934 if (!tb[NL80211_ATTR_REG_RULE_FLAGS]) parse_reg_rule()
4936 if (!tb[NL80211_ATTR_FREQ_RANGE_START]) parse_reg_rule()
4938 if (!tb[NL80211_ATTR_FREQ_RANGE_END]) parse_reg_rule()
4940 if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]) parse_reg_rule()
4942 if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]) parse_reg_rule()
4945 reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]); parse_reg_rule()
4948 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]); parse_reg_rule()
4950 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]); parse_reg_rule()
4952 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]); parse_reg_rule()
4955 nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]); parse_reg_rule()
4957 if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]) parse_reg_rule()
4959 nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]); parse_reg_rule()
4961 if (tb[NL80211_ATTR_DFS_CAC_TIME]) parse_reg_rule()
4963 nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]); parse_reg_rule()
5171 struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; nl80211_parse_mesh_config() local
5174 #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ nl80211_parse_mesh_config()
5176 if (tb[attr]) { \ nl80211_parse_mesh_config()
5177 if (fn(tb[attr]) < min || fn(tb[attr]) > max) \ nl80211_parse_mesh_config()
5179 cfg->param = fn(tb[attr]); \ nl80211_parse_mesh_config()
5187 if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, nl80211_parse_mesh_config()
5197 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255, nl80211_parse_mesh_config()
5200 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255, nl80211_parse_mesh_config()
5203 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255, nl80211_parse_mesh_config()
5206 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255, nl80211_parse_mesh_config()
5209 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16, nl80211_parse_mesh_config()
5212 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255, nl80211_parse_mesh_config()
5214 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255, nl80211_parse_mesh_config()
5217 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1, nl80211_parse_mesh_config()
5220 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, nl80211_parse_mesh_config()
5224 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255, nl80211_parse_mesh_config()
5227 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535, nl80211_parse_mesh_config()
5230 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535, nl80211_parse_mesh_config()
5233 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, nl80211_parse_mesh_config()
5237 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, nl80211_parse_mesh_config()
5241 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, nl80211_parse_mesh_config()
5245 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, nl80211_parse_mesh_config()
5250 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4, nl80211_parse_mesh_config()
5253 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535, nl80211_parse_mesh_config()
5256 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, nl80211_parse_mesh_config()
5260 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1, nl80211_parse_mesh_config()
5263 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, nl80211_parse_mesh_config()
5266 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, nl80211_parse_mesh_config()
5269 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, nl80211_parse_mesh_config()
5273 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535, nl80211_parse_mesh_config()
5276 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, nl80211_parse_mesh_config()
5281 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, nl80211_parse_mesh_config()
5286 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, nl80211_parse_mesh_config()
5289 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff, nl80211_parse_mesh_config()
5304 struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1]; nl80211_parse_mesh_setup() local
5308 if (nla_parse_nested(tb, NL80211_MESH_SETUP_ATTR_MAX, nl80211_parse_mesh_setup()
5313 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC]) nl80211_parse_mesh_setup()
5315 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ? nl80211_parse_mesh_setup()
5319 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) nl80211_parse_mesh_setup()
5321 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? nl80211_parse_mesh_setup()
5325 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC]) nl80211_parse_mesh_setup()
5327 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ? nl80211_parse_mesh_setup()
5332 if (tb[NL80211_MESH_SETUP_IE]) { nl80211_parse_mesh_setup()
5334 tb[NL80211_MESH_SETUP_IE]; nl80211_parse_mesh_setup()
5340 if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] && nl80211_parse_mesh_setup()
5343 setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]); nl80211_parse_mesh_setup()
5344 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]); nl80211_parse_mesh_setup()
5345 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]); nl80211_parse_mesh_setup()
5349 if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { nl80211_parse_mesh_setup()
5353 nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); nl80211_parse_mesh_setup()
5601 struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; nl80211_set_reg() local
5650 r = nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, nl80211_set_reg()
5655 r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); nl80211_set_reg()
5970 struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1]; nl80211_parse_sched_scan() local
6015 err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, nla_for_each_nested()
6021 if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) { nla_for_each_nested()
6025 rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; nla_for_each_nested()
6147 err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX, nla_for_each_nested()
6152 ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]; nla_for_each_nested()
6174 rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; nla_for_each_nested()
8166 struct nlattr *tb[NL80211_TXRATE_MAX + 1]; nl80211_set_tx_bitrate_mask() local
8216 err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates), nl80211_set_tx_bitrate_mask()
8220 if (tb[NL80211_TXRATE_LEGACY]) { nl80211_set_tx_bitrate_mask()
8223 nla_data(tb[NL80211_TXRATE_LEGACY]), nl80211_set_tx_bitrate_mask()
8224 nla_len(tb[NL80211_TXRATE_LEGACY])); nl80211_set_tx_bitrate_mask()
8226 nla_len(tb[NL80211_TXRATE_LEGACY])) nl80211_set_tx_bitrate_mask()
8229 if (tb[NL80211_TXRATE_HT]) { nl80211_set_tx_bitrate_mask()
8232 nla_data(tb[NL80211_TXRATE_HT]), nl80211_set_tx_bitrate_mask()
8233 nla_len(tb[NL80211_TXRATE_HT]), nl80211_set_tx_bitrate_mask()
8237 if (tb[NL80211_TXRATE_VHT]) { nl80211_set_tx_bitrate_mask()
8240 nla_data(tb[NL80211_TXRATE_VHT]), nl80211_set_tx_bitrate_mask()
8244 if (tb[NL80211_TXRATE_GI]) { nl80211_set_tx_bitrate_mask()
8246 nla_get_u8(tb[NL80211_TXRATE_GI]); nl80211_set_tx_bitrate_mask()
8949 struct nlattr *tb[NUM_NL80211_WOWLAN_TCP]; nl80211_parse_wowlan_tcp() local
8960 err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP, nl80211_parse_wowlan_tcp()
8966 if (!tb[NL80211_WOWLAN_TCP_SRC_IPV4] || nl80211_parse_wowlan_tcp()
8967 !tb[NL80211_WOWLAN_TCP_DST_IPV4] || nl80211_parse_wowlan_tcp()
8968 !tb[NL80211_WOWLAN_TCP_DST_MAC] || nl80211_parse_wowlan_tcp()
8969 !tb[NL80211_WOWLAN_TCP_DST_PORT] || nl80211_parse_wowlan_tcp()
8970 !tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD] || nl80211_parse_wowlan_tcp()
8971 !tb[NL80211_WOWLAN_TCP_DATA_INTERVAL] || nl80211_parse_wowlan_tcp()
8972 !tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] || nl80211_parse_wowlan_tcp()
8973 !tb[NL80211_WOWLAN_TCP_WAKE_MASK]) nl80211_parse_wowlan_tcp()
8976 data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]); nl80211_parse_wowlan_tcp()
8980 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > nl80211_parse_wowlan_tcp()
8982 nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0) nl80211_parse_wowlan_tcp()
8985 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); nl80211_parse_wowlan_tcp()
8989 wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]); nl80211_parse_wowlan_tcp()
8993 if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]) { nl80211_parse_wowlan_tcp()
8994 u32 tokln = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); nl80211_parse_wowlan_tcp()
8996 tok = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN]); nl80211_parse_wowlan_tcp()
9013 if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) { nl80211_parse_wowlan_tcp()
9014 seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]); nl80211_parse_wowlan_tcp()
9031 cfg->src = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_SRC_IPV4]); nl80211_parse_wowlan_tcp()
9032 cfg->dst = nla_get_in_addr(tb[NL80211_WOWLAN_TCP_DST_IPV4]); nl80211_parse_wowlan_tcp()
9033 memcpy(cfg->dst_mac, nla_data(tb[NL80211_WOWLAN_TCP_DST_MAC]), nl80211_parse_wowlan_tcp()
9035 if (tb[NL80211_WOWLAN_TCP_SRC_PORT]) nl80211_parse_wowlan_tcp()
9036 port = nla_get_u16(tb[NL80211_WOWLAN_TCP_SRC_PORT]); nl80211_parse_wowlan_tcp()
9061 cfg->dst_port = nla_get_u16(tb[NL80211_WOWLAN_TCP_DST_PORT]); nl80211_parse_wowlan_tcp()
9065 nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]), nl80211_parse_wowlan_tcp()
9069 cfg->data_interval = nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]); nl80211_parse_wowlan_tcp()
9073 nla_data(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]), nl80211_parse_wowlan_tcp()
9078 nla_data(tb[NL80211_WOWLAN_TCP_WAKE_MASK]), nl80211_parse_wowlan_tcp()
9095 struct nlattr **tb; nl80211_parse_wowlan_nd() local
9098 tb = kzalloc(NUM_NL80211_ATTR * sizeof(*tb), GFP_KERNEL); nl80211_parse_wowlan_nd()
9099 if (!tb) nl80211_parse_wowlan_nd()
9107 err = nla_parse(tb, NL80211_ATTR_MAX, nl80211_parse_wowlan_nd()
9113 trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb); nl80211_parse_wowlan_nd()
9119 kfree(tb); nl80211_parse_wowlan_nd()
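nl80211_parse_wowlan_nd() illustrates one more wrinkle: with NUM_NL80211_ATTR slots, the tb[] array is too large for the kernel stack, so it is heap-allocated for the duration of the parse. A minimal hedged sketch, where attr stands for the nested NL80211_WOWLAN_TRIG_NET_DETECT container:

struct nlattr **tb;
int err;

/* An on-stack tb[] would blow the stack at this size. */
tb = kzalloc(NUM_NL80211_ATTR * sizeof(*tb), GFP_KERNEL);
if (!tb)
        return -ENOMEM;

err = nla_parse(tb, NL80211_ATTR_MAX, nla_data(attr), nla_len(attr),
                nl80211_policy);
/* ... hand tb[] to nl80211_parse_sched_scan() on success ... */
kfree(tb);
return err;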
9126 struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG]; nl80211_set_wowlan() local
9143 err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG, nl80211_set_wowlan()
9150 if (tb[NL80211_WOWLAN_TRIG_ANY]) { nl80211_set_wowlan()
9156 if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) { nl80211_set_wowlan()
9163 if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) { nl80211_set_wowlan()
9170 if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED]) nl80211_set_wowlan()
9173 if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE]) { nl80211_set_wowlan()
9180 if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) { nl80211_set_wowlan()
9187 if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) { nl80211_set_wowlan()
9194 if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) { nl80211_set_wowlan()
9201 if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) { nl80211_set_wowlan()
9209 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], nl80211_set_wowlan()
9224 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN], nla_for_each_nested()
9269 if (tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION]) {
9272 rdev, tb[NL80211_WOWLAN_TRIG_TCP_CONNECTION],
9278 if (tb[NL80211_WOWLAN_TRIG_NET_DETECT]) {
9281 rdev, wowlan, tb[NL80211_WOWLAN_TRIG_NET_DETECT],
9434 struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat; nl80211_parse_coalesce_rule() local
9438 err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule), nl80211_parse_coalesce_rule()
9443 if (tb[NL80211_ATTR_COALESCE_RULE_DELAY]) nl80211_parse_coalesce_rule()
9445 nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]); nl80211_parse_coalesce_rule()
9449 if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION]) nl80211_parse_coalesce_rule()
9451 nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]); nl80211_parse_coalesce_rule()
9456 if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN]) nl80211_parse_coalesce_rule()
9459 nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], nl80211_parse_coalesce_rule()
9473 nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN], nla_for_each_nested()
9590 struct nlattr *tb[NUM_NL80211_REKEY_DATA]; nl80211_set_rekey_data() local
9597 err = nla_parse(tb, MAX_NL80211_REKEY_DATA, nl80211_set_rekey_data()
9604 if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) nl80211_set_rekey_data()
9606 if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) nl80211_set_rekey_data()
9608 if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) nl80211_set_rekey_data()
9611 rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); nl80211_set_rekey_data()
9612 rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); nl80211_set_rekey_data()
9613 rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); nl80211_set_rekey_data()
905 nl80211_get_valid_chan(struct wiphy *wiphy, struct nlattr *tb) nl80211_get_valid_chan() argument
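The rekey-data hits above show the companion idiom for fixed-size binary attributes: validate nla_len() against the expected constant before handing nla_data() to anyone (the alternative is an NLA_BINARY policy entry with .len set, which lets nla_parse() enforce it). A hedged sketch using the constants from nl80211.h:

/* Fixed-size blobs: a wrong length means a malformed request. */
if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
        return -ERANGE;

rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);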
/linux-4.1.27/include/linux/iio/common/
st_sensors.h 157 int (*read_byte) (struct st_sensor_transfer_buffer *tb,
159 int (*write_byte) (struct st_sensor_transfer_buffer *tb,
161 int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
211 * @tb: Transfer buffers and mutex used by I/O operations.
234 struct st_sensor_transfer_buffer tb; member in struct:st_sensor_data
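In the IIO st_sensors hits, tb is not a netlink table at all but a per-device transfer buffer whose mutex serializes bus I/O; drivers reach the bus through the transfer-function table and pass the buffer along. A hedged sketch of the call pattern (the helper name is invented for illustration):

/* Read one register via the bus-specific callback; &sdata->tb
 * carries the scratch buffer and the lock that guards it. */
static int example_read_reg(struct st_sensor_data *sdata, u8 reg, u8 *val)
{
        return sdata->tf->read_byte(&sdata->tb, sdata->dev, reg, val);
}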
/linux-4.1.27/arch/um/kernel/
dyn.lds.S 41 .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
42 .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
115 .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
uml.lds.S 90 .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
/linux-4.1.27/include/crypto/
algapi.h 53 struct crypto_instance *(*alloc)(struct rtattr **tb);
55 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
159 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
160 int crypto_check_attr_type(struct rtattr **tb, u32 type);
381 static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, crypto_get_attr_alg() argument
384 return crypto_attr_alg(tb[1], type, mask); crypto_get_attr_alg()
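In the crypto API the name tb recurs with yet another type: an array of struct rtattr pointers describing an instance being templated. A hedged sketch of a template create() callback built from the helpers declared above (the function name and type flags are chosen for illustration):

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_alg *alg;
        int err;

        /* tb[0] carries the type attribute; verify the request
         * really asks for this kind of instance. */
        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
        if (err)
                return err;

        /* tb[1] names the underlying algorithm to wrap. */
        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        /* ... build and register the instance, then drop the ref ... */
        crypto_mod_put(alg);
        return 0;
}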
/linux-4.1.27/drivers/net/bonding/
bond_netlink.c 103 static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) bond_validate() argument
105 if (tb[IFLA_ADDRESS]) { bond_validate()
106 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) bond_validate()
108 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) bond_validate()
116 struct nlattr *tb[], struct nlattr *data[]) bond_slave_changelink()
142 struct nlattr *tb[], struct nlattr *data[]) bond_changelink()
386 struct nlattr *tb[], struct nlattr *data[]) bond_newlink()
390 err = bond_changelink(bond_dev, tb, data); bond_newlink()
114 bond_slave_changelink(struct net_device *bond_dev, struct net_device *slave_dev, struct nlattr *tb[], struct nlattr *data[]) bond_slave_changelink() argument
141 bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], struct nlattr *data[]) bond_changelink() argument
385 bond_newlink(struct net *src_net, struct net_device *bond_dev, struct nlattr *tb[], struct nlattr *data[]) bond_newlink() argument
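The bonding hits are the standard rtnl_link_ops callbacks: the rtnetlink core parses the generic IFLA_* attributes into tb[] (and driver-private ones into data[]) before invoking them. A hedged sketch of how they are wired up, showing only a subset of the fields the real bond_link_ops sets:

static struct rtnl_link_ops example_link_ops __read_mostly = {
        .kind           = "bond",
        .priv_size      = sizeof(struct bonding),
        .validate       = bond_validate,     /* checks tb[IFLA_ADDRESS] */
        .newlink        = bond_newlink,
        .changelink     = bond_changelink,
};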
/linux-4.1.27/arch/powerpc/kvm/
book3s_64_mmu.c 58 if (vcpu->arch.slb[i].tb) kvmppc_mmu_book3s_64_find_slbe()
72 vcpu->arch.slb[i].tb ? 't' : ' ', kvmppc_mmu_book3s_64_find_slbe()
82 return slbe->tb ? SID_SHIFT_1T : SID_SHIFT; kvmppc_slb_sid_shift()
146 ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M; kvmppc_mmu_book3s_64_get_pteg()
249 if (slbe->tb) kvmppc_mmu_book3s_64_xlate()
399 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; kvmppc_mmu_book3s_64_slbmte()
400 slbe->esid = slbe->tb ? esid_1t : esid; kvmppc_mmu_book3s_64_slbmte()
592 if (slb->tb) { kvmppc_mmu_book3s_64_esid_to_vsid()
/linux-4.1.27/arch/sparc/include/asm/
tlbflush_64.h 18 void flush_tsb_user(struct tlb_batch *tb);
/linux-4.1.27/include/crypto/internal/
aead.h 59 struct rtattr **tb, u32 type,
/linux-4.1.27/lib/
nlattr.c 168 * nla_parse - Parse a stream of attributes into a tb buffer
169 * @tb: destination array with maxtype+1 elements
176 * the tb array accessible via the attribute type. Attributes with a type
182 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, nla_parse() argument
188 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_parse()
200 tb[type] = (struct nlattr *)nla; nla_for_each_attr()
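lib/nlattr.c is where the tb convention is defined: nla_parse() zeroes the array and then fills tb[type] for every attribute it recognizes, so absent attributes read back as NULL. A self-contained, hedged usage sketch (the EXAMPLE_* names and policy are invented; the five-argument call matches the 4.1 prototype above; needs <net/netlink.h>):

enum {
        EXAMPLE_ATTR_UNSPEC,
        EXAMPLE_ATTR_FOO,               /* u32 */
        __EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_FOO] = { .type = NLA_U32 },
};

static int example_parse(const struct nlattr *head, int len)
{
        struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
        int err;

        /* tb[] is zeroed, then indexed by attribute type. */
        err = nla_parse(tb, EXAMPLE_ATTR_MAX, head, len, example_policy);
        if (err)
                return err;

        if (tb[EXAMPLE_ATTR_FOO])
                pr_info("foo=%u\n", nla_get_u32(tb[EXAMPLE_ATTR_FOO]));
        return 0;
}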
/linux-4.1.27/net/ieee802154/
nl802154.c 327 struct nlattr **tb = nl802154_fam.attrbuf; nl802154_dump_wpan_phy_parse() local
329 tb, nl802154_fam.maxattr, nl802154_policy); nl802154_dump_wpan_phy_parse()
337 if (tb[NL802154_ATTR_WPAN_PHY]) nl802154_dump_wpan_phy_parse()
338 state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]); nl802154_dump_wpan_phy_parse()
339 if (tb[NL802154_ATTR_WPAN_DEV]) nl802154_dump_wpan_phy_parse()
340 state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32; nl802154_dump_wpan_phy_parse()
341 if (tb[NL802154_ATTR_IFINDEX]) { nl802154_dump_wpan_phy_parse()
344 int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]); nl802154_dump_wpan_phy_parse()
