/linux-4.4.14/fs/reiserfs/ |
D | do_balan.c |
   19  static inline void buffer_info_init_left(struct tree_balance *tb,  in buffer_info_init_left() argument
   22  bi->tb = tb;  in buffer_info_init_left()
   23  bi->bi_bh = tb->L[0];  in buffer_info_init_left()
   24  bi->bi_parent = tb->FL[0];  in buffer_info_init_left()
   25  bi->bi_position = get_left_neighbor_position(tb, 0);  in buffer_info_init_left()
   28  static inline void buffer_info_init_right(struct tree_balance *tb,  in buffer_info_init_right() argument
   31  bi->tb = tb;  in buffer_info_init_right()
   32  bi->bi_bh = tb->R[0];  in buffer_info_init_right()
   33  bi->bi_parent = tb->FR[0];  in buffer_info_init_right()
   34  bi->bi_position = get_right_neighbor_position(tb, 0);  in buffer_info_init_right()
   [all …]
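Aside: the two helpers above are mirror images — they fill the same struct buffer_info, pointing it either at the left neighbor (L[0]/FL[0]) or the right one (R[0]/FR[0]), so downstream balancing code takes one descriptor and never cares which neighbor it describes. A minimal sketch of that pattern, with toy types standing in for the reiserfs ones (nothing below is the real definition):

    /* Toy sketch only: stand-ins for struct tree_balance / buffer_info. */
    struct node;                              /* plays struct buffer_head  */

    struct balance_ctx {                      /* plays struct tree_balance */
        struct node *L0, *R0;                 /* left/right neighbors      */
        struct node *FL0, *FR0;               /* their parents             */
    };

    struct buf_info {                         /* plays struct buffer_info  */
        struct balance_ctx *tb;
        struct node *bi_bh;
        struct node *bi_parent;
        int bi_position;
    };

    static inline void buf_info_init_left(struct balance_ctx *tb,
                                          struct buf_info *bi, int pos)
    {
        bi->tb = tb;
        bi->bi_bh = tb->L0;                   /* like tb->L[0]             */
        bi->bi_parent = tb->FL0;              /* like tb->FL[0]            */
        bi->bi_position = pos;                /* get_left_neighbor_position()
                                                 in the original           */
    }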
|
D | fix_node.c |
   51  static void create_virtual_node(struct tree_balance *tb, int h)  in create_virtual_node() argument
   54  struct virtual_node *vn = tb->tb_vn;  in create_virtual_node()
   58  Sh = PATH_H_PBUFFER(tb->tb_path, h);  in create_virtual_node()
   62  MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];  in create_virtual_node()
   76  vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);  in create_virtual_node()
  115  op_create_vi(vn, vi, is_affected, tb->insert_size[0]);  in create_virtual_node()
  116  if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)  in create_virtual_node()
  117  reiserfs_panic(tb->tb_sb, "vs-8030",  in create_virtual_node()
  125  vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];  in create_virtual_node()
  137  vi->vi_item_len = tb->insert_size[0];  in create_virtual_node()
   [all …]
|
D | ibalance.c |
   28  struct tree_balance *tb,  in internal_define_dest_src_infos() argument
   41  src_bi->tb = tb;  in internal_define_dest_src_infos()
   42  src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);  in internal_define_dest_src_infos()
   43  src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);  in internal_define_dest_src_infos()
   44  src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);  in internal_define_dest_src_infos()
   45  dest_bi->tb = tb;  in internal_define_dest_src_infos()
   46  dest_bi->bi_bh = tb->L[h];  in internal_define_dest_src_infos()
   47  dest_bi->bi_parent = tb->FL[h];  in internal_define_dest_src_infos()
   48  dest_bi->bi_position = get_left_neighbor_position(tb, h);  in internal_define_dest_src_infos()
   49  *d_key = tb->lkey[h];  in internal_define_dest_src_infos()
   [all …]
|
D | prints.c |
  622  void store_print_tb(struct tree_balance *tb)  in store_print_tb() argument
  628  if (!tb)  in store_print_tb()
  636  REISERFS_SB(tb->tb_sb)->s_do_balance,  in store_print_tb()
  637  tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),  in store_print_tb()
  638  tb->tb_path->pos_in_item);  in store_print_tb()
  640  for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {  in store_print_tb()
  641  if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=  in store_print_tb()
  642  tb->tb_path->path_length  in store_print_tb()
  643  && PATH_H_PATH_OFFSET(tb->tb_path,  in store_print_tb()
  645  tbSh = PATH_H_PBUFFER(tb->tb_path, h);  in store_print_tb()
   [all …]
|
D | lbalance.c |
  392  do_balance_mark_leaf_dirty(dest_bi->tb, dest, 0);  in leaf_copy_items_entirely()
  405  do_balance_mark_internal_dirty(dest_bi->tb, dest_bi->bi_parent,  in leaf_copy_items_entirely()
  629  static void leaf_define_dest_src_infos(int shift_mode, struct tree_balance *tb,  in leaf_define_dest_src_infos() argument
  641  src_bi->tb = tb;  in leaf_define_dest_src_infos()
  642  src_bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);  in leaf_define_dest_src_infos()
  643  src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);  in leaf_define_dest_src_infos()
  646  src_bi->bi_position = PATH_H_B_ITEM_ORDER(tb->tb_path, 0);  in leaf_define_dest_src_infos()
  647  dest_bi->tb = tb;  in leaf_define_dest_src_infos()
  648  dest_bi->bi_bh = tb->L[0];  in leaf_define_dest_src_infos()
  649  dest_bi->bi_parent = tb->FL[0];  in leaf_define_dest_src_infos()
   [all …]
|
D | stree.c |
 1146  static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)  in calc_deleted_bytes_number() argument
 1149  struct item_head *p_le_ih = tp_item_head(tb->tb_path);  in calc_deleted_bytes_number()
 1156  M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];  in calc_deleted_bytes_number()
 1168  (PATH_PLAST_BUFFER(tb->tb_path)->b_size);  in calc_deleted_bytes_number()
 1173  struct tree_balance *tb,  in init_tb_struct() argument
 1180  memset(tb, '\0', sizeof(struct tree_balance));  in init_tb_struct()
 1181  tb->transaction_handle = th;  in init_tb_struct()
 1182  tb->tb_sb = sb;  in init_tb_struct()
 1183  tb->tb_path = path;  in init_tb_struct()
 1186  tb->insert_size[0] = size;  in init_tb_struct()
   [all …]
|
/linux-4.4.14/drivers/thunderbolt/ |
D | tb.c |
   52  sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));  in tb_scan_port()
   63  static void tb_free_invalid_tunnels(struct tb *tb)  in tb_free_invalid_tunnels() argument
   67  list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)  in tb_free_invalid_tunnels()
  143  static void tb_activate_pcie_devices(struct tb *tb)  in tb_activate_pcie_devices() argument
  153  for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {  in tb_activate_pcie_devices()
  154  if (tb_is_upstream_port(&tb->root_switch->ports[i]))  in tb_activate_pcie_devices()
  156  if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)  in tb_activate_pcie_devices()
  158  if (!tb->root_switch->ports[i].remote)  in tb_activate_pcie_devices()
  160  sw = tb->root_switch->ports[i].remote->sw;  in tb_activate_pcie_devices()
  179  down_port = tb_find_unused_down_port(tb->root_switch);  in tb_activate_pcie_devices()
   [all …]
|
D | tb.h |
   21  struct tb *tb;  member
   80  struct tb *tb;  member
   99  struct tb {  struct
  143  return tb_cfg_read(sw->tb->ctl,  in tb_sw_read()
  155  return tb_cfg_write(sw->tb->ctl,  in tb_sw_write()
  167  return tb_cfg_read(port->sw->tb->ctl,  in tb_port_read()
  179  return tb_cfg_write(port->sw->tb->ctl,  in tb_port_write()
  188  #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)  argument
  189  #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)  argument
  190  #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)  argument
   [all …]
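The tb_err()/tb_warn()/tb_WARN() macros at lines 188-190 route every message through the domain's NHI PCI device, so all Thunderbolt logs carry one consistent prefix. A user-space sketch of the same variadic-macro idiom (fprintf stands in for dev_err(); the struct layout is invented):

    #include <stdio.h>

    struct tb { const char *dev_name; };    /* toy stand-in */

    /* "## arg" in the kernel macros (##__VA_ARGS__ here, a GNU/C23-style
     * extension) swallows the trailing comma when nothing follows fmt. */
    #define tb_err(tb, fmt, ...) \
        fprintf(stderr, "%s: " fmt, (tb)->dev_name, ##__VA_ARGS__)

    int main(void)
    {
        struct tb tb = { "thunderbolt0" };

        tb_err(&tb, "path activation failed (err %d)\n", -5);
        tb_err(&tb, "no arguments after fmt also compiles\n");
        return 0;
    }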
|
D | switch.c |
   43  static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)  in tb_dump_port() argument
   45  tb_info(tb,  in tb_dump_port()
   50  tb_info(tb, " Max hop id (in/out): %d/%d\n",  in tb_dump_port()
   52  tb_info(tb, " Max counters: %d\n", port->max_counters);  in tb_dump_port()
   53  tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);  in tb_dump_port()
  203  tb_dump_port(port->sw->tb, &port->config);  in tb_init_port()
  212  static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)  in tb_dump_switch() argument
  214  tb_info(tb,  in tb_dump_switch()
  218  tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);  in tb_dump_switch()
  219  tb_info(tb, " Config:\n");  in tb_dump_switch()
   [all …]
|
D | path.c |
   35  struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)  in tb_path_alloc() argument
   45  path->tb = tb;  in tb_path_alloc()
   56  tb_WARN(path->tb, "trying to free an activated path\n")  in tb_path_free()
   93  tb_WARN(path->tb, "trying to deactivate an inactive path\n");  in tb_path_deactivate()
   96  tb_info(path->tb,  in tb_path_deactivate()
  120  tb_WARN(path->tb, "trying to activate already activated path\n");  in tb_path_activate()
  124  tb_info(path->tb,  in tb_path_activate()
  212  tb_info(path->tb, "path activation complete\n");  in tb_path_activate()
  215  tb_WARN(path->tb, "path activation failed\n");  in tb_path_activate()
|
D | tunnel_pci.h |
   13  struct tb *tb;  member
   21  struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
|
D | tunnel_pci.c |
   16  level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
   58  struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,  in tb_pci_alloc() argument
   64  tunnel->tb = tb;  in tb_pci_alloc()
   68  tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);  in tb_pci_alloc()
   71  tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);  in tb_pci_alloc()
  208  list_add(&tunnel->list, &tunnel->tb->tunnel_list);  in tb_pci_activate()
|
D | nhi.c |
  499  struct tb *tb = pci_get_drvdata(pdev);  in nhi_suspend_noirq() local
  500  thunderbolt_suspend(tb);  in nhi_suspend_noirq()
  507  struct tb *tb = pci_get_drvdata(pdev);  in nhi_resume_noirq() local
  508  thunderbolt_resume(tb);  in nhi_resume_noirq()
  538  struct tb *tb;  in nhi_probe() local
  596  tb = thunderbolt_alloc_and_start(nhi);  in nhi_probe()
  597  if (!tb) {  in nhi_probe()
  605  pci_set_drvdata(pdev, tb);  in nhi_probe()
  612  struct tb *tb = pci_get_drvdata(pdev);  in nhi_remove() local
  613  struct tb_nhi *nhi = tb->nhi;  in nhi_remove()
   [all …]
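nhi_probe() stores the freshly allocated struct tb in the PCI device with pci_set_drvdata(), and the suspend/resume/remove callbacks, which only receive the device, fetch it back with pci_get_drvdata(). A toy round-trip of that idiom (struct pdev is a made-up stand-in for struct pci_dev):

    /* Toy sketch of the drvdata round-trip; not the PCI core API. */
    struct pdev { void *drvdata; };

    static void pdev_set_drvdata(struct pdev *d, void *p) { d->drvdata = p; }
    static void *pdev_get_drvdata(struct pdev *d) { return d->drvdata; }

    struct tb { int domain; };

    static int nhi_probe(struct pdev *d, struct tb *tb)
    {
        pdev_set_drvdata(d, tb);            /* like pci_set_drvdata(pdev, tb) */
        return 0;
    }

    static void nhi_suspend(struct pdev *d)
    {
        struct tb *tb = pdev_get_drvdata(d);    /* recover the domain */

        (void)tb;                           /* thunderbolt_suspend(tb) here */
    }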
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ebb/ |
D | trace.c |
   17  struct trace_buffer *tb;  in trace_buffer_allocate() local
   19  if (size < sizeof(*tb)) {  in trace_buffer_allocate()
   24  tb = mmap(NULL, size, PROT_READ | PROT_WRITE,  in trace_buffer_allocate()
   26  if (tb == MAP_FAILED) {  in trace_buffer_allocate()
   31  tb->size = size;  in trace_buffer_allocate()
   32  tb->tail = tb->data;  in trace_buffer_allocate()
   33  tb->overflow = false;  in trace_buffer_allocate()
   35  return tb;  in trace_buffer_allocate()
   38  static bool trace_check_bounds(struct trace_buffer *tb, void *p)  in trace_check_bounds() argument
   40  return p < ((void *)tb + tb->size);  in trace_check_bounds()
   [all …]
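The allocator above is ordinary POSIX mmap(), so the pattern can be shown as a runnable user-space sketch; the struct trace_buffer layout below is inferred from the matched lines, not copied from trace.h:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>      /* may need _DEFAULT_SOURCE for MAP_ANONYMOUS */

    struct trace_buffer {
        size_t size;
        bool   overflow;
        void  *tail;               /* next write position       */
        unsigned char data[];      /* entries are appended here */
    };

    static struct trace_buffer *trace_buffer_allocate(size_t size)
    {
        struct trace_buffer *tb;

        if (size < sizeof(*tb))
            return NULL;           /* too small even for the header */

        tb = mmap(NULL, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (tb == MAP_FAILED)
            return NULL;

        tb->size = size;
        tb->tail = tb->data;
        tb->overflow = false;
        return tb;
    }

The trace_check_bounds() match suggests each append is then bounds-checked against (void *)tb + tb->size, with overflow recorded instead of writing past the mapping.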
|
D | trace.h |
   33  int trace_log_reg(struct trace_buffer *tb, u64 reg, u64 value);
   34  int trace_log_counter(struct trace_buffer *tb, u64 value);
   35  int trace_log_string(struct trace_buffer *tb, char *str);
   36  int trace_log_indent(struct trace_buffer *tb);
   37  int trace_log_outdent(struct trace_buffer *tb);
   38  void trace_buffer_print(struct trace_buffer *tb);
   39  void trace_print_location(struct trace_buffer *tb);
|
/linux-4.4.14/drivers/iio/common/st_sensors/ |
D | st_sensors_spi.c |
   29  static int st_sensors_spi_read(struct st_sensor_transfer_buffer *tb,  in st_sensors_spi_read() argument
   36  .tx_buf = tb->tx_buf,  in st_sensors_spi_read()
   41  .rx_buf = tb->rx_buf,  in st_sensors_spi_read()
   47  mutex_lock(&tb->buf_lock);  in st_sensors_spi_read()
   49  tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_MULTIREAD;  in st_sensors_spi_read()
   51  tb->tx_buf[0] = reg_addr | ST_SENSORS_SPI_READ;  in st_sensors_spi_read()
   57  memcpy(data, tb->rx_buf, len);  in st_sensors_spi_read()
   58  mutex_unlock(&tb->buf_lock);  in st_sensors_spi_read()
   62  mutex_unlock(&tb->buf_lock);  in st_sensors_spi_read()
   66  static int st_sensors_spi_read_byte(struct st_sensor_transfer_buffer *tb,  in st_sensors_spi_read_byte() argument
   [all …]
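The transfer-buffer idiom here: one preallocated tx/rx pair per sensor, with a lock held across address setup, the bus transfer, and the copy-out of rx_buf. A compilable sketch with pthreads in place of the kernel mutex and a dummy bus transfer (the flag values are illustrative, not the ST_SENSORS_SPI_* ones):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define SPI_READ      0x80     /* read bit (illustrative)            */
    #define SPI_MULTIREAD 0xc0     /* read with address auto-increment   */

    struct xfer_buf {
        pthread_mutex_t buf_lock;
        uint8_t tx_buf[4];
        uint8_t rx_buf[64];
    };

    static int spi_xfer(const uint8_t *tx, uint8_t *rx, size_t len)
    {
        (void)tx;
        memset(rx, 0, len);        /* stand-in for the full-duplex transfer */
        return 0;
    }

    static int sensor_read(struct xfer_buf *tb, uint8_t reg_addr,
                           size_t len, uint8_t *data, int multiread)
    {
        int err;

        pthread_mutex_lock(&tb->buf_lock);     /* one transfer at a time */
        tb->tx_buf[0] = reg_addr | (multiread ? SPI_MULTIREAD : SPI_READ);
        err = spi_xfer(tb->tx_buf, tb->rx_buf, len);
        if (!err)
            memcpy(data, tb->rx_buf, len);     /* copy out under the lock */
        pthread_mutex_unlock(&tb->buf_lock);
        return err;
    }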
|
D | st_sensors_buffer.c |
   48  len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,  in st_sensors_get_buffer_element()
   53  len = sdata->tf->read_multiple_byte(&sdata->tb,  in st_sensors_get_buffer_element()
   65  len = sdata->tf->read_multiple_byte(&sdata->tb,  in st_sensors_get_buffer_element()
   85  len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,  in st_sensors_get_buffer_element()
|
D | st_sensors_i2c.c |
   29  static int st_sensors_i2c_read_byte(struct st_sensor_transfer_buffer *tb,  in st_sensors_i2c_read_byte() argument
   45  struct st_sensor_transfer_buffer *tb, struct device *dev,  in st_sensors_i2c_read_multiple_byte() argument
   55  static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,  in st_sensors_i2c_write_byte() argument
|
D | st_sensors_core.c |
   36  err = sdata->tf->read_byte(&sdata->tb, sdata->dev, reg_addr, &new_data);  in st_sensors_write_data_with_mask()
   41  err = sdata->tf->write_byte(&sdata->tb, sdata->dev, reg_addr, new_data);  in st_sensors_write_data_with_mask()
   56  return sdata->tf->write_byte(&sdata->tb, sdata->dev,  in st_sensors_debugfs_reg_access()
   59  err = sdata->tf->read_byte(&sdata->tb, sdata->dev, (u8)reg, &readdata);  in st_sensors_debugfs_reg_access()
  453  err = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,  in st_sensors_read_axis_data()
  527  err = sdata->tf->read_byte(&sdata->tb, sdata->dev,  in st_sensors_check_device_support()
|
/linux-4.4.14/arch/sparc/mm/ |
D | tlb.c |
   25  struct tlb_batch *tb = &get_cpu_var(tlb_batch);  in flush_tlb_pending() local
   26  struct mm_struct *mm = tb->mm;  in flush_tlb_pending()
   28  if (!tb->tlb_nr)  in flush_tlb_pending()
   31  flush_tsb_user(tb);  in flush_tlb_pending()
   34  if (tb->tlb_nr == 1) {  in flush_tlb_pending()
   35  global_flush_tlb_page(mm, tb->vaddrs[0]);  in flush_tlb_pending()
   38  smp_flush_tlb_pending(tb->mm, tb->tlb_nr,  in flush_tlb_pending()
   39  &tb->vaddrs[0]);  in flush_tlb_pending()
   41  __flush_tlb_pending(CTX_HWBITS(tb->mm->context),  in flush_tlb_pending()
   42  tb->tlb_nr, &tb->vaddrs[0]);  in flush_tlb_pending()
   [all …]
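The shape of flush_tlb_pending(): virtual addresses are batched in tlb_batch and flushed either with the cheap single-page call or the bulk call, then the batch resets. A stripped-down model (flush_one()/flush_many() are empty stand-ins for the SMP/UP flush primitives, and the capacity is illustrative):

    #define TLB_BATCH_NR 64        /* batch capacity (assumed) */

    struct tlb_batch {
        unsigned long tlb_nr;
        unsigned long vaddrs[TLB_BATCH_NR];
    };

    static void flush_one(unsigned long va) { (void)va; }
    static void flush_many(unsigned long nr, unsigned long *va)
    { (void)nr; (void)va; }

    static void flush_tlb_pending(struct tlb_batch *tb)
    {
        if (!tb->tlb_nr)
            return;                         /* nothing batched          */
        if (tb->tlb_nr == 1)
            flush_one(tb->vaddrs[0]);       /* cheap single-page case   */
        else
            flush_many(tb->tlb_nr, &tb->vaddrs[0]);
        tb->tlb_nr = 0;                     /* reset for the next batch */
    }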
|
D | tsb.c |
   63  static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,  in __flush_tsb_one() argument
   68  for (i = 0; i < tb->tlb_nr; i++)  in __flush_tsb_one()
   69  __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);  in __flush_tsb_one()
   72  void flush_tsb_user(struct tlb_batch *tb)  in flush_tsb_user() argument
   74  struct mm_struct *mm = tb->mm;  in flush_tsb_user()
   79  if (!tb->huge) {  in flush_tsb_user()
   84  __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);  in flush_tsb_user()
   87  if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {  in flush_tsb_user()
   92  __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);  in flush_tsb_user()
|
/linux-4.4.14/net/netfilter/ |
D | nfnetlink_cthelper.c |
   71  struct nlattr *tb[NFCTH_TUPLE_MAX+1];  in nfnl_cthelper_parse_tuple() local
   73  err = nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol);  in nfnl_cthelper_parse_tuple()
   77  if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])  in nfnl_cthelper_parse_tuple()
   83  tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM]));  in nfnl_cthelper_parse_tuple()
   84  tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);  in nfnl_cthelper_parse_tuple()
  131  struct nlattr *tb[NFCTH_POLICY_MAX+1];  in nfnl_cthelper_expect_policy() local
  133  err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol);  in nfnl_cthelper_expect_policy()
  137  if (!tb[NFCTH_POLICY_NAME] ||  in nfnl_cthelper_expect_policy()
  138  !tb[NFCTH_POLICY_EXPECT_MAX] ||  in nfnl_cthelper_expect_policy()
  139  !tb[NFCTH_POLICY_EXPECT_TIMEOUT])  in nfnl_cthelper_expect_policy()
   [all …]
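Nearly every file in this directory uses the same idiom: nla_parse_nested() fills struct nlattr *tb[MAX+1] indexed by attribute type, `if (!tb[TYPE])` tests presence, and nla_get_*() extracts the payload. The toy TLV parser below shows the shape of that table; it is self-contained and is not the kernel's implementation (the attribute names and the 4-byte little-endian header are made up):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum { ATTR_UNSPEC, ATTR_L3PROTO, ATTR_L4PROTO, ATTR_MAX = ATTR_L4PROTO };

    struct attr { uint16_t type, len; const uint8_t *data; };

    /* Wire format assumed: u16 type, u16 length, then 'length' bytes. */
    static int parse_attrs(struct attr *tb[], int maxtype,
                           const uint8_t *buf, size_t buflen,
                           struct attr storage[])
    {
        size_t off = 0;

        memset(tb, 0, sizeof(*tb) * (maxtype + 1));
        while (off + 4 <= buflen) {
            uint16_t type = buf[off] | (uint16_t)(buf[off + 1] << 8);
            uint16_t len  = buf[off + 2] | (uint16_t)(buf[off + 3] << 8);

            if (off + 4 + len > buflen)
                return -1;                      /* truncated attribute */
            if (type <= maxtype) {
                storage[type] = (struct attr){ type, len, buf + off + 4 };
                tb[type] = &storage[type];      /* last of a type wins */
            }                                   /* unknown types skipped */
            off += 4 + (size_t)len;
        }
        return 0;
    }

    /* Usage, mirroring nfnl_cthelper_parse_tuple(): both attributes are
     * mandatory, so absence is an error. */
    static int parse_tuple(const uint8_t *buf, size_t buflen)
    {
        struct attr storage[ATTR_MAX + 1], *tb[ATTR_MAX + 1];

        if (parse_attrs(tb, ATTR_MAX, buf, buflen, storage) < 0)
            return -1;
        if (!tb[ATTR_L3PROTO] || !tb[ATTR_L4PROTO])
            return -1;                          /* like the EINVAL path above */
        return 0;
    }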
|
D | nft_nat.c |
  118  const struct nlattr * const tb[])  in nft_nat_init() argument
  125  if (tb[NFTA_NAT_TYPE] == NULL ||  in nft_nat_init()
  126  (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&  in nft_nat_init()
  127  tb[NFTA_NAT_REG_PROTO_MIN] == NULL))  in nft_nat_init()
  130  switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {  in nft_nat_init()
  145  if (tb[NFTA_NAT_FAMILY] == NULL)  in nft_nat_init()
  148  family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));  in nft_nat_init()
  164  if (tb[NFTA_NAT_REG_ADDR_MIN]) {  in nft_nat_init()
  166  nft_parse_register(tb[NFTA_NAT_REG_ADDR_MIN]);  in nft_nat_init()
  171  if (tb[NFTA_NAT_REG_ADDR_MAX]) {  in nft_nat_init()
   [all …]
|
D | nft_ct.c |
  224  const struct nlattr * const tb[])  in nft_ct_get_init() argument
  230  priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));  in nft_ct_get_init()
  233  if (tb[NFTA_CT_DIRECTION] != NULL)  in nft_ct_get_init()
  246  if (tb[NFTA_CT_DIRECTION] != NULL)  in nft_ct_get_init()
  252  if (tb[NFTA_CT_DIRECTION] != NULL)  in nft_ct_get_init()
  258  if (tb[NFTA_CT_DIRECTION] != NULL)  in nft_ct_get_init()
  265  if (tb[NFTA_CT_DIRECTION] == NULL)  in nft_ct_get_init()
  271  if (tb[NFTA_CT_DIRECTION] == NULL)  in nft_ct_get_init()
  290  if (tb[NFTA_CT_DIRECTION] == NULL)  in nft_ct_get_init()
  298  if (tb[NFTA_CT_DIRECTION] != NULL) {  in nft_ct_get_init()
   [all …]
|
D | nft_log.c |
   50  const struct nlattr * const tb[])  in nft_log_init() argument
   57  nla = tb[NFTA_LOG_PREFIX];  in nft_log_init()
   68  if (tb[NFTA_LOG_LEVEL] != NULL &&  in nft_log_init()
   69  tb[NFTA_LOG_GROUP] != NULL)  in nft_log_init()
   71  if (tb[NFTA_LOG_GROUP] != NULL)  in nft_log_init()
   76  if (tb[NFTA_LOG_LEVEL] != NULL) {  in nft_log_init()
   78  ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));  in nft_log_init()
   82  if (tb[NFTA_LOG_FLAGS] != NULL) {  in nft_log_init()
   84  ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));  in nft_log_init()
   88  li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));  in nft_log_init()
   [all …]
|
D | nft_bitwise.c |
   51  const struct nlattr * const tb[])  in nft_bitwise_init() argument
   57  if (tb[NFTA_BITWISE_SREG] == NULL ||  in nft_bitwise_init()
   58  tb[NFTA_BITWISE_DREG] == NULL ||  in nft_bitwise_init()
   59  tb[NFTA_BITWISE_LEN] == NULL ||  in nft_bitwise_init()
   60  tb[NFTA_BITWISE_MASK] == NULL ||  in nft_bitwise_init()
   61  tb[NFTA_BITWISE_XOR] == NULL)  in nft_bitwise_init()
   64  priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));  in nft_bitwise_init()
   65  priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);  in nft_bitwise_init()
   70  priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);  in nft_bitwise_init()
   77  tb[NFTA_BITWISE_MASK]);  in nft_bitwise_init()
   [all …]
|
D | nft_limit.c |
   55  const struct nlattr * const tb[])  in nft_limit_init() argument
   59  if (tb[NFTA_LIMIT_RATE] == NULL ||  in nft_limit_init()
   60  tb[NFTA_LIMIT_UNIT] == NULL)  in nft_limit_init()
   63  limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));  in nft_limit_init()
   64  unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));  in nft_limit_init()
   70  if (tb[NFTA_LIMIT_BURST]) {  in nft_limit_init()
   73  limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));  in nft_limit_init()
  127  const struct nlattr * const tb[])  in nft_limit_pkts_init() argument
  132  err = nft_limit_init(&priv->limit, tb);  in nft_limit_pkts_init()
  169  const struct nlattr * const tb[])  in nft_limit_pkt_bytes_init() argument
   [all …]
|
D | nft_payload.c |
  118  const struct nlattr * const tb[])  in nft_payload_init() argument
  122  priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));  in nft_payload_init()
  123  priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));  in nft_payload_init()
  124  priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));  in nft_payload_init()
  125  priv->dreg = nft_parse_register(tb[NFTA_PAYLOAD_DREG]);  in nft_payload_init()
  165  const struct nlattr * const tb[])  in nft_payload_select_ops() argument
  170  if (tb[NFTA_PAYLOAD_DREG] == NULL ||  in nft_payload_select_ops()
  171  tb[NFTA_PAYLOAD_BASE] == NULL ||  in nft_payload_select_ops()
  172  tb[NFTA_PAYLOAD_OFFSET] == NULL ||  in nft_payload_select_ops()
  173  tb[NFTA_PAYLOAD_LEN] == NULL)  in nft_payload_select_ops()
   [all …]
|
D | nft_cmp.c |
   72  const struct nlattr * const tb[])  in nft_cmp_init() argument
   79  tb[NFTA_CMP_DATA]);  in nft_cmp_init()
   82  priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);  in nft_cmp_init()
   87  priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));  in nft_cmp_init()
  121  const struct nlattr * const tb[])  in nft_cmp_fast_init() argument
  130  tb[NFTA_CMP_DATA]);  in nft_cmp_fast_init()
  133  priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]);  in nft_cmp_fast_init()
  175  nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])  in nft_cmp_select_ops() argument
  182  if (tb[NFTA_CMP_SREG] == NULL ||  in nft_cmp_select_ops()
  183  tb[NFTA_CMP_OP] == NULL ||  in nft_cmp_select_ops()
   [all …]
|
D | nft_byteorder.c |
   79  const struct nlattr * const tb[])  in nft_byteorder_init() argument
   84  if (tb[NFTA_BYTEORDER_SREG] == NULL ||  in nft_byteorder_init()
   85  tb[NFTA_BYTEORDER_DREG] == NULL ||  in nft_byteorder_init()
   86  tb[NFTA_BYTEORDER_LEN] == NULL ||  in nft_byteorder_init()
   87  tb[NFTA_BYTEORDER_SIZE] == NULL ||  in nft_byteorder_init()
   88  tb[NFTA_BYTEORDER_OP] == NULL)  in nft_byteorder_init()
   91  priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));  in nft_byteorder_init()
  100  priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));  in nft_byteorder_init()
  109  priv->sreg = nft_parse_register(tb[NFTA_BYTEORDER_SREG]);  in nft_byteorder_init()
  110  priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));  in nft_byteorder_init()
   [all …]
|
D | nft_exthdr.c |
   59  const struct nlattr * const tb[])  in nft_exthdr_init() argument
   63  if (tb[NFTA_EXTHDR_DREG] == NULL ||  in nft_exthdr_init()
   64  tb[NFTA_EXTHDR_TYPE] == NULL ||  in nft_exthdr_init()
   65  tb[NFTA_EXTHDR_OFFSET] == NULL ||  in nft_exthdr_init()
   66  tb[NFTA_EXTHDR_LEN] == NULL)  in nft_exthdr_init()
   69  priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);  in nft_exthdr_init()
   70  priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));  in nft_exthdr_init()
   71  priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));  in nft_exthdr_init()
   72  priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);  in nft_exthdr_init()
|
D | nfnetlink_acct.c |
   51  const struct nlmsghdr *nlh, const struct nlattr * const tb[])  in nfnl_acct_new() argument
   59  if (!tb[NFACCT_NAME])  in nfnl_acct_new()
   62  acct_name = nla_data(tb[NFACCT_NAME]);  in nfnl_acct_new()
   92  if (tb[NFACCT_FLAGS]) {  in nfnl_acct_new()
   93  flags = ntohl(nla_get_be32(tb[NFACCT_FLAGS]));  in nfnl_acct_new()
  111  *quota = be64_to_cpu(nla_get_be64(tb[NFACCT_QUOTA]));  in nfnl_acct_new()
  115  strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX);  in nfnl_acct_new()
  117  if (tb[NFACCT_BYTES]) {  in nfnl_acct_new()
  119  be64_to_cpu(nla_get_be64(tb[NFACCT_BYTES])));  in nfnl_acct_new()
  121  if (tb[NFACCT_PKTS]) {  in nfnl_acct_new()
   [all …]
|
D | nft_dynset.c |
  103  const struct nlattr * const tb[])  in nft_dynset_init() argument
  110  if (tb[NFTA_DYNSET_SET_NAME] == NULL ||  in nft_dynset_init()
  111  tb[NFTA_DYNSET_OP] == NULL ||  in nft_dynset_init()
  112  tb[NFTA_DYNSET_SREG_KEY] == NULL)  in nft_dynset_init()
  115  set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME]);  in nft_dynset_init()
  117  if (tb[NFTA_DYNSET_SET_ID])  in nft_dynset_init()
  119  tb[NFTA_DYNSET_SET_ID]);  in nft_dynset_init()
  127  priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP]));  in nft_dynset_init()
  140  if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {  in nft_dynset_init()
  143  timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));  in nft_dynset_init()
   [all …]
|
D | nft_redir.c |
   44  const struct nlattr * const tb[])  in nft_redir_init() argument
   55  if (tb[NFTA_REDIR_REG_PROTO_MIN]) {  in nft_redir_init()
   57  nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);  in nft_redir_init()
   63  if (tb[NFTA_REDIR_REG_PROTO_MAX]) {  in nft_redir_init()
   65  nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MAX]);  in nft_redir_init()
   76  if (tb[NFTA_REDIR_FLAGS]) {  in nft_redir_init()
   77  priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));  in nft_redir_init()
|
D | nft_lookup.c |
   54  const struct nlattr * const tb[])  in nft_lookup_init() argument
   60  if (tb[NFTA_LOOKUP_SET] == NULL ||  in nft_lookup_init()
   61  tb[NFTA_LOOKUP_SREG] == NULL)  in nft_lookup_init()
   64  set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);  in nft_lookup_init()
   66  if (tb[NFTA_LOOKUP_SET_ID]) {  in nft_lookup_init()
   68  tb[NFTA_LOOKUP_SET_ID]);  in nft_lookup_init()
   77  priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);  in nft_lookup_init()
   82  if (tb[NFTA_LOOKUP_DREG] != NULL) {  in nft_lookup_init()
   86  priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);  in nft_lookup_init()
|
D | nft_compat.c |
  184  struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];  in nft_parse_compat() local
  188  err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr,  in nft_parse_compat()
  193  if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])  in nft_parse_compat()
  196  flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));  in nft_parse_compat()
  202  *proto = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));  in nft_parse_compat()
  208  const struct nlattr * const tb[])  in nft_target_init() argument
  213  size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));  in nft_target_init()
  223  target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);  in nft_target_init()
  396  const struct nlattr * const tb[])  in nft_match_init() argument
  401  size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));  in nft_match_init()
   [all …]
|
D | nft_queue.c |
   65  const struct nlattr * const tb[])  in nft_queue_init() argument
   69  if (tb[NFTA_QUEUE_NUM] == NULL)  in nft_queue_init()
   73  priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));  in nft_queue_init()
   75  if (tb[NFTA_QUEUE_TOTAL] != NULL)  in nft_queue_init()
   76  priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));  in nft_queue_init()
   77  if (tb[NFTA_QUEUE_FLAGS] != NULL) {  in nft_queue_init()
   78  priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));  in nft_queue_init()
|
D | nft_meta.c |
  224  const struct nlattr * const tb[])  in nft_meta_get_init() argument
  229  priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));  in nft_meta_get_init()
  268  priv->dreg = nft_parse_register(tb[NFTA_META_DREG]);  in nft_meta_get_init()
  276  const struct nlattr * const tb[])  in nft_meta_set_init() argument
  282  priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));  in nft_meta_set_init()
  295  priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);  in nft_meta_set_init()
  356  const struct nlattr * const tb[])  in nft_meta_select_ops() argument
  358  if (tb[NFTA_META_KEY] == NULL)  in nft_meta_select_ops()
  361  if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG])  in nft_meta_select_ops()
  364  if (tb[NFTA_META_DREG])  in nft_meta_select_ops()
   [all …]
|
D | nft_reject.c |
   31  const struct nlattr * const tb[])  in nft_reject_init() argument
   35  if (tb[NFTA_REJECT_TYPE] == NULL)  in nft_reject_init()
   38  priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));  in nft_reject_init()
   41  if (tb[NFTA_REJECT_ICMP_CODE] == NULL)  in nft_reject_init()
   43  priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);  in nft_reject_init()
|
D | nf_nat_proto_common.c |
   99  int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],  in nf_nat_l4proto_nlattr_to_range() argument
  102  if (tb[CTA_PROTONAT_PORT_MIN]) {  in nf_nat_l4proto_nlattr_to_range()
  103  range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);  in nf_nat_l4proto_nlattr_to_range()
  107  if (tb[CTA_PROTONAT_PORT_MAX]) {  in nf_nat_l4proto_nlattr_to_range()
  108  range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);  in nf_nat_l4proto_nlattr_to_range()
|
D | nft_reject_inet.c |
   66  const struct nlattr * const tb[])  in nft_reject_inet_init() argument
   71  if (tb[NFTA_REJECT_TYPE] == NULL)  in nft_reject_inet_init()
   74  priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));  in nft_reject_inet_init()
   78  if (tb[NFTA_REJECT_ICMP_CODE] == NULL)  in nft_reject_inet_init()
   81  icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);  in nft_reject_inet_init()
|
D | nft_immediate.c |
   42  const struct nlattr * const tb[])  in nft_immediate_init() argument
   48  if (tb[NFTA_IMMEDIATE_DREG] == NULL ||  in nft_immediate_init()
   49  tb[NFTA_IMMEDIATE_DATA] == NULL)  in nft_immediate_init()
   53  tb[NFTA_IMMEDIATE_DATA]);  in nft_immediate_init()
   58  priv->dreg = nft_parse_register(tb[NFTA_IMMEDIATE_DREG]);  in nft_immediate_init()
|
D | nf_conntrack_proto_tcp.c |
 1232  struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];  in nlattr_to_tcp() local
 1240  err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, pattr, tcp_nla_policy);  in nlattr_to_tcp()
 1244  if (tb[CTA_PROTOINFO_TCP_STATE] &&  in nlattr_to_tcp()
 1245  nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)  in nlattr_to_tcp()
 1249  if (tb[CTA_PROTOINFO_TCP_STATE])  in nlattr_to_tcp()
 1250  ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);  in nlattr_to_tcp()
 1252  if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {  in nlattr_to_tcp()
 1254  nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);  in nlattr_to_tcp()
 1259  if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {  in nlattr_to_tcp()
 1261  nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);  in nlattr_to_tcp()
   [all …]
|
D | nft_masq.c |
   41  const struct nlattr * const tb[])  in nft_masq_init() argument
   50  if (tb[NFTA_MASQ_FLAGS] == NULL)  in nft_masq_init()
   53  priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));  in nft_masq_init()
|
D | nft_counter.c |
   95  const struct nlattr * const tb[])  in nft_counter_init() argument
  107  if (tb[NFTA_COUNTER_PACKETS]) {  in nft_counter_init()
  109  be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));  in nft_counter_init()
  111  if (tb[NFTA_COUNTER_BYTES]) {  in nft_counter_init()
  113  be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));  in nft_counter_init()
|
D | nf_conntrack_proto_dccp.c |
  668  struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];  in nlattr_to_dccp() local
  674  err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr,  in nlattr_to_dccp()
  679  if (!tb[CTA_PROTOINFO_DCCP_STATE] ||  in nlattr_to_dccp()
  680  !tb[CTA_PROTOINFO_DCCP_ROLE] ||  in nlattr_to_dccp()
  681  nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) > CT_DCCP_ROLE_MAX ||  in nlattr_to_dccp()
  682  nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]) >= CT_DCCP_IGNORE) {  in nlattr_to_dccp()
  687  ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);  in nlattr_to_dccp()
  688  if (nla_get_u8(tb[CTA_PROTOINFO_DCCP_ROLE]) == CT_DCCP_ROLE_CLIENT) {  in nlattr_to_dccp()
  695  if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]) {  in nlattr_to_dccp()
  697  be64_to_cpu(nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));  in nlattr_to_dccp()
   [all …]
|
D | nf_conntrack_proto_udp.c |
  161  static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],  in udp_timeout_nlattr_to_obj() argument
  171  if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {  in udp_timeout_nlattr_to_obj()
  173  ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;  in udp_timeout_nlattr_to_obj()
  175  if (tb[CTA_TIMEOUT_UDP_REPLIED]) {  in udp_timeout_nlattr_to_obj()
  177  ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;  in udp_timeout_nlattr_to_obj()
|
D | nf_conntrack_proto_udplite.c |
  176  static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],  in udplite_timeout_nlattr_to_obj() argument
  186  if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {  in udplite_timeout_nlattr_to_obj()
  188  ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;  in udplite_timeout_nlattr_to_obj()
  190  if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {  in udplite_timeout_nlattr_to_obj()
  192  ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;  in udplite_timeout_nlattr_to_obj()
|
D | nf_conntrack_proto_generic.c |
  107  static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],  in generic_timeout_nlattr_to_obj() argument
  113  if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])  in generic_timeout_nlattr_to_obj()
  115  ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;  in generic_timeout_nlattr_to_obj()
|
D | nf_conntrack_proto_sctp.c |
  563  struct nlattr *tb[CTA_PROTOINFO_SCTP_MAX+1];  in nlattr_to_sctp() local
  570  err = nla_parse_nested(tb,  in nlattr_to_sctp()
  577  if (!tb[CTA_PROTOINFO_SCTP_STATE] ||  in nlattr_to_sctp()
  578  !tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] ||  in nlattr_to_sctp()
  579  !tb[CTA_PROTOINFO_SCTP_VTAG_REPLY])  in nlattr_to_sctp()
  583  ct->proto.sctp.state = nla_get_u8(tb[CTA_PROTOINFO_SCTP_STATE]);  in nlattr_to_sctp()
  585  nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]);  in nlattr_to_sctp()
  587  nla_get_be32(tb[CTA_PROTOINFO_SCTP_VTAG_REPLY]);  in nlattr_to_sctp()
  605  static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],  in sctp_timeout_nlattr_to_obj() argument
  618  if (tb[i]) {  in sctp_timeout_nlattr_to_obj()
   [all …]
|
D | nf_conntrack_netlink.c |
  897  struct nlattr *tb[CTA_IP_MAX+1];  in ctnetlink_parse_tuple_ip() local
  901  ret = nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);  in ctnetlink_parse_tuple_ip()
  912  ret = l3proto->nlattr_to_tuple(tb, tuple);  in ctnetlink_parse_tuple_ip()
  928  struct nlattr *tb[CTA_PROTO_MAX+1];  in ctnetlink_parse_tuple_proto() local
  932  ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);  in ctnetlink_parse_tuple_proto()
  936  if (!tb[CTA_PROTO_NUM])  in ctnetlink_parse_tuple_proto()
  938  tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);  in ctnetlink_parse_tuple_proto()
  947  ret = l4proto->nlattr_to_tuple(tb, tuple);  in ctnetlink_parse_tuple_proto()
 1004  struct nlattr *tb[CTA_TUPLE_MAX+1];  in ctnetlink_parse_tuple() local
 1009  err = nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);  in ctnetlink_parse_tuple()
   [all …]
|
D | nf_conntrack_proto_gre.c |
  306  static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],  in gre_timeout_nlattr_to_obj() argument
  316  if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {  in gre_timeout_nlattr_to_obj()
  318  ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;  in gre_timeout_nlattr_to_obj()
  320  if (tb[CTA_TIMEOUT_GRE_REPLIED]) {  in gre_timeout_nlattr_to_obj()
  322  ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;  in gre_timeout_nlattr_to_obj()
|
D | nf_nat_core.c |
  732  struct nlattr *tb[CTA_PROTONAT_MAX+1];  in nfnetlink_parse_nat_proto() local
  736  err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);  in nfnetlink_parse_nat_proto()
  742  err = l4proto->nlattr_to_range(tb, range);  in nfnetlink_parse_nat_proto()
  760  struct nlattr *tb[CTA_NAT_MAX+1];  in nfnetlink_parse_nat() local
  765  err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);  in nfnetlink_parse_nat()
  769  err = l3proto->nlattr_to_range(tb, range);  in nfnetlink_parse_nat()
  773  if (!tb[CTA_NAT_PROTO])  in nfnetlink_parse_nat()
  776  return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);  in nfnetlink_parse_nat()
|
/linux-4.4.14/drivers/target/ |
D | target_core_hba.c |
   53  struct target_backend *tb, *old;  in transport_backend_register() local
   55  tb = kzalloc(sizeof(*tb), GFP_KERNEL);  in transport_backend_register()
   56  if (!tb)  in transport_backend_register()
   58  tb->ops = ops;  in transport_backend_register()
   65  kfree(tb);  in transport_backend_register()
   69  target_setup_backend_cits(tb);  in transport_backend_register()
   70  list_add_tail(&tb->list, &backend_list);  in transport_backend_register()
   81  struct target_backend *tb;  in target_backend_unregister() local
   84  list_for_each_entry(tb, &backend_list, list) {  in target_backend_unregister()
   85  if (tb->ops == ops) {  in target_backend_unregister()
   [all …]
|
/linux-4.4.14/net/netfilter/ipset/ |
D | ip_set_hash_ipportip.c |
  111  hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_ipportip4_uadt() argument
  122  if (tb[IPSET_ATTR_LINENO])  in hash_ipportip4_uadt()
  123  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_ipportip4_uadt()
  125  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||  in hash_ipportip4_uadt()
  126  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in hash_ipportip4_uadt()
  127  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))  in hash_ipportip4_uadt()
  130  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);  in hash_ipportip4_uadt()
  134  ret = ip_set_get_extensions(set, tb, &ext);  in hash_ipportip4_uadt()
  138  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2);  in hash_ipportip4_uadt()
  142  e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);  in hash_ipportip4_uadt()
   [all …]
|
D | ip_set_hash_ipportnet.c |
  163  hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_ipportnet4_uadt() argument
  176  if (tb[IPSET_ATTR_LINENO])  in hash_ipportnet4_uadt()
  177  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_ipportnet4_uadt()
  179  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||  in hash_ipportnet4_uadt()
  180  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in hash_ipportnet4_uadt()
  181  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||  in hash_ipportnet4_uadt()
  182  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_ipportnet4_uadt()
  185  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_ipportnet4_uadt()
  189  ret = ip_set_get_extensions(set, tb, &ext);  in hash_ipportnet4_uadt()
  193  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);  in hash_ipportnet4_uadt()
   [all …]
|
D | ip_set_hash_ipport.c |
  108  hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_ipport4_uadt() argument
  119  if (tb[IPSET_ATTR_LINENO])  in hash_ipport4_uadt()
  120  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_ipport4_uadt()
  122  if (unlikely(!tb[IPSET_ATTR_IP] ||  in hash_ipport4_uadt()
  123  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in hash_ipport4_uadt()
  124  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))  in hash_ipport4_uadt()
  127  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);  in hash_ipport4_uadt()
  131  ret = ip_set_get_extensions(set, tb, &ext);  in hash_ipport4_uadt()
  135  e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);  in hash_ipport4_uadt()
  137  if (tb[IPSET_ATTR_PROTO]) {  in hash_ipport4_uadt()
   [all …]
|
D | ip_set_hash_netportnet.c |
  180  hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_netportnet4_uadt() argument
  192  if (tb[IPSET_ATTR_LINENO])  in hash_netportnet4_uadt()
  193  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_netportnet4_uadt()
  196  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||  in hash_netportnet4_uadt()
  197  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in hash_netportnet4_uadt()
  198  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||  in hash_netportnet4_uadt()
  199  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_netportnet4_uadt()
  202  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_netportnet4_uadt()
  206  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);  in hash_netportnet4_uadt()
  210  ret = ip_set_get_extensions(set, tb, &ext);  in hash_netportnet4_uadt()
   [all …]
|
D | ip_set_hash_netport.c |
  157  hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_netport4_uadt() argument
  169  if (tb[IPSET_ATTR_LINENO])  in hash_netport4_uadt()
  170  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_netport4_uadt()
  172  if (unlikely(!tb[IPSET_ATTR_IP] ||  in hash_netport4_uadt()
  173  !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in hash_netport4_uadt()
  174  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||  in hash_netport4_uadt()
  175  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_netport4_uadt()
  178  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_netport4_uadt()
  182  ret = ip_set_get_extensions(set, tb, &ext);  in hash_netport4_uadt()
  186  if (tb[IPSET_ATTR_CIDR]) {  in hash_netport4_uadt()
   [all …]
|
D | ip_set_hash_ipmark.c |
  101  hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_ipmark4_uadt() argument
  111  if (tb[IPSET_ATTR_LINENO])  in hash_ipmark4_uadt()
  112  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_ipmark4_uadt()
  114  if (unlikely(!tb[IPSET_ATTR_IP] ||  in hash_ipmark4_uadt()
  115  !ip_set_attr_netorder(tb, IPSET_ATTR_MARK)))  in hash_ipmark4_uadt()
  118  ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);  in hash_ipmark4_uadt()
  122  ret = ip_set_get_extensions(set, tb, &ext);  in hash_ipmark4_uadt()
  126  e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));  in hash_ipmark4_uadt()
  130  !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {  in hash_ipmark4_uadt()
  136  if (tb[IPSET_ATTR_IP_TO]) {  in hash_ipmark4_uadt()
   [all …]
|
D | ip_set_hash_netnet.c |
  165  hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_netnet4_uadt() argument
  176  if (tb[IPSET_ATTR_LINENO])  in hash_netnet4_uadt()
  177  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_netnet4_uadt()
  180  if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||  in hash_netnet4_uadt()
  181  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_netnet4_uadt()
  184  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_netnet4_uadt()
  188  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);  in hash_netnet4_uadt()
  192  ret = ip_set_get_extensions(set, tb, &ext);  in hash_netnet4_uadt()
  196  if (tb[IPSET_ATTR_CIDR]) {  in hash_netnet4_uadt()
  197  e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);  in hash_netnet4_uadt()
   [all …]
|
D | ip_set_hash_net.c |
  139  hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_net4_uadt() argument
  149  if (tb[IPSET_ATTR_LINENO])  in hash_net4_uadt()
  150  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_net4_uadt()
  152  if (unlikely(!tb[IPSET_ATTR_IP] ||  in hash_net4_uadt()
  153  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_net4_uadt()
  156  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_net4_uadt()
  160  ret = ip_set_get_extensions(set, tb, &ext);  in hash_net4_uadt()
  164  if (tb[IPSET_ATTR_CIDR]) {  in hash_net4_uadt()
  165  e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);  in hash_net4_uadt()
  170  if (tb[IPSET_ATTR_CADT_FLAGS]) {  in hash_net4_uadt()
   [all …]
|
D | ip_set_bitmap_ip.c |
  132  bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],  in bitmap_ip_uadt() argument
  142  if (tb[IPSET_ATTR_LINENO])  in bitmap_ip_uadt()
  143  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in bitmap_ip_uadt()
  145  if (unlikely(!tb[IPSET_ATTR_IP]))  in bitmap_ip_uadt()
  148  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in bitmap_ip_uadt()
  152  ret = ip_set_get_extensions(set, tb, &ext);  in bitmap_ip_uadt()
  164  if (tb[IPSET_ATTR_IP_TO]) {  in bitmap_ip_uadt()
  165  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);  in bitmap_ip_uadt()
  173  } else if (tb[IPSET_ATTR_CIDR]) {  in bitmap_ip_uadt()
  174  u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);  in bitmap_ip_uadt()
   [all …]
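bitmap_ip_uadt() accepts a range either as an explicit IPSET_ATTR_IP_TO or as an IPSET_ATTR_CIDR prefix that must be widened to a first/last address pair. The arithmetic is plain mask manipulation, shown here as a runnable standalone helper in host byte order (the kernel does this with its own range helpers):

    #include <stdint.h>
    #include <stdio.h>

    static void cidr_to_range(uint32_t ip, unsigned cidr,
                              uint32_t *from, uint32_t *to)
    {
        uint32_t mask = cidr ? ~((1u << (32 - cidr)) - 1) : 0;

        *from = ip & mask;         /* first address of the prefix */
        *to   = *from | ~mask;     /* last address of the prefix  */
    }

    int main(void)
    {
        uint32_t from, to;

        cidr_to_range(0xc0a80119u /* 192.168.1.25 */, 24, &from, &to);
        printf("%08x-%08x\n", (unsigned)from, (unsigned)to);
        /* prints c0a80100-c0a801ff, i.e. 192.168.1.0-192.168.1.255 */
        return 0;
    }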
|
D | ip_set_hash_netiface.c |
  198  hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_netiface4_uadt() argument
  208  if (tb[IPSET_ATTR_LINENO])  in hash_netiface4_uadt()
  209  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_netiface4_uadt()
  211  if (unlikely(!tb[IPSET_ATTR_IP] ||  in hash_netiface4_uadt()
  212  !tb[IPSET_ATTR_IFACE] ||  in hash_netiface4_uadt()
  213  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in hash_netiface4_uadt()
  216  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_netiface4_uadt()
  220  ret = ip_set_get_extensions(set, tb, &ext);  in hash_netiface4_uadt()
  224  if (tb[IPSET_ATTR_CIDR]) {  in hash_netiface4_uadt()
  225  e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);  in hash_netiface4_uadt()
   [all …]
|
D | ip_set_hash_ip.c |
  101  hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_ip4_uadt() argument
  111  if (tb[IPSET_ATTR_LINENO])  in hash_ip4_uadt()
  112  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_ip4_uadt()
  114  if (unlikely(!tb[IPSET_ATTR_IP]))  in hash_ip4_uadt()
  117  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in hash_ip4_uadt()
  121  ret = ip_set_get_extensions(set, tb, &ext);  in hash_ip4_uadt()
  135  if (tb[IPSET_ATTR_IP_TO]) {  in hash_ip4_uadt()
  136  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);  in hash_ip4_uadt()
  141  } else if (tb[IPSET_ATTR_CIDR]) {  in hash_ip4_uadt()
  142  u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);  in hash_ip4_uadt()
   [all …]
|
D | ip_set_bitmap_port.c |
  129  bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],  in bitmap_port_uadt() argument
  140  if (tb[IPSET_ATTR_LINENO])  in bitmap_port_uadt()
  141  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in bitmap_port_uadt()
  143  if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||  in bitmap_port_uadt()
  144  !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))  in bitmap_port_uadt()
  147  port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);  in bitmap_port_uadt()
  150  ret = ip_set_get_extensions(set, tb, &ext);  in bitmap_port_uadt()
  159  if (tb[IPSET_ATTR_PORT_TO]) {  in bitmap_port_uadt()
  160  port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);  in bitmap_port_uadt()
  224  bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],  in bitmap_port_create() argument
   [all …]
|
D | ip_set_bitmap_ipmac.c |
  241  bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],  in bitmap_ipmac_uadt() argument
  251  if (tb[IPSET_ATTR_LINENO])  in bitmap_ipmac_uadt()
  252  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in bitmap_ipmac_uadt()
  254  if (unlikely(!tb[IPSET_ATTR_IP]))  in bitmap_ipmac_uadt()
  257  ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);  in bitmap_ipmac_uadt()
  261  ret = ip_set_get_extensions(set, tb, &ext);  in bitmap_ipmac_uadt()
  269  if (tb[IPSET_ATTR_ETHER]) {  in bitmap_ipmac_uadt()
  270  memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);  in bitmap_ipmac_uadt()
  315  bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],  in bitmap_ipmac_create() argument
  323  if (unlikely(!tb[IPSET_ATTR_IP] ||  in bitmap_ipmac_create()
   [all …]
|
D | ip_set_core.c |
  294  struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];  in ip_set_get_ipaddr4() local
  298  if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))  in ip_set_get_ipaddr4()
  300  if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))  in ip_set_get_ipaddr4()
  303  *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);  in ip_set_get_ipaddr4()
  311  struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];  in ip_set_get_ipaddr6() local
  316  if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))  in ip_set_get_ipaddr6()
  318  if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))  in ip_set_get_ipaddr6()
  321  memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),  in ip_set_get_ipaddr6()
  359  add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])  in add_extension() argument
  363  !!tb[IPSET_ATTR_TIMEOUT];  in add_extension()
   [all …]
|
D | ip_set_list_set.c |
  351  list_set_uadt(struct ip_set *set, struct nlattr *tb[],  in list_set_uadt() argument
  361  if (tb[IPSET_ATTR_LINENO])  in list_set_uadt()
  362  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in list_set_uadt()
  364  if (unlikely(!tb[IPSET_ATTR_NAME] ||  in list_set_uadt()
  365  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in list_set_uadt()
  368  ret = ip_set_get_extensions(set, tb, &ext);  in list_set_uadt()
  371  e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);  in list_set_uadt()
  380  if (tb[IPSET_ATTR_CADT_FLAGS]) {  in list_set_uadt()
  381  u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);  in list_set_uadt()
  386  if (e.before && !tb[IPSET_ATTR_NAMEREF]) {  in list_set_uadt()
   [all …]
|
D | ip_set_hash_mac.c |
  102  hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],  in hash_mac4_uadt() argument
  110  if (tb[IPSET_ATTR_LINENO])  in hash_mac4_uadt()
  111  *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);  in hash_mac4_uadt()
  113  if (unlikely(!tb[IPSET_ATTR_ETHER]))  in hash_mac4_uadt()
  116  ret = ip_set_get_extensions(set, tb, &ext);  in hash_mac4_uadt()
  119  ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]));  in hash_mac4_uadt()
|
D | ip_set_hash_gen.h |
 1203  IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
 1227  struct nlattr *tb[], u32 flags)  in IPSET_TOKEN()
 1255  if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||  in IPSET_TOKEN()
 1256  !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||  in IPSET_TOKEN()
 1257  !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||  in IPSET_TOKEN()
 1258  !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))  in IPSET_TOKEN()
 1262  if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK)))  in IPSET_TOKEN()
 1266  if (tb[IPSET_ATTR_HASHSIZE]) {  in IPSET_TOKEN()
 1267  hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);  in IPSET_TOKEN()
 1272  if (tb[IPSET_ATTR_MAXELEM])  in IPSET_TOKEN()
   [all …]
|
/linux-4.4.14/drivers/net/wireless/ti/wlcore/ |
D | testmode.c |
   72  static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])  in wl1271_tm_cmd_test() argument
   81  if (!tb[WL1271_TM_ATTR_DATA])  in wl1271_tm_cmd_test()
   84  buf = nla_data(tb[WL1271_TM_ATTR_DATA]);  in wl1271_tm_cmd_test()
   85  buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);  in wl1271_tm_cmd_test()
   87  if (tb[WL1271_TM_ATTR_ANSWER])  in wl1271_tm_cmd_test()
   88  answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);  in wl1271_tm_cmd_test()
  151  static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])  in wl1271_tm_cmd_interrogate() argument
  160  if (!tb[WL1271_TM_ATTR_IE_ID])  in wl1271_tm_cmd_interrogate()
  163  ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);  in wl1271_tm_cmd_interrogate()
  215  static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])  in wl1271_tm_cmd_configure() argument
   [all …]
|
D | vendor_cmd.c |
   35  struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];  in wlcore_vendor_cmd_smart_config_start() local
   43  ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,  in wlcore_vendor_cmd_smart_config_start()
   48  if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID])  in wlcore_vendor_cmd_smart_config_start()
   63  nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));  in wlcore_vendor_cmd_smart_config_start()
  110  struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];  in wlcore_vendor_cmd_smart_config_set_group_key() local
  118  ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,  in wlcore_vendor_cmd_smart_config_set_group_key()
  123  if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] ||  in wlcore_vendor_cmd_smart_config_set_group_key()
  124  !tb[WLCORE_VENDOR_ATTR_GROUP_KEY])  in wlcore_vendor_cmd_smart_config_set_group_key()
  139  nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),  in wlcore_vendor_cmd_smart_config_set_group_key()
  140  nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),  in wlcore_vendor_cmd_smart_config_set_group_key()
   [all …]
|
/linux-4.4.14/net/ipv4/ |
D | inet_hashtables.c |
   66  struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);  in inet_bind_bucket_create() local
   68  if (tb) {  in inet_bind_bucket_create()
   69  write_pnet(&tb->ib_net, net);  in inet_bind_bucket_create()
   70  tb->port = snum;  in inet_bind_bucket_create()
   71  tb->fastreuse = 0;  in inet_bind_bucket_create()
   72  tb->fastreuseport = 0;  in inet_bind_bucket_create()
   73  tb->num_owners = 0;  in inet_bind_bucket_create()
   74  INIT_HLIST_HEAD(&tb->owners);  in inet_bind_bucket_create()
   75  hlist_add_head(&tb->node, &head->chain);  in inet_bind_bucket_create()
   77  return tb;  in inet_bind_bucket_create()
   [all …]
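inet_bind_bucket_create() is the classic allocate/initialize/link sequence: take a bucket from the slab cache, fill in every field, and splice it onto the per-port hash chain before returning it. The same shape with malloc and a plain singly linked list standing in for kmem_cache_alloc() and hlist (field set trimmed):

    #include <stdlib.h>

    struct bind_bucket {
        unsigned short      port;
        signed char         fastreuse;
        int                 num_owners;
        struct bind_bucket *next;           /* plays tb->node         */
    };

    static struct bind_bucket *bind_bucket_create(struct bind_bucket **chain,
                                                  unsigned short snum)
    {
        struct bind_bucket *tb = malloc(sizeof(*tb));

        if (tb) {                           /* initialize fully ...   */
            tb->port = snum;
            tb->fastreuse = 0;
            tb->num_owners = 0;
            tb->next = *chain;              /* ... then link at head, */
            *chain = tb;                    /* like hlist_add_head()  */
        }
        return tb;                          /* NULL on alloc failure  */
    }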
|
D | ip_tunnel_core.c |
  238  struct nlattr *tb[LWTUNNEL_IP_MAX + 1];  in ip_tun_build_state() local
  241  err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);  in ip_tun_build_state()
  253  if (tb[LWTUNNEL_IP_ID])  in ip_tun_build_state()
  254  tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);  in ip_tun_build_state()
  256  if (tb[LWTUNNEL_IP_DST])  in ip_tun_build_state()
  257  tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);  in ip_tun_build_state()
  259  if (tb[LWTUNNEL_IP_SRC])  in ip_tun_build_state()
  260  tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);  in ip_tun_build_state()
  262  if (tb[LWTUNNEL_IP_TTL])  in ip_tun_build_state()
  263  tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);  in ip_tun_build_state()
   [all …]
|
D | fib_frontend.c |
   79  struct fib_table *tb, *alias = NULL;  in fib_new_table() local
   84  tb = fib_get_table(net, id);  in fib_new_table()
   85  if (tb)  in fib_new_table()
   86  return tb;  in fib_new_table()
   91  tb = fib_trie_table(id, alias);  in fib_new_table()
   92  if (!tb)  in fib_new_table()
   97  rcu_assign_pointer(net->ipv4.fib_local, tb);  in fib_new_table()
  100  rcu_assign_pointer(net->ipv4.fib_main, tb);  in fib_new_table()
  103  rcu_assign_pointer(net->ipv4.fib_default, tb);  in fib_new_table()
  110  hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);  in fib_new_table()
   [all …]
|
D | fib_trie.c |
 1081  int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)  in fib_table_insert() argument
 1083  struct trie *t = (struct trie *)tb->tb_data;  in fib_table_insert()
 1099  pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);  in fib_table_insert()
 1112  tb->tb_id) : NULL;  in fib_table_insert()
 1140  (fa->tb_id != tb->tb_id) ||  in fib_table_insert()
 1174  new_fa->tb_id = tb->tb_id;  in fib_table_insert()
 1181  tb->tb_id);  in fib_table_insert()
 1196  tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);  in fib_table_insert()
 1226  new_fa->tb_id = tb->tb_id;  in fib_table_insert()
 1231  cfg->fc_nlflags, tb->tb_id);  in fib_table_insert()
   [all …]
|
D | inet_connection_sock.c |
   47  const struct inet_bind_bucket *tb, bool relax)  in inet_csk_bind_conflict() argument
   61  sk_for_each_bound(sk2, &tb->owners) {  in inet_csk_bind_conflict()
   97  struct inet_bind_bucket *tb;  in inet_csk_get_port() local
  128  inet_bind_bucket_for_each(tb, &head->chain)  in inet_csk_get_port()
  129  if (net_eq(ib_net(tb), net) && tb->port == rover) {  in inet_csk_get_port()
  130  if (((tb->fastreuse > 0 &&  in inet_csk_get_port()
  133  (tb->fastreuseport > 0 &&  in inet_csk_get_port()
  135  uid_eq(tb->fastuid, uid))) &&  in inet_csk_get_port()
  136  (tb->num_owners < smallest_size || smallest_size == -1)) {  in inet_csk_get_port()
  137  smallest_size = tb->num_owners;  in inet_csk_get_port()
   [all …]
|
D | fib_rules.c |
  169  struct nlattr **tb)  in fib4_rule_configure() argument
  198  rule4->src = nla_get_in_addr(tb[FRA_SRC]);  in fib4_rule_configure()
  201  rule4->dst = nla_get_in_addr(tb[FRA_DST]);  in fib4_rule_configure()
  204  if (tb[FRA_FLOW]) {  in fib4_rule_configure()
  205  rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);  in fib4_rule_configure()
  246  struct nlattr **tb)  in fib4_rule_compare() argument
  260  if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))  in fib4_rule_compare()
  264  if (frh->src_len && (rule4->src != nla_get_in_addr(tb[FRA_SRC])))  in fib4_rule_compare()
  267  if (frh->dst_len && (rule4->dst != nla_get_in_addr(tb[FRA_DST])))  in fib4_rule_compare()
|
D | devinet.c |
  578  struct nlattr *tb[IFA_MAX+1];  in inet_rtm_deladdr() local
  586  err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);  in inet_rtm_deladdr()
  599  if (tb[IFA_LOCAL] &&  in inet_rtm_deladdr()
  600  ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))  in inet_rtm_deladdr()
  603  if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))  in inet_rtm_deladdr()
  606  if (tb[IFA_ADDRESS] &&  in inet_rtm_deladdr()
  608  !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))  in inet_rtm_deladdr()
  749  struct nlattr *tb[IFA_MAX+1];  in rtm_to_ifaddr() local
  756  err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);  in rtm_to_ifaddr()
  762  if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])  in rtm_to_ifaddr()
   [all …]
|
D | ip_gre.c |
  916  static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])  in ipgre_tunnel_validate() argument
  934  static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])  in ipgre_tap_validate() argument
  938  if (tb[IFLA_ADDRESS]) {  in ipgre_tap_validate()
  939  if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)  in ipgre_tap_validate()
  941  if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))  in ipgre_tap_validate()
  955  return ipgre_tunnel_validate(tb, data);  in ipgre_tap_validate()
  960  struct nlattr *tb[],  in ipgre_netlink_parms() argument
 1070  struct nlattr *tb[], struct nlattr *data[])  in ipgre_newlink() argument
 1083  ipgre_netlink_parms(dev, data, tb, &p);  in ipgre_newlink()
 1084  return ip_tunnel_newlink(dev, tb, &p);  in ipgre_newlink()
   [all …]
|
/linux-4.4.14/security/keys/ |
D | trusted.c |
  393  static int osap(struct tpm_buf *tb, struct osapsess *s,  in osap() argument
  404  INIT_BUF(tb);  in osap()
  405  store16(tb, TPM_TAG_RQU_COMMAND);  in osap()
  406  store32(tb, TPM_OSAP_SIZE);  in osap()
  407  store32(tb, TPM_ORD_OSAP);  in osap()
  408  store16(tb, type);  in osap()
  409  store32(tb, handle);  in osap()
  410  storebytes(tb, ononce, TPM_NONCE_SIZE);  in osap()
  412  ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);  in osap()
  416  s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);  in osap()
   [all …]
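osap() builds the TPM request by appending big-endian fields with store16()/store32()/storebytes() after INIT_BUF() resets the length. A self-contained sketch of that marshalling style (the buffer size and types are assumptions rather than the trusted.h definitions, and bounds checks are omitted for brevity):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct tpm_buf { unsigned int len; unsigned char data[256]; };

    #define INIT_BUF(tb) ((tb)->len = 0)

    static void store16(struct tpm_buf *tb, uint16_t v)
    {
        tb->data[tb->len++] = v >> 8;       /* TPM wire format is big-endian */
        tb->data[tb->len++] = v & 0xff;
    }

    static void store32(struct tpm_buf *tb, uint32_t v)
    {
        store16(tb, v >> 16);
        store16(tb, v & 0xffff);
    }

    static void storebytes(struct tpm_buf *tb, const void *p, size_t n)
    {
        memcpy(tb->data + tb->len, p, n);
        tb->len += n;
    }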
|
D | trusted.h |
   24  #define INIT_BUF(tb) (tb->len = 0)  argument
|
/linux-4.4.14/include/linux/ |
D | tty_flip.h |
   19  struct tty_buffer *tb = port->buf.tail;  in tty_insert_flip_char() local
   22  change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL);  in tty_insert_flip_char()
   23  if (!change && tb->used < tb->size) {  in tty_insert_flip_char()
   24  if (~tb->flags & TTYB_NORMAL)  in tty_insert_flip_char()
   25  *flag_buf_ptr(tb, tb->used) = flag;  in tty_insert_flip_char()
   26  *char_buf_ptr(tb, tb->used++) = ch;  in tty_insert_flip_char()
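tty_insert_flip_char() is a fast path: if the tail buffer has room, and no flag byte is needed where the buffer was claimed flag-less via TTYB_NORMAL, it stores the character inline; otherwise it falls back to the out-of-line insert. A toy rendering of that logic with a flat buffer (the slow path is a stub):

    #include <stdbool.h>
    #include <stddef.h>

    struct flip_buf {
        size_t used, size;
        unsigned char chars[256];
        unsigned char flags[256];
        bool normal_only;          /* plays TTYB_NORMAL: no flag bytes kept */
    };

    static int insert_char_slowpath(struct flip_buf *b, unsigned char ch,
                                    unsigned char flag)
    { (void)b; (void)ch; (void)flag; return 0; }   /* stub */

    static int insert_flip_char(struct flip_buf *b, unsigned char ch,
                                unsigned char flag /* 0 plays TTY_NORMAL */)
    {
        bool change = b->normal_only && flag != 0;

        if (!change && b->used < b->size) {  /* fast path: room in tail */
            if (!b->normal_only)
                b->flags[b->used] = flag;
            b->chars[b->used++] = ch;
            return 1;                        /* one character stored */
        }
        return insert_char_slowpath(b, ch, flag);
    }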
|
D | rtnetlink.h |
  104  struct nlattr *tb[],
  110  struct nlattr *tb[],
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | smp-tbsync.c |
   24  volatile u64 tb;  member
   47  u64 tb;  in smp_generic_take_timebase() local
   62  tb = tbsync->tb;  in smp_generic_take_timebase()
   71  set_tb(tb >> 32, tb & 0xfffffffful);  in smp_generic_take_timebase()
   80  u64 tb;  in start_contest() local
   87  tb = get_tb() + 400;  in start_contest()
   88  tbsync->tb = tb + offset;  in start_contest()
   89  tbsync->mark = mark = tb + 400;  in start_contest()
   97  while (get_tb() <= tb)  in start_contest()
|
/linux-4.4.14/net/core/ |
D | rtnetlink.c |
 1424  struct nlattr *tb[IFLA_MAX+1];  in rtnl_dump_ifinfo() local
 1444  if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {  in rtnl_dump_ifinfo()
 1446  if (tb[IFLA_EXT_MASK])  in rtnl_dump_ifinfo()
 1447  ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);  in rtnl_dump_ifinfo()
 1481  int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)  in rtnl_nla_parse_ifla() argument
 1483  return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);  in rtnl_nla_parse_ifla()
 1487  struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])  in rtnl_link_get_net() argument
 1493  if (tb[IFLA_NET_NS_PID])  in rtnl_link_get_net()
 1494  net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));  in rtnl_link_get_net()
 1495  else if (tb[IFLA_NET_NS_FD])  in rtnl_link_get_net()
   [all …]
|
D | fib_rules.c |
  246  static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,  in validate_rulemsg() argument
  252  if (tb[FRA_SRC] == NULL ||  in validate_rulemsg()
  254  nla_len(tb[FRA_SRC]) != ops->addr_size)  in validate_rulemsg()
  258  if (tb[FRA_DST] == NULL ||  in validate_rulemsg()
  260  nla_len(tb[FRA_DST]) != ops->addr_size)  in validate_rulemsg()
  274  struct nlattr *tb[FRA_MAX+1];  in fib_nl_newrule() local
  286  err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);  in fib_nl_newrule()
  290  err = validate_rulemsg(frh, tb, ops);  in fib_nl_newrule()
  301  rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])  in fib_nl_newrule()
  304  if (tb[FRA_IIFNAME]) {  in fib_nl_newrule()
   [all …]
|
D | net_namespace.c | 544 struct nlattr *tb[NETNSA_MAX + 1]; in rtnl_net_newid() local 549 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, in rtnl_net_newid() 553 if (!tb[NETNSA_NSID]) in rtnl_net_newid() 555 nsid = nla_get_s32(tb[NETNSA_NSID]); in rtnl_net_newid() 557 if (tb[NETNSA_PID]) in rtnl_net_newid() 558 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); in rtnl_net_newid() 559 else if (tb[NETNSA_FD]) in rtnl_net_newid() 560 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); in rtnl_net_newid() 618 struct nlattr *tb[NETNSA_MAX + 1]; in rtnl_net_getid() local 623 err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, in rtnl_net_getid() [all …]
|
D | neighbour.c | 1656 struct nlattr *tb[NDA_MAX+1]; in neigh_add() local 1664 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL); in neigh_add() 1669 if (tb[NDA_DST] == NULL) in neigh_add() 1680 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) in neigh_add() 1688 if (nla_len(tb[NDA_DST]) < tbl->key_len) in neigh_add() 1690 dst = nla_data(tb[NDA_DST]); in neigh_add() 1691 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; in neigh_add() 1941 struct nlattr *tb[NDTA_MAX+1]; in neightbl_set() local 1945 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, in neightbl_set() 1950 if (tb[NDTA_NAME] == NULL) { in neightbl_set() [all …]
|
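Everything in this net/core group follows one convention: nlmsg_parse() fills a fixed-size table indexed by attribute type, leaving NULL where an attribute was absent, and callers test tb[TYPE] before reading it (rtnl_net_newid() above is a clean specimen). A standalone model of that table-fill step, with stand-in types in place of struct nlattr:

    #include <stddef.h>
    #include <stdint.h>

    enum { ATTR_UNSPEC, ATTR_NSID, ATTR_PID, ATTR_FD, ATTR_MAX = ATTR_FD };

    struct attr {                    /* stand-in for struct nlattr */
        int type;
        uint32_t value;
    };

    /* Fill tb[] so tb[type] points at the last attribute of that type;
     * unknown types are skipped rather than rejected, as nla_parse() does. */
    static void parse_attrs(struct attr *tb[], struct attr *msg, size_t n)
    {
        for (int i = 0; i <= ATTR_MAX; i++)
            tb[i] = NULL;
        for (size_t i = 0; i < n; i++)
            if (msg[i].type > 0 && msg[i].type <= ATTR_MAX)
                tb[msg[i].type] = &msg[i];
    }

After the fill, the rtnl_net_newid() shape is simply: fail if tb[ATTR_NSID] is NULL, read tb[ATTR_NSID]->value otherwise, and treat tb[ATTR_PID] and tb[ATTR_FD] as optional alternatives.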
/linux-4.4.14/net/bridge/netfilter/ |
D | nft_meta_bridge.c | 53 const struct nlattr * const tb[]) in nft_meta_bridge_get_init() argument 58 priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); in nft_meta_bridge_get_init() 65 return nft_meta_get_init(ctx, expr, tb); in nft_meta_bridge_get_init() 68 priv->dreg = nft_parse_register(tb[NFTA_META_DREG]); in nft_meta_bridge_get_init() 92 const struct nlattr * const tb[]) in nft_meta_bridge_select_ops() argument 94 if (tb[NFTA_META_KEY] == NULL) in nft_meta_bridge_select_ops() 97 if (tb[NFTA_META_DREG] && tb[NFTA_META_SREG]) in nft_meta_bridge_select_ops() 100 if (tb[NFTA_META_DREG]) in nft_meta_bridge_select_ops() 103 if (tb[NFTA_META_SREG]) in nft_meta_bridge_select_ops()
|
D | nft_reject_bridge.c | 325 const struct nlattr * const tb[]) in nft_reject_bridge_init() argument 334 if (tb[NFTA_REJECT_TYPE] == NULL) in nft_reject_bridge_init() 337 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE])); in nft_reject_bridge_init() 341 if (tb[NFTA_REJECT_ICMP_CODE] == NULL) in nft_reject_bridge_init() 344 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]); in nft_reject_bridge_init()
|
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/ |
D | testmode.c | 73 struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1]; in ath6kl_tm_cmd() local 77 err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, in ath6kl_tm_cmd() 82 if (!tb[ATH6KL_TM_ATTR_CMD]) in ath6kl_tm_cmd() 85 switch (nla_get_u32(tb[ATH6KL_TM_ATTR_CMD])) { in ath6kl_tm_cmd() 87 if (!tb[ATH6KL_TM_ATTR_DATA]) in ath6kl_tm_cmd() 90 buf = nla_data(tb[ATH6KL_TM_ATTR_DATA]); in ath6kl_tm_cmd() 91 buf_len = nla_len(tb[ATH6KL_TM_ATTR_DATA]); in ath6kl_tm_cmd()
|
/linux-4.4.14/net/sched/ |
D | act_bpf.c | 176 static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) in tcf_bpf_init_from_ops() argument 184 bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]); in tcf_bpf_init_from_ops() 189 if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS])) in tcf_bpf_init_from_ops() 196 memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size); in tcf_bpf_init_from_ops() 215 static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) in tcf_bpf_init_from_efd() argument 221 bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]); in tcf_bpf_init_from_efd() 232 if (tb[TCA_ACT_BPF_NAME]) { in tcf_bpf_init_from_efd() 233 name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]), in tcf_bpf_init_from_efd() 234 nla_len(tb[TCA_ACT_BPF_NAME]), in tcf_bpf_init_from_efd() 278 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; in tcf_bpf_init() local [all …]
|
D | cls_bpf.c | 226 static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog) in cls_bpf_prog_from_ops() argument 234 bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]); in cls_bpf_prog_from_ops() 239 if (bpf_size != nla_len(tb[TCA_BPF_OPS])) in cls_bpf_prog_from_ops() 246 memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); in cls_bpf_prog_from_ops() 265 static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog, in cls_bpf_prog_from_efd() argument 272 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); in cls_bpf_prog_from_efd() 283 if (tb[TCA_BPF_NAME]) { in cls_bpf_prog_from_efd() 284 name = kmemdup(nla_data(tb[TCA_BPF_NAME]), in cls_bpf_prog_from_efd() 285 nla_len(tb[TCA_BPF_NAME]), in cls_bpf_prog_from_efd() 306 unsigned long base, struct nlattr **tb, in cls_bpf_modify_existing() argument [all …]
|
D | cls_flow.c | 383 struct nlattr *tb[TCA_FLOW_MAX + 1]; in flow_change() local 396 err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy); in flow_change() 400 if (tb[TCA_FLOW_BASECLASS]) { in flow_change() 401 baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]); in flow_change() 406 if (tb[TCA_FLOW_KEYS]) { in flow_change() 407 keymask = nla_get_u32(tb[TCA_FLOW_KEYS]); in flow_change() 422 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); in flow_change() 426 err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); in flow_change() 458 if (tb[TCA_FLOW_MODE]) in flow_change() 459 mode = nla_get_u32(tb[TCA_FLOW_MODE]); in flow_change() [all …]
|
D | cls_flower.c | 223 static void fl_set_key_val(struct nlattr **tb, in fl_set_key_val() argument 227 if (!tb[val_type]) in fl_set_key_val() 229 memcpy(val, nla_data(tb[val_type]), len); in fl_set_key_val() 230 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) in fl_set_key_val() 233 memcpy(mask, nla_data(tb[mask_type]), len); in fl_set_key_val() 236 static int fl_set_key(struct net *net, struct nlattr **tb, in fl_set_key() argument 240 if (tb[TCA_FLOWER_INDEV]) { in fl_set_key() 241 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]); in fl_set_key() 249 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, in fl_set_key() 252 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, in fl_set_key() [all …]
|
D | act_skbedit.c | 64 struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; in tcf_skbedit_init() local 74 err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy); in tcf_skbedit_init() 78 if (tb[TCA_SKBEDIT_PARMS] == NULL) in tcf_skbedit_init() 81 if (tb[TCA_SKBEDIT_PRIORITY] != NULL) { in tcf_skbedit_init() 83 priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]); in tcf_skbedit_init() 86 if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { in tcf_skbedit_init() 88 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]); in tcf_skbedit_init() 91 if (tb[TCA_SKBEDIT_MARK] != NULL) { in tcf_skbedit_init() 93 mark = nla_data(tb[TCA_SKBEDIT_MARK]); in tcf_skbedit_init() 99 parm = nla_data(tb[TCA_SKBEDIT_PARMS]); in tcf_skbedit_init()
|
D | sch_codel.c | 119 struct nlattr *tb[TCA_CODEL_MAX + 1]; in codel_change() local 126 err = nla_parse_nested(tb, TCA_CODEL_MAX, opt, codel_policy); in codel_change() 132 if (tb[TCA_CODEL_TARGET]) { in codel_change() 133 u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]); in codel_change() 138 if (tb[TCA_CODEL_CE_THRESHOLD]) { in codel_change() 139 u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]); in codel_change() 144 if (tb[TCA_CODEL_INTERVAL]) { in codel_change() 145 u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]); in codel_change() 150 if (tb[TCA_CODEL_LIMIT]) in codel_change() 151 sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]); in codel_change() [all …]
|
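The *_change() handlers in this directory (codel here; pie, fq, hhf and fq_codel below) share one shape: parse the nested TCA_*_MAX options into tb[], then update only the parameters that were actually supplied, leaving the rest untouched. A reduced, self-contained sketch of that update-if-present step (stand-in attribute type; option names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct attr { int type; uint32_t value; };

    enum { OPT_UNSPEC, OPT_TARGET, OPT_INTERVAL, OPT_LIMIT, OPT_MAX = OPT_LIMIT };

    struct codel_like_params {
        uint32_t target, interval, limit;
    };

    /* Absent entries (NULL) keep their previous value, which is what lets
     * a `tc qdisc change` command be partial. */
    static void apply_change(struct codel_like_params *p, struct attr *tb[])
    {
        if (tb[OPT_TARGET])
            p->target = tb[OPT_TARGET]->value;
        if (tb[OPT_INTERVAL])
            p->interval = tb[OPT_INTERVAL]->value;
        if (tb[OPT_LIMIT])
            p->limit = tb[OPT_LIMIT]->value;
    }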
D | sch_gred.c | 428 struct nlattr *tb[TCA_GRED_MAX + 1]; in gred_change() local 437 err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy); in gred_change() 441 if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) { in gred_change() 442 if (tb[TCA_GRED_LIMIT] != NULL) in gred_change() 443 sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); in gred_change() 447 if (tb[TCA_GRED_PARMS] == NULL || in gred_change() 448 tb[TCA_GRED_STAB] == NULL || in gred_change() 449 tb[TCA_GRED_LIMIT] != NULL) in gred_change() 452 max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; in gred_change() 455 ctl = nla_data(tb[TCA_GRED_PARMS]); in gred_change() [all …]
|
D | act_vlan.c | 71 struct nlattr *tb[TCA_VLAN_MAX + 1]; in tcf_vlan_init() local 83 err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy); in tcf_vlan_init() 87 if (!tb[TCA_VLAN_PARMS]) in tcf_vlan_init() 89 parm = nla_data(tb[TCA_VLAN_PARMS]); in tcf_vlan_init() 94 if (!tb[TCA_VLAN_PUSH_VLAN_ID]) in tcf_vlan_init() 96 push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); in tcf_vlan_init() 100 if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { in tcf_vlan_init() 101 push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); in tcf_vlan_init()
|
D | sch_pie.c | 185 struct nlattr *tb[TCA_PIE_MAX + 1]; in pie_change() local 192 err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy); in pie_change() 199 if (tb[TCA_PIE_TARGET]) { in pie_change() 201 u32 target = nla_get_u32(tb[TCA_PIE_TARGET]); in pie_change() 208 if (tb[TCA_PIE_TUPDATE]) in pie_change() 209 q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])); in pie_change() 211 if (tb[TCA_PIE_LIMIT]) { in pie_change() 212 u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]); in pie_change() 218 if (tb[TCA_PIE_ALPHA]) in pie_change() 219 q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]); in pie_change() [all …]
|
D | sch_tbf.c | 318 struct nlattr *tb[TCA_TBF_MAX + 1]; in tbf_change() local 327 err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); in tbf_change() 332 if (tb[TCA_TBF_PARMS] == NULL) in tbf_change() 335 qopt = nla_data(tb[TCA_TBF_PARMS]); in tbf_change() 338 tb[TCA_TBF_RTAB])); in tbf_change() 342 tb[TCA_TBF_PTAB])); in tbf_change() 347 if (tb[TCA_TBF_RATE64]) in tbf_change() 348 rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); in tbf_change() 351 if (tb[TCA_TBF_BURST]) { in tbf_change() 352 max_size = nla_get_u32(tb[TCA_TBF_BURST]); in tbf_change() [all …]
|
D | cls_fw.c | 191 struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr) in fw_change_attrs() argument 199 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); in fw_change_attrs() 203 if (tb[TCA_FW_CLASSID]) { in fw_change_attrs() 204 f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); in fw_change_attrs() 209 if (tb[TCA_FW_INDEV]) { in fw_change_attrs() 211 ret = tcf_change_indev(net, tb[TCA_FW_INDEV]); in fw_change_attrs() 221 if (tb[TCA_FW_MASK]) { in fw_change_attrs() 222 mask = nla_get_u32(tb[TCA_FW_MASK]); in fw_change_attrs() 245 struct nlattr *tb[TCA_FW_MAX + 1]; in fw_change() local 251 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); in fw_change() [all …]
|
D | act_ipt.c | 89 struct nlattr *tb[TCA_IPT_MAX + 1]; in tcf_ipt_init() local 100 err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy); in tcf_ipt_init() 104 if (tb[TCA_IPT_HOOK] == NULL) in tcf_ipt_init() 106 if (tb[TCA_IPT_TARG] == NULL) in tcf_ipt_init() 109 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); in tcf_ipt_init() 110 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) in tcf_ipt_init() 113 if (tb[TCA_IPT_INDEX] != NULL) in tcf_ipt_init() 114 index = nla_get_u32(tb[TCA_IPT_INDEX]); in tcf_ipt_init() 131 hook = nla_get_u32(tb[TCA_IPT_HOOK]); in tcf_ipt_init() 137 if (tb[TCA_IPT_TABLE] == NULL || in tcf_ipt_init() [all …]
|
D | act_police.c | 120 struct nlattr *tb[TCA_POLICE_MAX + 1]; in tcf_act_police_locate() local 130 err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy); in tcf_act_police_locate() 134 if (tb[TCA_POLICE_TBF] == NULL) in tcf_act_police_locate() 136 size = nla_len(tb[TCA_POLICE_TBF]); in tcf_act_police_locate() 139 parm = nla_data(tb[TCA_POLICE_TBF]); in tcf_act_police_locate() 167 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]); in tcf_act_police_locate() 173 tb[TCA_POLICE_PEAKRATE]); in tcf_act_police_locate() 186 } else if (tb[TCA_POLICE_AVRATE] && in tcf_act_police_locate() 216 if (tb[TCA_POLICE_RESULT]) in tcf_act_police_locate() 217 police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); in tcf_act_police_locate() [all …]
|
D | act_gact.c | 60 struct nlattr *tb[TCA_GACT_MAX + 1]; in tcf_gact_init() local 72 err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy); in tcf_gact_init() 76 if (tb[TCA_GACT_PARMS] == NULL) in tcf_gact_init() 78 parm = nla_data(tb[TCA_GACT_PARMS]); in tcf_gact_init() 81 if (tb[TCA_GACT_PROB] != NULL) in tcf_gact_init() 84 if (tb[TCA_GACT_PROB]) { in tcf_gact_init() 85 p_parm = nla_data(tb[TCA_GACT_PROB]); in tcf_gact_init()
|
D | cls_route.c | 383 struct nlattr **tb, struct nlattr *est, int new, in route4_set_parms() argument 394 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); in route4_set_parms() 399 if (tb[TCA_ROUTE4_TO]) { in route4_set_parms() 402 to = nla_get_u32(tb[TCA_ROUTE4_TO]); in route4_set_parms() 408 if (tb[TCA_ROUTE4_FROM]) { in route4_set_parms() 409 if (tb[TCA_ROUTE4_IIF]) in route4_set_parms() 411 id = nla_get_u32(tb[TCA_ROUTE4_FROM]); in route4_set_parms() 415 } else if (tb[TCA_ROUTE4_IIF]) { in route4_set_parms() 416 id = nla_get_u32(tb[TCA_ROUTE4_IIF]); in route4_set_parms() 449 if (tb[TCA_ROUTE4_TO]) in route4_set_parms() [all …]
|
D | sch_fq.c | 663 struct nlattr *tb[TCA_FQ_MAX + 1]; in fq_change() local 671 err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy); in fq_change() 679 if (tb[TCA_FQ_BUCKETS_LOG]) { in fq_change() 680 u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]); in fq_change() 687 if (tb[TCA_FQ_PLIMIT]) in fq_change() 688 sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]); in fq_change() 690 if (tb[TCA_FQ_FLOW_PLIMIT]) in fq_change() 691 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]); in fq_change() 693 if (tb[TCA_FQ_QUANTUM]) { in fq_change() 694 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]); in fq_change() [all …]
|
D | act_api.c | 511 struct nlattr *tb[TCA_ACT_MAX + 1]; in tcf_action_init_1() local 516 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); in tcf_action_init_1() 520 kind = tb[TCA_ACT_KIND]; in tcf_action_init_1() 564 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind); in tcf_action_init_1() 591 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; in tcf_action_init() local 596 err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL); in tcf_action_init() 600 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { in tcf_action_init() 601 act = tcf_action_init_1(net, tb[i], est, name, ovr, bind); in tcf_action_init() 727 struct nlattr *tb[TCA_ACT_MAX + 1]; in tcf_action_get_1() local 732 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); in tcf_action_get_1() [all …]
|
D | sch_hhf.c | 531 struct nlattr *tb[TCA_HHF_MAX + 1]; in hhf_change() local 541 err = nla_parse_nested(tb, TCA_HHF_MAX, opt, hhf_policy); in hhf_change() 545 if (tb[TCA_HHF_QUANTUM]) in hhf_change() 546 new_quantum = nla_get_u32(tb[TCA_HHF_QUANTUM]); in hhf_change() 548 if (tb[TCA_HHF_NON_HH_WEIGHT]) in hhf_change() 549 new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]); in hhf_change() 557 if (tb[TCA_HHF_BACKLOG_LIMIT]) in hhf_change() 558 sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]); in hhf_change() 563 if (tb[TCA_HHF_HH_FLOWS_LIMIT]) in hhf_change() 564 q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]); in hhf_change() [all …]
|
D | act_simple.c | 83 struct nlattr *tb[TCA_DEF_MAX + 1]; in tcf_simp_init() local 92 err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy); in tcf_simp_init() 96 if (tb[TCA_DEF_PARMS] == NULL) in tcf_simp_init() 99 if (tb[TCA_DEF_DATA] == NULL) in tcf_simp_init() 102 parm = nla_data(tb[TCA_DEF_PARMS]); in tcf_simp_init() 103 defdata = nla_data(tb[TCA_DEF_DATA]); in tcf_simp_init()
|
D | cls_tcindex.c | 239 struct tcindex_filter_result *r, struct nlattr **tb, in tcindex_set_parms() argument 250 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); in tcindex_set_parms() 289 if (tb[TCA_TCINDEX_HASH]) in tcindex_set_parms() 290 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); in tcindex_set_parms() 292 if (tb[TCA_TCINDEX_MASK]) in tcindex_set_parms() 293 cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); in tcindex_set_parms() 295 if (tb[TCA_TCINDEX_SHIFT]) in tcindex_set_parms() 296 cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); in tcindex_set_parms() 312 if (tb[TCA_TCINDEX_FALL_THROUGH]) in tcindex_set_parms() 313 cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); in tcindex_set_parms() [all …]
|
D | sch_fq_codel.c | 331 struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; in fq_codel_change() local 337 err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy); in fq_codel_change() 340 if (tb[TCA_FQ_CODEL_FLOWS]) { in fq_codel_change() 343 q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); in fq_codel_change() 350 if (tb[TCA_FQ_CODEL_TARGET]) { in fq_codel_change() 351 u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]); in fq_codel_change() 356 if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) { in fq_codel_change() 357 u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]); in fq_codel_change() 362 if (tb[TCA_FQ_CODEL_INTERVAL]) { in fq_codel_change() 363 u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); in fq_codel_change() [all …]
|
D | cls_basic.c | 134 struct nlattr **tb, in basic_set_parms() argument 142 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); in basic_set_parms() 146 err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t); in basic_set_parms() 150 if (tb[TCA_BASIC_CLASSID]) { in basic_set_parms() 151 f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]); in basic_set_parms() 171 struct nlattr *tb[TCA_BASIC_MAX + 1]; in basic_change() local 178 err = nla_parse_nested(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS], in basic_change() 213 err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); in basic_change()
|
D | sch_dsmark.c | 117 struct nlattr *tb[TCA_DSMARK_MAX + 1]; in dsmark_change() local 131 err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); in dsmark_change() 135 if (tb[TCA_DSMARK_VALUE]) in dsmark_change() 136 p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]); in dsmark_change() 138 if (tb[TCA_DSMARK_MASK]) in dsmark_change() 139 p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]); in dsmark_change() 343 struct nlattr *tb[TCA_DSMARK_MAX + 1]; in dsmark_init() local 354 err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy); in dsmark_init() 359 indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); in dsmark_init() 364 if (tb[TCA_DSMARK_DEFAULT_INDEX]) in dsmark_init() [all …]
|
D | sch_netem.c | 840 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, in parse_attr() argument 851 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), in parse_attr() 854 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); in parse_attr() 862 struct nlattr *tb[TCA_NETEM_MAX + 1]; in netem_change() local 872 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); in netem_change() 880 if (tb[TCA_NETEM_LOSS]) { in netem_change() 881 ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); in netem_change() 890 if (tb[TCA_NETEM_DELAY_DIST]) { in netem_change() 891 ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]); in netem_change() 919 if (tb[TCA_NETEM_CORR]) in netem_change() [all …]
|
D | sch_cbq.c | 1346 struct nlattr *tb[TCA_CBQ_MAX + 1]; in cbq_init() local 1350 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); in cbq_init() 1354 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL) in cbq_init() 1357 r = nla_data(tb[TCA_CBQ_RATE]); in cbq_init() 1359 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL) in cbq_init() 1396 if (tb[TCA_CBQ_LSSOPT]) in cbq_init() 1397 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT])); in cbq_init() 1727 struct nlattr *tb[TCA_CBQ_MAX + 1]; in cbq_change_class() local 1734 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy); in cbq_change_class() 1748 if (tb[TCA_CBQ_RATE]) { in cbq_change_class() [all …]
|
D | cls_u32.c | 594 struct tc_u_knode *n, struct nlattr **tb, in u32_set_parms() argument 601 err = tcf_exts_validate(net, tp, tb, est, &e, ovr); in u32_set_parms() 606 if (tb[TCA_U32_LINK]) { in u32_set_parms() 607 u32 handle = nla_get_u32(tb[TCA_U32_LINK]); in u32_set_parms() 627 if (tb[TCA_U32_CLASSID]) { in u32_set_parms() 628 n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]); in u32_set_parms() 633 if (tb[TCA_U32_INDEV]) { in u32_set_parms() 635 ret = tcf_change_indev(net, tb[TCA_U32_INDEV]); in u32_set_parms() 735 struct nlattr *tb[TCA_U32_MAX + 1]; in u32_change() local 745 err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy); in u32_change() [all …]
|
D | sch_red.c | 182 struct nlattr *tb[TCA_RED_MAX + 1]; in red_change() local 191 err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy); in red_change() 195 if (tb[TCA_RED_PARMS] == NULL || in red_change() 196 tb[TCA_RED_STAB] == NULL) in red_change() 199 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; in red_change() 201 ctl = nla_data(tb[TCA_RED_PARMS]); in red_change() 222 nla_data(tb[TCA_RED_STAB]), in red_change()
|
D | cls_rsvp.h | 477 struct nlattr *tb[TCA_RSVP_MAX + 1]; in rsvp_change() local 486 err = nla_parse_nested(tb, TCA_RSVP_MAX, opt, rsvp_policy); in rsvp_change() 491 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); in rsvp_change() 511 if (tb[TCA_RSVP_CLASSID]) { in rsvp_change() 512 n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]); in rsvp_change() 525 if (tb[TCA_RSVP_DST] == NULL) in rsvp_change() 535 if (tb[TCA_RSVP_SRC]) { in rsvp_change() 536 memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src)); in rsvp_change() 539 if (tb[TCA_RSVP_PINFO]) { in rsvp_change() 540 pinfo = nla_data(tb[TCA_RSVP_PINFO]); in rsvp_change() [all …]
|
D | act_pedit.c | 36 struct nlattr *tb[TCA_PEDIT_MAX + 1]; in tcf_pedit_init() local 46 err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy); in tcf_pedit_init() 50 if (tb[TCA_PEDIT_PARMS] == NULL) in tcf_pedit_init() 52 parm = nla_data(tb[TCA_PEDIT_PARMS]); in tcf_pedit_init() 54 if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize) in tcf_pedit_init()
|
D | cls_cgroup.c | 76 struct nlattr *tb[TCA_CGROUP_MAX + 1]; in cls_cgroup_change() local 99 err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], in cls_cgroup_change() 105 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); in cls_cgroup_change() 109 err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); in cls_cgroup_change()
|
D | sch_choke.c | 419 struct nlattr *tb[TCA_CHOKE_MAX + 1]; in choke_change() local 429 err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy); in choke_change() 433 if (tb[TCA_CHOKE_PARMS] == NULL || in choke_change() 434 tb[TCA_CHOKE_STAB] == NULL) in choke_change() 437 max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; in choke_change() 439 ctl = nla_data(tb[TCA_CHOKE_PARMS]); in choke_change() 491 nla_data(tb[TCA_CHOKE_STAB]), in choke_change()
|
D | act_mirred.c | 57 struct nlattr *tb[TCA_MIRRED_MAX + 1]; in tcf_mirred_init() local 65 ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy); in tcf_mirred_init() 68 if (tb[TCA_MIRRED_PARMS] == NULL) in tcf_mirred_init() 70 parm = nla_data(tb[TCA_MIRRED_PARMS]); in tcf_mirred_init()
|
D | sch_atm.c | 192 struct nlattr *tb[TCA_ATM_MAX + 1]; in atm_tc_change() local 216 error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy); in atm_tc_change() 220 if (!tb[TCA_ATM_FD]) in atm_tc_change() 222 fd = nla_get_u32(tb[TCA_ATM_FD]); in atm_tc_change() 224 if (tb[TCA_ATM_HDR]) { in atm_tc_change() 225 hdr_len = nla_len(tb[TCA_ATM_HDR]); in atm_tc_change() 226 hdr = nla_data(tb[TCA_ATM_HDR]); in atm_tc_change() 231 if (!tb[TCA_ATM_EXCESS]) in atm_tc_change() 235 atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS])); in atm_tc_change()
|
D | act_connmark.c | 100 struct nlattr *tb[TCA_CONNMARK_MAX + 1]; in tcf_connmark_init() local 108 ret = nla_parse_nested(tb, TCA_CONNMARK_MAX, nla, connmark_policy); in tcf_connmark_init() 112 parm = nla_data(tb[TCA_CONNMARK_PARMS]); in tcf_connmark_init()
|
D | act_nat.c | 41 struct nlattr *tb[TCA_NAT_MAX + 1]; in tcf_nat_init() local 49 err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy); in tcf_nat_init() 53 if (tb[TCA_NAT_PARMS] == NULL) in tcf_nat_init() 55 parm = nla_data(tb[TCA_NAT_PARMS]); in tcf_nat_init()
|
D | cls_api.c | 518 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, in tcf_exts_validate() argument 526 if (exts->police && tb[exts->police]) { in tcf_exts_validate() 527 act = tcf_action_init_1(net, tb[exts->police], rate_tlv, in tcf_exts_validate() 535 } else if (exts->action && tb[exts->action]) { in tcf_exts_validate() 537 err = tcf_action_init(net, tb[exts->action], rate_tlv, in tcf_exts_validate() 545 if ((exts->action && tb[exts->action]) || in tcf_exts_validate() 546 (exts->police && tb[exts->police])) in tcf_exts_validate()
|
D | sch_htb.c | 1023 struct nlattr *tb[TCA_HTB_MAX + 1]; in htb_init() local 1031 err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy); in htb_init() 1035 if (!tb[TCA_HTB_INIT]) in htb_init() 1038 gopt = nla_data(tb[TCA_HTB_INIT]); in htb_init() 1052 if (tb[TCA_HTB_DIRECT_QLEN]) in htb_init() 1053 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init() 1338 struct nlattr *tb[TCA_HTB_MAX + 1]; in htb_change_class() local 1346 err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy); in htb_change_class() 1351 if (tb[TCA_HTB_PARMS] == NULL) in htb_change_class() 1356 hopt = nla_data(tb[TCA_HTB_PARMS]); in htb_change_class() [all …]
|
/linux-4.4.14/drivers/tty/ |
D | tty_buffer.c | 313 struct tty_buffer *tb = port->buf.tail; in tty_insert_flip_string_fixed_flag() local 316 memcpy(char_buf_ptr(tb, tb->used), chars, space); in tty_insert_flip_string_fixed_flag() 317 if (~tb->flags & TTYB_NORMAL) in tty_insert_flip_string_fixed_flag() 318 memset(flag_buf_ptr(tb, tb->used), flag, space); in tty_insert_flip_string_fixed_flag() 319 tb->used += space; in tty_insert_flip_string_fixed_flag() 348 struct tty_buffer *tb = port->buf.tail; in tty_insert_flip_string_flags() local 351 memcpy(char_buf_ptr(tb, tb->used), chars, space); in tty_insert_flip_string_flags() 352 memcpy(flag_buf_ptr(tb, tb->used), flags, space); in tty_insert_flip_string_flags() 353 tb->used += space; in tty_insert_flip_string_flags() 403 struct tty_buffer *tb = port->buf.tail; in tty_prepare_flip_string() local [all …]
|
/linux-4.4.14/net/dcb/ |
D | dcbnl.c | 222 u32 seq, struct nlattr **tb, struct sk_buff *skb) in dcbnl_getstate() argument 233 u32 seq, struct nlattr **tb, struct sk_buff *skb) in dcbnl_getpfccfg() argument 241 if (!tb[DCB_ATTR_PFC_CFG]) in dcbnl_getpfccfg() 248 tb[DCB_ATTR_PFC_CFG], in dcbnl_getpfccfg() 278 u32 seq, struct nlattr **tb, struct sk_buff *skb) in dcbnl_getperm_hwaddr() argument 292 u32 seq, struct nlattr **tb, struct sk_buff *skb) in dcbnl_getcap() argument 300 if (!tb[DCB_ATTR_CAP]) in dcbnl_getcap() 306 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], in dcbnl_getcap() 336 u32 seq, struct nlattr **tb, struct sk_buff *skb) in dcbnl_getnumtcs() argument 344 if (!tb[DCB_ATTR_NUMTCS]) in dcbnl_getnumtcs() [all …]
|
/linux-4.4.14/include/net/ |
D | rtnetlink.h | 61 int (*validate)(struct nlattr *tb[], 66 struct nlattr *tb[], 69 struct nlattr *tb[], 86 int (*slave_validate)(struct nlattr *tb[], 90 struct nlattr *tb[], 141 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); 145 struct nlattr *tb[]); 149 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
|
D | ip_fib.h | 196 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 205 void fib_free_table(struct fib_table *tb); 234 struct fib_table *tb; in fib_lookup() local 239 tb = fib_get_table(net, RT_TABLE_MAIN); in fib_lookup() 240 if (tb) in fib_lookup() 241 err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF); in fib_lookup() 264 struct fib_table *tb; in fib_lookup() local 275 tb = rcu_dereference_rtnl(net->ipv4.fib_main); in fib_lookup() 276 if (tb) in fib_lookup() 277 err = fib_table_lookup(tb, flp, res, flags); in fib_lookup() [all …]
|
D | switchdev.h | 198 int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], 201 int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], 305 static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], in switchdev_port_fdb_add() argument 313 static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], in switchdev_port_fdb_del() argument
|
D | inet_hashtables.h | 93 #define inet_bind_bucket_for_each(tb, head) \ argument 94 hlist_for_each_entry(tb, head, node) 179 struct inet_bind_bucket *tb); 187 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
|
/linux-4.4.14/crypto/ |
D | zlib.c | 86 struct nlattr *tb[ZLIB_COMP_MAX + 1]; in zlib_compress_setup() local 91 ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL); in zlib_compress_setup() 97 window_bits = tb[ZLIB_COMP_WINDOWBITS] in zlib_compress_setup() 98 ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) in zlib_compress_setup() 100 mem_level = tb[ZLIB_COMP_MEMLEVEL] in zlib_compress_setup() 101 ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL]) in zlib_compress_setup() 110 tb[ZLIB_COMP_LEVEL] in zlib_compress_setup() 111 ? nla_get_u32(tb[ZLIB_COMP_LEVEL]) in zlib_compress_setup() 113 tb[ZLIB_COMP_METHOD] in zlib_compress_setup() 114 ? nla_get_u32(tb[ZLIB_COMP_METHOD]) in zlib_compress_setup() [all …]
|
D | algboss.c | 29 struct rtattr *tb[CRYPTO_MAX_ATTRS + 2]; member 75 err = tmpl->create(tmpl, param->tb); in cryptomgr_probe() 79 inst = tmpl->alloc(param->tb); in cryptomgr_probe() 163 param->tb[i + 1] = ¶m->attrs[i].attr; in cryptomgr_schedule_probe() 179 param->tb[i + 1] = NULL; in cryptomgr_schedule_probe() 185 param->tb[0] = ¶m->type.attr; in cryptomgr_schedule_probe()
|
D | seqiv.c | 254 struct rtattr **tb) in seqiv_ablkcipher_create() argument 259 inst = skcipher_geniv_alloc(tmpl, tb, 0, 0); in seqiv_ablkcipher_create() 288 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) in seqiv_aead_create() argument 295 inst = aead_geniv_alloc(tmpl, tb, 0, 0); in seqiv_aead_create() 330 static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb) in seqiv_create() argument 335 algt = crypto_get_attr_type(tb); in seqiv_create() 340 err = seqiv_ablkcipher_create(tmpl, tb); in seqiv_create() 342 err = seqiv_aead_create(tmpl, tb); in seqiv_create()
|
D | cryptd.c | 171 static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, in cryptd_check_internal() argument 176 algt = crypto_get_attr_type(tb); in cryptd_check_internal() 341 struct rtattr **tb, in cryptd_create_blkcipher() argument 351 cryptd_check_internal(tb, &type, &mask); in cryptd_create_blkcipher() 353 alg = crypto_get_attr_alg(tb, type, mask); in cryptd_create_blkcipher() 602 static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, in cryptd_create_hash() argument 613 cryptd_check_internal(tb, &type, &mask); in cryptd_create_hash() 615 salg = shash_attr_alg(tb[1], type, mask); in cryptd_create_hash() 773 struct rtattr **tb, in cryptd_create_aead() argument 784 cryptd_check_internal(tb, &type, &mask); in cryptd_create_aead() [all …]
|
D | ccm.c | 505 struct rtattr **tb, in crypto_ccm_create_common() argument 517 algt = crypto_get_attr_type(tb); in crypto_ccm_create_common() 608 static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) in crypto_ccm_create() argument 614 cipher_name = crypto_attr_alg_name(tb[1]); in crypto_ccm_create() 626 return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, in crypto_ccm_create() 637 struct rtattr **tb) in crypto_ccm_base_create() argument 643 ctr_name = crypto_attr_alg_name(tb[1]); in crypto_ccm_base_create() 647 cipher_name = crypto_attr_alg_name(tb[2]); in crypto_ccm_base_create() 655 return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name, in crypto_ccm_base_create() 808 struct rtattr **tb) in crypto_rfc4309_create() argument [all …]
|
D | arc4.c | 58 u32 ty, ta, tb; in arc4_crypt() local 77 tb = S[ty]; in arc4_crypt() 83 b = tb; in arc4_crypt()
|
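The ta/tb pair in arc4.c above are the two bytes being swapped in RC4's keystream step. For reference, a complete self-contained RC4 in the same style (illustration only: RC4 is cryptographically broken and survives in the kernel solely for legacy protocols):

    #include <stddef.h>
    #include <stdint.h>

    struct arc4_ctx { uint8_t S[256]; uint8_t x, y; };

    /* Key schedule; assumes 1 <= klen <= 256. Starting at x = 1, y = 0
     * mirrors the kernel implementation. */
    static void arc4_setkey(struct arc4_ctx *c, const uint8_t *key, size_t klen)
    {
        for (int i = 0; i < 256; i++)
            c->S[i] = (uint8_t)i;
        for (int i = 0, j = 0; i < 256; i++) {
            j = (j + c->S[i] + key[i % klen]) & 0xff;
            uint8_t t = c->S[i]; c->S[i] = c->S[j]; c->S[j] = t;
        }
        c->x = 1;
        c->y = 0;
    }

    /* Encrypt/decrypt (RC4 is symmetric): XOR each byte with the keystream. */
    static void arc4_crypt(struct arc4_ctx *c, uint8_t *out,
                           const uint8_t *in, size_t len)
    {
        uint8_t x = c->x, y = c->y;

        while (len--) {
            y = (uint8_t)(y + c->S[x]);
            uint8_t ta = c->S[x], tb = c->S[y];   /* the ta/tb seen above */
            c->S[x] = tb;                         /* swap S[x] and S[y] */
            c->S[y] = ta;
            *out++ = *in++ ^ c->S[(uint8_t)(ta + tb)];
            x = (uint8_t)(x + 1);
        }
        c->x = x;
        c->y = y;
    }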
D | pcrypt.c | 273 static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, in pcrypt_create_aead() argument 283 algt = crypto_get_attr_type(tb); in pcrypt_create_aead() 287 name = crypto_attr_alg_name(tb[1]); in pcrypt_create_aead() 336 static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) in pcrypt_create() argument 340 algt = crypto_get_attr_type(tb); in pcrypt_create() 346 return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); in pcrypt_create()
|
D | gcm.c | 620 struct rtattr **tb, in crypto_gcm_create_common() argument 633 algt = crypto_get_attr_type(tb); in crypto_gcm_create_common() 726 static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) in crypto_gcm_create() argument 732 cipher_name = crypto_attr_alg_name(tb[1]); in crypto_gcm_create() 744 return crypto_gcm_create_common(tmpl, tb, full_name, in crypto_gcm_create() 755 struct rtattr **tb) in crypto_gcm_base_create() argument 761 ctr_name = crypto_attr_alg_name(tb[1]); in crypto_gcm_base_create() 765 ghash_name = crypto_attr_alg_name(tb[2]); in crypto_gcm_base_create() 773 return crypto_gcm_create_common(tmpl, tb, full_name, in crypto_gcm_base_create() 923 struct rtattr **tb) in crypto_rfc4106_create() argument [all …]
|
D | ecb.c | 118 static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) in crypto_ecb_alloc() argument 124 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); in crypto_ecb_alloc() 128 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, in crypto_ecb_alloc()
|
D | mcryptd.c | 261 static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type, in mcryptd_check_internal() argument 266 algt = crypto_get_attr_type(tb); in mcryptd_check_internal() 490 static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, in mcryptd_create_hash() argument 501 mcryptd_check_internal(tb, &type, &mask); in mcryptd_create_hash() 503 salg = shash_attr_alg(tb[1], type, mask); in mcryptd_create_hash() 557 static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb) in mcryptd_create() argument 561 algt = crypto_get_attr_type(tb); in mcryptd_create() 567 return mcryptd_create_hash(tmpl, tb, &mqueue); in mcryptd_create()
|
D | ctr.c | 181 static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) in crypto_ctr_alloc() argument 187 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); in crypto_ctr_alloc() 191 alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, in crypto_ctr_alloc() 336 static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) in crypto_rfc3686_alloc() argument 345 algt = crypto_get_attr_type(tb); in crypto_rfc3686_alloc() 352 cipher_name = crypto_attr_alg_name(tb[1]); in crypto_rfc3686_alloc()
|
D | chacha20poly1305.c | 569 static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, in chachapoly_create() argument 584 algt = crypto_get_attr_type(tb); in chachapoly_create() 591 chacha_name = crypto_attr_alg_name(tb[1]); in chachapoly_create() 594 poly_name = crypto_attr_alg_name(tb[2]); in chachapoly_create() 681 static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb) in rfc7539_create() argument 683 return chachapoly_create(tmpl, tb, "rfc7539", 12); in rfc7539_create() 686 static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb) in rfc7539esp_create() argument 688 return chachapoly_create(tmpl, tb, "rfc7539esp", 8); in rfc7539esp_create()
|
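Across these crypto templates the rtattr tb[] array is positional: tb[0] carries the requested type/mask and tb[1], tb[2], ... name the inner algorithms, which the create() functions splice into an instance name such as "gcm(aes)" or "ccm(aes)". A sketch of just that bounds-checked name construction (the CRYPTO_MAX_ALG_NAME value is an assumption, check this kernel's crypto.h):

    #include <stdio.h>

    #define CRYPTO_MAX_ALG_NAME 64   /* assumed value for this kernel era */

    /* Compose "tmpl(inner)" the way crypto_gcm_create() and friends do,
     * rejecting names that would not fit. */
    static int make_instance_name(char dst[CRYPTO_MAX_ALG_NAME],
                                  const char *tmpl, const char *inner)
    {
        if (snprintf(dst, CRYPTO_MAX_ALG_NAME, "%s(%s)", tmpl, inner) >=
            CRYPTO_MAX_ALG_NAME)
            return -1;               /* -ENAMETOOLONG in the kernel */
        return 0;
    }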
/linux-4.4.14/drivers/net/wireless/ath/ath10k/ |
D | wmi-tlv.c | 135 const void **tb = data; in ath10k_wmi_tlv_iter_parse() local 138 tb[tag] = ptr; in ath10k_wmi_tlv_iter_parse() 143 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, in ath10k_wmi_tlv_parse() argument 147 (void *)tb); in ath10k_wmi_tlv_parse() 154 const void **tb; in ath10k_wmi_tlv_parse_alloc() local 157 tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp); in ath10k_wmi_tlv_parse_alloc() 158 if (!tb) in ath10k_wmi_tlv_parse_alloc() 161 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); in ath10k_wmi_tlv_parse_alloc() 163 kfree(tb); in ath10k_wmi_tlv_parse_alloc() 167 return tb; in ath10k_wmi_tlv_parse_alloc() [all …]
|
D | testmode.c | 110 static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[]) in ath10k_tm_cmd_get_version() argument 312 static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) in ath10k_tm_cmd_utf_start() argument 429 static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[]) in ath10k_tm_cmd_utf_stop() argument 453 static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[]) in ath10k_tm_cmd_wmi() argument 467 if (!tb[ATH10K_TM_ATTR_DATA]) { in ath10k_tm_cmd_wmi() 472 if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) { in ath10k_tm_cmd_wmi() 477 buf = nla_data(tb[ATH10K_TM_ATTR_DATA]); in ath10k_tm_cmd_wmi() 478 buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]); in ath10k_tm_cmd_wmi() 479 cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]); in ath10k_tm_cmd_wmi() 513 struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1]; in ath10k_tm_cmd() local [all …]
|
/linux-4.4.14/net/decnet/ |
D | dn_rules.c | 123 struct nlattr **tb) in dn_fib_rule_configure() argument 146 r->src = nla_get_le16(tb[FRA_SRC]); in dn_fib_rule_configure() 149 r->dst = nla_get_le16(tb[FRA_DST]); in dn_fib_rule_configure() 161 struct nlattr **tb) in dn_fib_rule_compare() argument 171 if (frh->src_len && (r->src != nla_get_le16(tb[FRA_SRC]))) in dn_fib_rule_compare() 174 if (frh->dst_len && (r->dst != nla_get_le16(tb[FRA_DST]))) in dn_fib_rule_compare() 185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); in dnet_addr_type() local 189 if (tb) { in dnet_addr_type() 190 if (!tb->lookup(tb, &fld, &res)) { in dnet_addr_type()
|
D | dn_table.c | 408 struct dn_fib_table *tb, in dn_hash_dump_bucket() argument 423 tb->n, in dn_hash_dump_bucket() 437 struct dn_fib_table *tb, in dn_hash_dump_zone() argument 450 if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) { in dn_hash_dump_zone() 459 static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb, in dn_fib_table_dump() argument 464 struct dn_hash *table = (struct dn_hash *)tb->data; in dn_fib_table_dump() 474 if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) { in dn_fib_table_dump() 491 struct dn_fib_table *tb; in dn_fib_dump() local 506 hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) { in dn_fib_dump() 512 if (tb->dump(tb, skb, cb) < 0) in dn_fib_dump() [all …]
|
D | dn_fib.c | 504 struct dn_fib_table *tb; in dn_fib_rtm_delroute() local 519 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0); in dn_fib_rtm_delroute() 520 if (!tb) in dn_fib_rtm_delroute() 523 return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb)); in dn_fib_rtm_delroute() 529 struct dn_fib_table *tb; in dn_fib_rtm_newroute() local 544 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1); in dn_fib_rtm_newroute() 545 if (!tb) in dn_fib_rtm_newroute() 548 return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb)); in dn_fib_rtm_newroute() 553 struct dn_fib_table *tb; in fib_magic() local 585 tb = dn_fib_get_table(RT_MIN_TABLE, 1); in fib_magic() [all …]
|
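dn_fib_rtm_newroute() and dn_fib_rtm_delroute() above never touch table internals; they resolve a table object and call through its insert/delete function pointers. The shape, reduced to stand-in types:

    #include <stddef.h>

    struct route_req { unsigned short dst; };

    struct fib_table_like {
        unsigned n;      /* table id */
        int (*insert)(struct fib_table_like *tb, const struct route_req *r);
        int (*delete)(struct fib_table_like *tb, const struct route_req *r);
    };

    static int newroute(struct fib_table_like *tb, const struct route_req *r)
    {
        if (!tb)
            return -1;              /* table lookup failed */
        return tb->insert(tb, r);   /* dispatch, as in dn_fib_rtm_newroute() */
    }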
D | dn_dev.c | 571 struct nlattr *tb[IFA_MAX+1]; in dn_nl_deladdr() local 584 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); in dn_nl_deladdr() 597 if (tb[IFA_LOCAL] && in dn_nl_deladdr() 598 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) in dn_nl_deladdr() 601 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) in dn_nl_deladdr() 615 struct nlattr *tb[IFA_MAX+1]; in dn_nl_newaddr() local 628 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); in dn_nl_newaddr() 632 if (tb[IFA_LOCAL] == NULL) in dn_nl_newaddr() 648 if (tb[IFA_ADDRESS] == NULL) in dn_nl_newaddr() 649 tb[IFA_ADDRESS] = tb[IFA_LOCAL]; in dn_nl_newaddr() [all …]
|
/linux-4.4.14/arch/sparc/kernel/ |
D | irq_64.c | 1002 struct trap_per_cpu *tb = &trap_block[this_cpu]; in sun4v_register_mondo_queues() local 1004 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, in sun4v_register_mondo_queues() 1005 tb->cpu_mondo_qmask); in sun4v_register_mondo_queues() 1006 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, in sun4v_register_mondo_queues() 1007 tb->dev_mondo_qmask); in sun4v_register_mondo_queues() 1008 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, in sun4v_register_mondo_queues() 1009 tb->resum_qmask); in sun4v_register_mondo_queues() 1010 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, in sun4v_register_mondo_queues() 1011 tb->nonresum_qmask); in sun4v_register_mondo_queues() 1033 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) in init_cpu_send_mondo_info() argument [all …]
|
D | smp_64.c | 289 struct trap_per_cpu *tb; in ldom_startcpu_cpuid() local 308 tb = &trap_block[cpu]; in ldom_startcpu_cpuid() 310 hdesc->fault_info_va = (unsigned long) &tb->fault_info; in ldom_startcpu_cpuid() 311 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); in ldom_startcpu_cpuid() 456 static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt) in spitfire_xcall_deliver() argument 464 cpu_list = __va(tb->cpu_list_pa); in spitfire_xcall_deliver() 465 mondo = __va(tb->cpu_mondo_block_pa); in spitfire_xcall_deliver() 477 static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) in cheetah_xcall_deliver() argument 483 cpu_list = __va(tb->cpu_list_pa); in cheetah_xcall_deliver() 484 mondo = __va(tb->cpu_mondo_block_pa); in cheetah_xcall_deliver() [all …]
|
D | mdesc.c | 827 struct trap_per_cpu *tb) in get_mondo_data() argument 833 get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2)); in get_mondo_data() 836 get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8); in get_mondo_data() 839 get_one_mondo_bits(val, &tb->resum_qmask, 6, 7); in get_mondo_data() 842 get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2); in get_mondo_data() 846 tb->cpu_mondo_qmask + 1, in get_mondo_data() 847 tb->dev_mondo_qmask + 1, in get_mondo_data() 848 tb->resum_qmask + 1, in get_mondo_data() 849 tb->nonresum_qmask + 1); in get_mondo_data() 930 struct trap_per_cpu *tb; in fill_in_one_cpu() local [all …]
|
/linux-4.4.14/arch/m68k/coldfire/ |
D | intc-simr.c | 132 u16 pa, tb; in intc_irq_set_type() local 136 tb = 0x1; in intc_irq_set_type() 139 tb = 0x2; in intc_irq_set_type() 142 tb = 0x3; in intc_irq_set_type() 146 tb = 0; in intc_irq_set_type() 150 if (tb) in intc_irq_set_type() 155 pa = (pa & ~(0x3 << ebit)) | (tb << ebit); in intc_irq_set_type()
|
D | intc-2.c | 148 u16 pa, tb; in intc_irq_set_type() local 152 tb = 0x1; in intc_irq_set_type() 155 tb = 0x2; in intc_irq_set_type() 158 tb = 0x3; in intc_irq_set_type() 162 tb = 0; in intc_irq_set_type() 166 if (tb) in intc_irq_set_type() 171 pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2)); in intc_irq_set_type()
|
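Both ColdFire set_type handlers above map the trigger type to a 2-bit code (01 rising edge, 10 falling edge, 11 both edges) and splice it into a packed register with a read-modify-write. That splice in isolation:

    #include <stdint.h>

    /* Clear the 2-bit field at `shift`, then OR in the new code. */
    static uint16_t set_2bit_field(uint16_t reg, unsigned int shift, uint16_t code)
    {
        return (reg & ~(0x3u << shift)) | ((code & 0x3u) << shift);
    }

intc-2.c uses shift = irq * 2, while intc-simr.c first derives the shift (ebit) from the interrupt number; the read-modify-write itself is identical.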
/linux-4.4.14/net/8021q/ |
D | vlan_netlink.c | 41 static int vlan_validate(struct nlattr *tb[], struct nlattr *data[]) in vlan_validate() argument 47 if (tb[IFLA_ADDRESS]) { in vlan_validate() 48 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) in vlan_validate() 50 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) in vlan_validate() 90 struct nlattr *tb[], struct nlattr *data[]) in vlan_changelink() argument 117 struct nlattr *tb[], struct nlattr *data[]) in vlan_newlink() argument 127 if (!tb[IFLA_LINK]) in vlan_newlink() 129 real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); in vlan_newlink() 147 if (!tb[IFLA_MTU]) in vlan_newlink() 152 err = vlan_changelink(dev, tb, data); in vlan_newlink()
|
/linux-4.4.14/net/ipv6/netfilter/ |
D | nft_dup_ipv6.c | 36 const struct nlattr * const tb[]) in nft_dup_ipv6_init() argument 41 if (tb[NFTA_DUP_SREG_ADDR] == NULL) in nft_dup_ipv6_init() 44 priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]); in nft_dup_ipv6_init() 49 if (tb[NFTA_DUP_SREG_DEV] != NULL) { in nft_dup_ipv6_init() 50 priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]); in nft_dup_ipv6_init()
|
D | nf_conntrack_proto_icmpv6.c | 261 static int icmpv6_nlattr_to_tuple(struct nlattr *tb[], in icmpv6_nlattr_to_tuple() argument 264 if (!tb[CTA_PROTO_ICMPV6_TYPE] || in icmpv6_nlattr_to_tuple() 265 !tb[CTA_PROTO_ICMPV6_CODE] || in icmpv6_nlattr_to_tuple() 266 !tb[CTA_PROTO_ICMPV6_ID]) in icmpv6_nlattr_to_tuple() 269 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE]); in icmpv6_nlattr_to_tuple() 270 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE]); in icmpv6_nlattr_to_tuple() 271 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMPV6_ID]); in icmpv6_nlattr_to_tuple() 292 static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], in icmpv6_timeout_nlattr_to_obj() argument 298 if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) { in icmpv6_timeout_nlattr_to_obj() 300 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ; in icmpv6_timeout_nlattr_to_obj()
|
D | nf_conntrack_l3proto_ipv6.c | 293 static int ipv6_nlattr_to_tuple(struct nlattr *tb[], in ipv6_nlattr_to_tuple() argument 296 if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) in ipv6_nlattr_to_tuple() 299 t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]); in ipv6_nlattr_to_tuple() 300 t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]); in ipv6_nlattr_to_tuple()
|
/linux-4.4.14/net/bridge/ |
D | br_netlink.c | 607 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], in br_set_port_flag() argument 610 if (tb[attrtype]) { in br_set_port_flag() 611 u8 flag = nla_get_u8(tb[attrtype]); in br_set_port_flag() 620 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) in br_setport() argument 625 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); in br_setport() 626 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); in br_setport() 627 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); in br_setport() 628 br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); in br_setport() 629 br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); in br_setport() 630 br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); in br_setport() [all …]
|
/linux-4.4.14/arch/powerpc/boot/ |
D | 4xx.c | 339 u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; in ibm440gp_fixup_clocks() local 363 tb = sys_clk; in ibm440gp_fixup_clocks() 366 tb = cpu; in ibm440gp_fixup_clocks() 385 dt_fixup_cpu_clocks(cpu, tb, 0); in ibm440gp_fixup_clocks() 426 u32 ccr1, tb = tmr_clk; in __ibm440eplike_fixup_clocks() local 469 if (tb == 0) { in __ibm440eplike_fixup_clocks() 474 tb = cpu; in __ibm440eplike_fixup_clocks() 476 dt_fixup_cpu_clocks(cpu, tb, 0); in __ibm440eplike_fixup_clocks() 558 u32 cpu, plb, opb, ebc, tb, uart0, uart1, m; in ibm405gp_fixup_clocks() local 614 tb = cpu; in ibm405gp_fixup_clocks() [all …]
|
D | devtree.c | 62 void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus) in dt_fixup_cpu_clocks() argument 67 printf("CPU timebase-frequency <- 0x%x (%dMHz)\n\r", tb, MHZ(tb)); in dt_fixup_cpu_clocks() 73 setprop_val(devp, "timebase-frequency", tb); in dt_fixup_cpu_clocks() 78 timebase_period_ns = 1000000000 / tb; in dt_fixup_cpu_clocks()
|
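dt_fixup_cpu_clocks() above derives timebase_period_ns by integer division, so sub-nanosecond precision is truncated:

    #include <stdint.h>

    static uint32_t timebase_period_ns(uint32_t tb_hz)
    {
        return 1000000000u / tb_hz;  /* e.g. 400 MHz -> 2 ns, truncated from 2.5 */
    }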
/linux-4.4.14/net/ipv4/netfilter/ |
D | nft_dup_ipv4.c | 38 const struct nlattr * const tb[]) in nft_dup_ipv4_init() argument 43 if (tb[NFTA_DUP_SREG_ADDR] == NULL) in nft_dup_ipv4_init() 46 priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]); in nft_dup_ipv4_init() 51 if (tb[NFTA_DUP_SREG_DEV] != NULL) { in nft_dup_ipv4_init() 52 priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]); in nft_dup_ipv4_init()
|
D | nf_conntrack_proto_icmp.c | 255 static int icmp_nlattr_to_tuple(struct nlattr *tb[], in icmp_nlattr_to_tuple() argument 258 if (!tb[CTA_PROTO_ICMP_TYPE] || in icmp_nlattr_to_tuple() 259 !tb[CTA_PROTO_ICMP_CODE] || in icmp_nlattr_to_tuple() 260 !tb[CTA_PROTO_ICMP_ID]) in icmp_nlattr_to_tuple() 263 tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]); in icmp_nlattr_to_tuple() 264 tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]); in icmp_nlattr_to_tuple() 265 tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]); in icmp_nlattr_to_tuple() 285 static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], in icmp_timeout_nlattr_to_obj() argument 291 if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { in icmp_timeout_nlattr_to_obj() 293 ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; in icmp_timeout_nlattr_to_obj()
|
D | nf_conntrack_l3proto_ipv4.c | 325 static int ipv4_nlattr_to_tuple(struct nlattr *tb[], in ipv4_nlattr_to_tuple() argument 328 if (!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) in ipv4_nlattr_to_tuple() 331 t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]); in ipv4_nlattr_to_tuple() 332 t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]); in ipv4_nlattr_to_tuple()
|
/linux-4.4.14/net/can/ |
D | gw.c | 635 struct nlattr *tb[CGW_MAX+1]; in cgw_parse_attr() local 643 err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, in cgw_parse_attr() 648 if (tb[CGW_LIM_HOPS]) { in cgw_parse_attr() 649 *limhops = nla_get_u8(tb[CGW_LIM_HOPS]); in cgw_parse_attr() 657 if (tb[CGW_MOD_AND]) { in cgw_parse_attr() 658 nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); in cgw_parse_attr() 673 if (tb[CGW_MOD_OR]) { in cgw_parse_attr() 674 nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); in cgw_parse_attr() 689 if (tb[CGW_MOD_XOR]) { in cgw_parse_attr() 690 nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); in cgw_parse_attr() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/ |
D | config.c | 912 struct lnet_text_buf_t *tb; in lnet_splitnets() local 922 tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list); in lnet_splitnets() 925 sep = strchr(tb->ltb_text, ','); in lnet_splitnets() 926 bracket = strchr(tb->ltb_text, '('); in lnet_splitnets() 933 offset2 = offset + (int)(bracket - tb->ltb_text); in lnet_splitnets() 950 net = lnet_netspec2net(tb->ltb_text); in lnet_splitnets() 953 strlen(tb->ltb_text)); in lnet_splitnets() 960 if (tb2 == tb) in lnet_splitnets() 966 strlen(tb->ltb_text)); in lnet_splitnets() 974 offset += (int)(sep - tb->ltb_text); in lnet_splitnets() [all …]
|
/linux-4.4.14/net/phonet/ |
D | pn_netlink.c | 67 struct nlattr *tb[IFA_MAX+1]; in addr_doit() local 81 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy); in addr_doit() 86 if (tb[IFA_LOCAL] == NULL) in addr_doit() 88 pnaddr = nla_get_u8(tb[IFA_LOCAL]); in addr_doit() 232 struct nlattr *tb[RTA_MAX+1]; in route_doit() local 246 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy); in route_doit() 253 if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) in route_doit() 255 dst = nla_get_u8(tb[RTA_DST]); in route_doit() 259 dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF])); in route_doit()
|
/linux-4.4.14/net/ieee802154/6lowpan/ |
D | core.c | 115 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) in lowpan_validate() argument 117 if (tb[IFLA_ADDRESS]) { in lowpan_validate() 118 if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) in lowpan_validate() 125 struct nlattr *tb[], struct nlattr *data[]) in lowpan_newlink() argument 134 if (!tb[IFLA_LINK] || in lowpan_newlink() 138 wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK])); in lowpan_newlink()
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_netlink.c | 68 struct nlattr *tb[], struct nlattr *data[]) in ipoib_changelink() argument 96 struct nlattr *tb[], struct nlattr *data[]) in ipoib_new_child_link() argument 103 if (!tb[IFLA_LINK]) in ipoib_new_child_link() 106 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); in ipoib_new_child_link() 135 err = ipoib_changelink(dev, tb, data); in ipoib_new_child_link()
|
/linux-4.4.14/drivers/net/ |
D | veth.c | 324 static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) in veth_validate() argument 326 if (tb[IFLA_ADDRESS]) { in veth_validate() 327 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) in veth_validate() 329 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) in veth_validate() 332 if (tb[IFLA_MTU]) { in veth_validate() 333 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU]))) in veth_validate() 342 struct nlattr *tb[], struct nlattr *data[]) in veth_newlink() argument 374 tbp = tb; in veth_newlink() 421 if (tb[IFLA_ADDRESS] == NULL) in veth_newlink() 424 if (tb[IFLA_IFNAME]) in veth_newlink() [all …]
|
D | dummy.c | 159 static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) in dummy_validate() argument 161 if (tb[IFLA_ADDRESS]) { in dummy_validate() 162 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) in dummy_validate() 164 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) in dummy_validate()
|
D | ifb.c | 279 static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) in ifb_validate() argument 281 if (tb[IFLA_ADDRESS]) { in ifb_validate() 282 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) in ifb_validate() 284 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) in ifb_validate()
|
D | macvlan.c | 890 static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], in macvlan_fdb_add() argument 915 static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], in macvlan_fdb_del() argument 1126 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) in macvlan_validate() argument 1128 if (tb[IFLA_ADDRESS]) { in macvlan_validate() 1129 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) in macvlan_validate() 1131 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) in macvlan_validate() 1245 struct nlattr *tb[], struct nlattr *data[]) in macvlan_common_newlink() argument 1253 if (!tb[IFLA_LINK]) in macvlan_common_newlink() 1256 lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); in macvlan_common_newlink() 1266 if (!tb[IFLA_MTU]) in macvlan_common_newlink() [all …]
|
/linux-4.4.14/drivers/net/fddi/skfp/ |
D | fplustm.c | 1065 struct s_fpmc *tb ; in mac_get_mc_table() local 1080 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ in mac_get_mc_table() 1081 if (!tb->n) { /* not used */ in mac_get_mc_table() 1083 slot = tb ; in mac_get_mc_table() 1086 if (!ether_addr_equal((char *)&tb->a, (char *)own)) in mac_get_mc_table() 1088 return tb; in mac_get_mc_table() 1106 struct s_fpmc *tb ; in mac_clear_multicast() local 1111 for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){ in mac_clear_multicast() 1112 if (!tb->perm) { in mac_clear_multicast() 1113 tb->n = 0 ; in mac_clear_multicast() [all …]
|
/linux-4.4.14/include/net/netfilter/ |
D | nf_conntrack_l4proto.h | 75 int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); 81 int (*nlattr_to_tuple)(struct nlattr *tb[], 90 int (*nlattr_to_obj)(struct nlattr *tb[], 145 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
|
D | nft_meta.h | 16 const struct nlattr * const tb[]); 20 const struct nlattr * const tb[]);
|
D | nf_nat_l4proto.h | 40 int (*nlattr_to_range)(struct nlattr *tb[], 69 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
|
/linux-4.4.14/include/linux/netfilter/ipset/ |
D | ip_set.h | 165 int (*uadt)(struct ip_set *set, struct nlattr *tb[], 215 struct nlattr *tb[], u32 flags); 423 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], 425 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], 459 ip_set_attr_netorder(struct nlattr *tb[], int type) in ip_set_attr_netorder() argument 461 return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER); in ip_set_attr_netorder() 465 ip_set_optattr_netorder(struct nlattr *tb[], int type) in ip_set_optattr_netorder() argument 467 return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER); in ip_set_optattr_netorder()
|
D | ip_set_timeout.h | 30 ip_set_timeout_uget(struct nlattr *tb) in ip_set_timeout_uget() argument 32 unsigned int timeout = ip_set_get_h32(tb); in ip_set_timeout_uget()
|
D | ip_set_comment.h | 14 ip_set_comment_uget(struct nlattr *tb) in ip_set_comment_uget() argument 16 return nla_data(tb); in ip_set_comment_uget()
|
/linux-4.4.14/net/wireless/ |
D | nl80211.c | 682 struct nlattr *tb[NL80211_KEY_MAX + 1]; in nl80211_parse_key_new() local 683 int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, in nl80211_parse_key_new() 688 k->def = !!tb[NL80211_KEY_DEFAULT]; in nl80211_parse_key_new() 689 k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; in nl80211_parse_key_new() 698 if (tb[NL80211_KEY_IDX]) in nl80211_parse_key_new() 699 k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); in nl80211_parse_key_new() 701 if (tb[NL80211_KEY_DATA]) { in nl80211_parse_key_new() 702 k->p.key = nla_data(tb[NL80211_KEY_DATA]); in nl80211_parse_key_new() 703 k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); in nl80211_parse_key_new() 706 if (tb[NL80211_KEY_SEQ]) { in nl80211_parse_key_new() [all …]
|
/linux-4.4.14/net/ipv6/ |
D | addrlabel.c | 411 struct nlattr *tb[IFAL_MAX+1]; in ip6addrlbl_newdel() local 416 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); in ip6addrlbl_newdel() 426 if (!tb[IFAL_ADDRESS]) in ip6addrlbl_newdel() 428 pfx = nla_data(tb[IFAL_ADDRESS]); in ip6addrlbl_newdel() 430 if (!tb[IFAL_LABEL]) in ip6addrlbl_newdel() 432 label = nla_get_u32(tb[IFAL_LABEL]); in ip6addrlbl_newdel() 528 struct nlattr *tb[IFAL_MAX+1]; in ip6addrlbl_get() local 535 err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); in ip6addrlbl_get() 549 if (!tb[IFAL_ADDRESS]) in ip6addrlbl_get() 551 addr = nla_data(tb[IFAL_ADDRESS]); in ip6addrlbl_get()
|
D | fib6_rules.c |
  196  struct nlattr **tb)  in fib6_rule_configure() argument
  213  rule6->src.addr = nla_get_in6_addr(tb[FRA_SRC]);  in fib6_rule_configure()
  216  rule6->dst.addr = nla_get_in6_addr(tb[FRA_DST]);  in fib6_rule_configure()
  228  struct nlattr **tb)  in fib6_rule_compare() argument
  242  nla_memcmp(tb[FRA_SRC], &rule6->src.addr, sizeof(struct in6_addr)))  in fib6_rule_compare()
  246  nla_memcmp(tb[FRA_DST], &rule6->dst.addr, sizeof(struct in6_addr)))  in fib6_rule_compare()
|
D | ila.c |
  135  struct nlattr *tb[ILA_ATTR_MAX + 1];  in ila_build_state() local
  144  ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,  in ila_build_state()
  149  if (!tb[ILA_ATTR_LOCATOR])  in ila_build_state()
  159  p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);  in ila_build_state()
|
D | route.c |
  2709  struct nlattr *tb[RTA_MAX+1];  in rtm_to_fib6_config() local
  2713  err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);  in rtm_to_fib6_config()
  2744  if (tb[RTA_GATEWAY]) {  in rtm_to_fib6_config()
  2745  cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);  in rtm_to_fib6_config()
  2749  if (tb[RTA_DST]) {  in rtm_to_fib6_config()
  2752  if (nla_len(tb[RTA_DST]) < plen)  in rtm_to_fib6_config()
  2755  nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);  in rtm_to_fib6_config()
  2758  if (tb[RTA_SRC]) {  in rtm_to_fib6_config()
  2761  if (nla_len(tb[RTA_SRC]) < plen)  in rtm_to_fib6_config()
  2764  nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);  in rtm_to_fib6_config()
  [all …]
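rtm_to_fib6_config() copies only as many prefix bytes as the route header claims, and first checks that the attribute actually carries that many; plen is derived from the prefix length in bits. A self-contained sketch of that guard (function names are illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <errno.h>

	/* Bits-to-bytes for a prefix, as the kernel computes plen:
	 * (bits + 7) >> 3 rounds up to whole bytes. */
	static size_t prefix_bytes(unsigned int prefix_bits)
	{
		return (prefix_bits + 7) >> 3;
	}

	/* Copy a prefix into a 16-byte IPv6 buffer, rejecting attributes
	 * shorter than the claimed prefix length requires. */
	static int copy_prefix(uint8_t dst[16], const uint8_t *attr,
			       size_t attr_len, unsigned int prefix_bits)
	{
		size_t plen = prefix_bytes(prefix_bits);

		if (prefix_bits > 128 || attr_len < plen)
			return -EINVAL; /* attribute too short for prefix */

		memset(dst, 0, 16);
		memcpy(dst, attr, plen);
		return 0;
	}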
|
D | ip6_fib.c |
  193  static void fib6_link_table(struct net *net, struct fib6_table *tb)  in fib6_link_table() argument
  201  rwlock_init(&tb->tb6_lock);  in fib6_link_table()
  203  h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);  in fib6_link_table()
  209  hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);  in fib6_link_table()
  231  struct fib6_table *tb;  in fib6_new_table() local
  235  tb = fib6_get_table(net, id);  in fib6_new_table()
  236  if (tb)  in fib6_new_table()
  237  return tb;  in fib6_new_table()
  239  tb = fib6_alloc_table(net, id);  in fib6_new_table()
  240  if (tb)  in fib6_new_table()
  [all …]
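fib6_new_table() is a get-or-create lookup over a hash of table ids: the bucket is tb6_id masked by FIB6_TABLE_HASHSZ - 1 (which only works because the hash size is a power of two), and fib6_link_table() chains the new table at the head of that bucket. A compact user-space sketch of the same bucketing (sizes and struct names are illustrative; a plain next pointer stands in for the kernel's RCU hlist):

	#include <stdint.h>
	#include <stdlib.h>

	#define TABLE_HASHSZ 256  /* must stay a power of two for the mask */

	struct rt_table {
		uint32_t id;
		struct rt_table *next;
	};

	static struct rt_table *table_hash[TABLE_HASHSZ];

	static struct rt_table *table_get(uint32_t id)
	{
		struct rt_table *tb;

		/* id & (size - 1) == id % size when size is a power of two */
		for (tb = table_hash[id & (TABLE_HASHSZ - 1)]; tb; tb = tb->next)
			if (tb->id == id)
				return tb;
		return NULL;
	}

	/* Get-or-create, mirroring fib6_new_table()'s lookup-then-alloc flow. */
	static struct rt_table *table_new(uint32_t id)
	{
		struct rt_table *tb = table_get(id);
		uint32_t h;

		if (tb)
			return tb;
		tb = calloc(1, sizeof(*tb));
		if (!tb)
			return NULL;
		tb->id = id;
		h = id & (TABLE_HASHSZ - 1);
		tb->next = table_hash[h];
		table_hash[h] = tb;  /* head insert, like hlist_add_head_rcu() */
		return tb;
	}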
|
/linux-4.4.14/block/partitions/ |
D | ldm.c |
  423  struct tocblock *tb[4];  in ldm_validate_tocblocks() local
  432  tb[0] = &ldb->toc;  in ldm_validate_tocblocks()
  433  tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);  in ldm_validate_tocblocks()
  434  if (!tb[1]) {  in ldm_validate_tocblocks()
  438  tb[2] = (struct tocblock*)((u8*)tb[1] + sizeof(*tb[1]));  in ldm_validate_tocblocks()
  439  tb[3] = (struct tocblock*)((u8*)tb[2] + sizeof(*tb[2]));  in ldm_validate_tocblocks()
  452  if (ldm_parse_tocblock(data, tb[nr_tbs]))  in ldm_validate_tocblocks()
  461  if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) ||  in ldm_validate_tocblocks()
  462  ((tb[0]->bitmap2_start + tb[0]->bitmap2_size) >  in ldm_validate_tocblocks()
  469  if (!ldm_compare_tocblocks(tb[0], tb[i])) {  in ldm_validate_tocblocks()
  [all …]
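Here tb[] holds four pointers but only one allocation: tb[0] aliases an existing block, while a single kmalloc() sized for three tocblocks backs tb[1] through tb[3], sliced out with pointer arithmetic, so one kfree() releases everything. A standalone sketch of the same slicing (struct contents are illustrative):

	#include <stdlib.h>

	struct tocblock {
		unsigned long bitmap1_start;
		unsigned long bitmap1_size;  /* fields are illustrative */
	};

	/* Point tb[1..3] into a single backing allocation, as
	 * ldm_validate_tocblocks() does; return it so the caller can
	 * release all three with one free(). */
	static struct tocblock *toc_alloc_three(struct tocblock *tb[4],
						struct tocblock *existing)
	{
		struct tocblock *buf = malloc(sizeof(*buf) * 3);

		if (!buf)
			return NULL;
		tb[0] = existing;  /* already owned by the caller */
		tb[1] = &buf[0];
		tb[2] = &buf[1];   /* same as (u8 *)buf + sizeof(*buf) */
		tb[3] = &buf[2];
		return buf;        /* free(buf) releases tb[1..3] at once */
	}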
|
/linux-4.4.14/include/linux/iio/common/ |
D | st_sensors.h |
  157  int (*read_byte) (struct st_sensor_transfer_buffer *tb,
  159  int (*write_byte) (struct st_sensor_transfer_buffer *tb,
  161  int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
  236  struct st_sensor_transfer_buffer tb;  member
|
/linux-4.4.14/net/mpls/ |
D | mpls_iptunnel.c |
  130  struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];  in mpls_build_state() local
  135  ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,  in mpls_build_state()
  140  if (!tb[MPLS_IPTUNNEL_DST])  in mpls_build_state()
  151  ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,  in mpls_build_state()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_nla.c |
  30  int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,  in drbd_nla_parse_nested() argument
  37  err = nla_parse_nested(tb, maxtype, nla, policy);  in drbd_nla_parse_nested()
|
/linux-4.4.14/include/crypto/ |
D | algapi.h |
  56  struct crypto_instance *(*alloc)(struct rtattr **tb);
  58  int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
  163  struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
  164  int crypto_check_attr_type(struct rtattr **tb, u32 type);
  353  static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,  in crypto_get_attr_alg() argument
  356  return crypto_attr_alg(tb[1], type, mask);  in crypto_get_attr_alg()
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | book3s_64_mmu.c |
  58  if (vcpu->arch.slb[i].tb)  in kvmppc_mmu_book3s_64_find_slbe()
  72  vcpu->arch.slb[i].tb ? 't' : ' ',  in kvmppc_mmu_book3s_64_find_slbe()
  82  return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;  in kvmppc_slb_sid_shift()
  146  ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;  in kvmppc_mmu_book3s_64_get_pteg()
  249  if (slbe->tb)  in kvmppc_mmu_book3s_64_xlate()
  399  slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;  in kvmppc_mmu_book3s_64_slbmte()
  400  slbe->esid = slbe->tb ? esid_1t : esid;  in kvmppc_mmu_book3s_64_slbmte()
  592  if (slb->tb) {  in kvmppc_mmu_book3s_64_esid_to_vsid()
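In this entry tb is not a table at all: it is the SLB entry's segment-size bit, set from SLB_VSID_B_1T when the guest installs a 1 TB segment, and everything downstream keys off it, notably the effective-segment-id shift (28 bits for 256 MB segments, 40 bits for 1 TB, matching the architectural segment sizes). A tiny sketch of that derivation (the struct is a stand-in for the kvmppc SLB entry):

	#include <stdint.h>

	#define SID_SHIFT	28	/* 256 MB segment: 2^28 bytes */
	#define SID_SHIFT_1T	40	/* 1 TB segment:   2^40 bytes */

	struct slb_entry {
		int tb;			/* 1 => 1 TB segment, 0 => 256 MB */
		uint64_t esid;
	};

	/* Derive the effective segment id from an effective address,
	 * mirroring kvmppc_slb_sid_shift() and the esid selection in
	 * kvmppc_mmu_book3s_64_slbmte(). */
	static void slb_set_esid(struct slb_entry *slbe, uint64_t eaddr)
	{
		slbe->esid = eaddr >> (slbe->tb ? SID_SHIFT_1T : SID_SHIFT);
	}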
|
/linux-4.4.14/arch/x86/crypto/ |
D | fpu.c |
  102  static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)  in crypto_fpu_alloc() argument
  108  err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);  in crypto_fpu_alloc()
  112  alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,  in crypto_fpu_alloc()
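crypto_fpu_alloc() shows the template-instantiation flow the algapi.h entry above declares: the rtattr array tb carries the instantiation parameters, with tb[1] naming the inner algorithm; crypto_check_attr_type() verifies the requested type and crypto_get_attr_alg() resolves that algorithm. A hedged skeleton of such an .alloc callback against the 4.4 blkcipher API (this is a sketch with trimmed setup, not the fpu template itself):

	#include <crypto/algapi.h>
	#include <linux/err.h>

	static struct crypto_instance *example_tmpl_alloc(struct rtattr **tb)
	{
		struct crypto_instance *inst;
		struct crypto_alg *alg;
		int err;

		/* Reject requests for anything but a block cipher. */
		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
		if (err)
			return ERR_PTR(err);

		/* Resolve the inner algorithm named in tb[1]. */
		alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
					  CRYPTO_ALG_TYPE_MASK);
		if (IS_ERR(alg))
			return ERR_CAST(alg);

		/* Wrap it in an instance; a real template also fills in its
		 * cra_* fields and init/exit hooks before returning. */
		inst = crypto_alloc_instance("example", alg);

		crypto_mod_put(alg);
		return inst;
	}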
|
/linux-4.4.14/drivers/net/bonding/ |
D | bond_netlink.c |
  121  static int bond_validate(struct nlattr *tb[], struct nlattr *data[])  in bond_validate() argument
  123  if (tb[IFLA_ADDRESS]) {  in bond_validate()
  124  if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)  in bond_validate()
  126  if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))  in bond_validate()
  134  struct nlattr *tb[], struct nlattr *data[])  in bond_slave_changelink() argument
  160  struct nlattr *tb[], struct nlattr *data[])  in bond_changelink() argument
  441  struct nlattr *tb[], struct nlattr *data[])  in bond_newlink() argument
  445  err = bond_changelink(bond_dev, tb, data);  in bond_newlink()
|
/linux-4.4.14/arch/um/kernel/ |
D | dyn.lds.S |
  41  .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
  42  .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
  115  .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
|