Lines Matching refs:tab
928 static void gfar_set_mask(u32 mask, struct filer_table *tab) in gfar_set_mask() argument
930 tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT; in gfar_set_mask()
931 tab->fe[tab->index].prop = mask; in gfar_set_mask()
932 tab->index++; in gfar_set_mask()
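
The three matches above (lines 930-932) are essentially the whole body of gfar_set_mask(): it appends one mask entry to the software filer table. A minimal sketch for orientation; the gfar_filer_entry/filer_table layout (two u32 words per entry plus a fill index) is assumed from gianfar.h:

struct gfar_filer_entry {
        u32 ctrl;       /* RQFCR control word */
        u32 prop;       /* RQFPR property/value word */
};

struct filer_table {
        u32 index;                              /* next free entry */
        struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
};

static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        tab->fe[tab->index].prop = mask;
        tab->index++;
}
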
936 static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab) in gfar_set_parse_bits() argument
938 gfar_set_mask(mask, tab); in gfar_set_parse_bits()
939 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | in gfar_set_parse_bits()
941 tab->fe[tab->index].prop = value; in gfar_set_parse_bits()
942 tab->index++; in gfar_set_parse_bits()
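
gfar_set_parse_bits() writes a mask entry followed by a parse-bit compare; line 940 (the continuation of the ctrl assignment) does not reference tab and is therefore not listed. A sketch with that continuation filled in as an assumption:

/* Write the parse-bit compare (e.g. IPv4, TCP) preceded by its mask */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
                                   RQFCR_AND;   /* assumed continuation */
        tab->fe[tab->index].prop = value;
        tab->index++;
}
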
946 struct filer_table *tab) in gfar_set_general_attribute() argument
948 gfar_set_mask(mask, tab); in gfar_set_general_attribute()
949 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag; in gfar_set_general_attribute()
950 tab->fe[tab->index].prop = value; in gfar_set_general_attribute()
951 tab->index++; in gfar_set_general_attribute()
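
gfar_set_general_attribute() is the same mask-then-compare pair, except that the property id is passed in as flag; the signature continuation on line 945 is assumed:

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
                                       struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
        tab->fe[tab->index].prop = value;
        tab->index++;
}
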
969 struct filer_table *tab) in gfar_set_attribute() argument
1022 gfar_set_general_attribute(value, mask, flag, tab); in gfar_set_attribute()
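
Only the signature (line 969) and the final call (line 1022) of gfar_set_attribute() reference tab; the lines in between normalize value/mask for the width of the selected field and bail out when there is nothing to match. A simplified sketch of that idea, with the per-field switch of the real function omitted:

static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
                               struct filer_table *tab)
{
        /* Simplified: the real code special-cases each flag (priority,
         * VLAN id, ports, MAC halves, ...) to widen the mask to the
         * field width before delegating.
         */
        if (!(value | mask))
                return;                 /* nothing to match on */
        if (!mask)
                mask = ~0;              /* exact match on the whole field */

        gfar_set_general_attribute(value, mask, flag, tab);
}
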
1028 struct filer_table *tab) in gfar_set_basic_ip() argument
1032 RQFCR_PID_SIA, tab); in gfar_set_basic_ip()
1035 RQFCR_PID_DIA, tab); in gfar_set_basic_ip()
1038 RQFCR_PID_DPT, tab); in gfar_set_basic_ip()
1041 RQFCR_PID_SPT, tab); in gfar_set_basic_ip()
1042 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); in gfar_set_basic_ip()
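
gfar_set_basic_ip() maps an ethtool TCP/UDP-over-IPv4 spec onto five attributes; the value-side argument halves of the wrapped calls do not reference tab and so are missing from the listing. A sketch, assuming the standard struct ethtool_tcpip4_spec field names and ignoring endianness conversion:

static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
                              struct ethtool_tcpip4_spec *mask,
                              struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->pdst,   mask->pdst,   RQFCR_PID_DPT, tab);
        gfar_set_attribute(value->psrc,   mask->psrc,   RQFCR_PID_SPT, tab);
        gfar_set_attribute(value->tos,    mask->tos,    RQFCR_PID_TOS, tab);
}
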
1048 struct filer_table *tab) in gfar_set_user_ip() argument
1052 RQFCR_PID_SIA, tab); in gfar_set_user_ip()
1055 RQFCR_PID_DIA, tab); in gfar_set_user_ip()
1056 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab); in gfar_set_user_ip()
1057 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab); in gfar_set_user_ip()
1060 RQFCR_PID_ARB, tab); in gfar_set_user_ip()
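
gfar_set_user_ip() does the same for a user-defined IPv4 spec, additionally matching the L4 protocol and the first four L4 payload bytes (RQFCR_PID_ARB). A sketch under the same assumptions (struct ethtool_usrip4_spec, no endianness handling):

static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
                             struct ethtool_usrip4_spec *mask,
                             struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
        gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes,
                           RQFCR_PID_ARB, tab);
}
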
1066 struct filer_table *tab) in gfar_set_ether() argument
1088 upper_temp_mask, RQFCR_PID_SAH, tab); in gfar_set_ether()
1093 lower_temp_mask, RQFCR_PID_SAL, tab); in gfar_set_ether()
1100 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab); in gfar_set_ether()
1118 upper_temp_mask, RQFCR_PID_DAH, tab); in gfar_set_ether()
1123 lower_temp_mask, RQFCR_PID_DAL, tab); in gfar_set_ether()
1129 RQFCR_PID_ETY, tab); in gfar_set_ether()
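
In gfar_set_ether() the 48-bit source and destination MAC addresses are matched as two 24-bit halves (SAH/SAL and DAH/DAL), broadcast destinations are handled through the EBC parse bit (line 1100), and the ethertype is matched last (line 1129). A sketch of the source-address split only, with the temporary names taken from the listing and the byte packing assumed; value and mask are the struct ethhdr pointers passed into the function:

        u32 upper_temp_mask, lower_temp_mask;

        /* Upper 24 bits of the source MAC */
        upper_temp_mask = (mask->h_source[0] << 16) |
                          (mask->h_source[1] <<  8) |
                           mask->h_source[2];
        gfar_set_attribute((value->h_source[0] << 16) |
                           (value->h_source[1] <<  8) |
                            value->h_source[2],
                           upper_temp_mask, RQFCR_PID_SAH, tab);

        /* Lower 24 bits of the source MAC */
        lower_temp_mask = (mask->h_source[3] << 16) |
                          (mask->h_source[4] <<  8) |
                           mask->h_source[5];
        gfar_set_attribute((value->h_source[3] << 16) |
                           (value->h_source[4] <<  8) |
                            value->h_source[5],
                           lower_temp_mask, RQFCR_PID_SAL, tab);
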
1166 struct filer_table *tab) in gfar_convert_to_filer() argument
1172 u32 old_index = tab->index; in gfar_convert_to_filer()
1203 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab); in gfar_convert_to_filer()
1205 &rule->m_u.tcp_ip4_spec, tab); in gfar_convert_to_filer()
1209 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab); in gfar_convert_to_filer()
1211 &rule->m_u.udp_ip4_spec, tab); in gfar_convert_to_filer()
1215 tab); in gfar_convert_to_filer()
1216 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab); in gfar_convert_to_filer()
1219 tab); in gfar_convert_to_filer()
1223 tab); in gfar_convert_to_filer()
1226 tab); in gfar_convert_to_filer()
1230 gfar_set_parse_bits(vlan, vlan_mask, tab); in gfar_convert_to_filer()
1232 (struct ethhdr *) &rule->m_u, tab); in gfar_convert_to_filer()
1240 gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab); in gfar_convert_to_filer()
1241 gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab); in gfar_convert_to_filer()
1245 if (tab->index == old_index) { in gfar_convert_to_filer()
1246 gfar_set_mask(0xFFFFFFFF, tab); in gfar_convert_to_filer()
1247 tab->fe[tab->index].ctrl = 0x20; in gfar_convert_to_filer()
1248 tab->fe[tab->index].prop = 0x0; in gfar_convert_to_filer()
1249 tab->index++; in gfar_convert_to_filer()
1253 tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND); in gfar_convert_to_filer()
1257 tab->fe[tab->index - 1].ctrl |= RQFCR_RJE; in gfar_convert_to_filer()
1259 tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10); in gfar_convert_to_filer()
1262 if (tab->index > (old_index + 2)) { in gfar_convert_to_filer()
1263 tab->fe[old_index + 1].ctrl |= RQFCR_CLE; in gfar_convert_to_filer()
1264 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE; in gfar_convert_to_filer()
1270 if (tab->index > MAX_FILER_CACHE_IDX - 1) in gfar_convert_to_filer()
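
gfar_convert_to_filer() translates one ethtool_rx_flow_spec into filer entries. Its tail, assembled from the fragments above (lines 1245-1270), adds a catch-all entry for otherwise empty rules, encodes the drop or RX-queue action in the last entry, brackets multi-entry rules as a cluster, and rejects tables that outgrow the software cache. A sketch of that tail; the RX_CLS_FLOW_DISC test and the return values are assumptions, and 0x20/0x0 are kept as the raw values shown above:

        /* Rule matched nothing so far: add one catch-all entry so the
         * action below still has something to hang off.
         */
        if (tab->index == old_index) {
                gfar_set_mask(0xFFFFFFFF, tab);
                tab->fe[tab->index].ctrl = 0x20;
                tab->fe[tab->index].prop = 0x0;
                tab->index++;
        }

        /* Terminate the AND chain in the last entry ... */
        tab->fe[tab->index - 1].ctrl &= ~RQFCR_AND;

        /* ... and encode the action there: reject or steer to an RX queue */
        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
        else
                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

        /* Rules longer than two entries are bracketed as a cluster */
        if (tab->index > (old_index + 2)) {
                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
        }

        /* The software cache can overflow even while hardware has room */
        if (tab->index > MAX_FILER_CACHE_IDX - 1)
                return -EBUSY;

        return 0;
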
1290 static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) in gfar_trim_filer_entries() argument
1301 while (end < tab->index) { in gfar_trim_filer_entries()
1302 tab->fe[begin].ctrl = tab->fe[end].ctrl; in gfar_trim_filer_entries()
1303 tab->fe[begin++].prop = tab->fe[end++].prop; in gfar_trim_filer_entries()
1307 while (begin < tab->index) { in gfar_trim_filer_entries()
1308 tab->fe[begin].ctrl = 0x60; in gfar_trim_filer_entries()
1309 tab->fe[begin].prop = 0xFFFFFFFF; in gfar_trim_filer_entries()
1313 tab->index -= length; in gfar_trim_filer_entries()
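
gfar_trim_filer_entries() removes the entries between begin and end by pulling everything behind them forward and padding the freed tail with entries that can never match (ctrl 0x60, prop 0xFFFFFFFF). A sketch; the initial bounds check and the length computation (lines 1292-1299 are not in the listing) are assumed:

static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
        int length;

        if (end > MAX_FILER_CACHE_IDX || end < begin)
                return -EINVAL;         /* assumed bounds check */

        end++;
        length = end - begin;

        /* Pull the remaining entries forward */
        while (end < tab->index) {
                tab->fe[begin].ctrl = tab->fe[end].ctrl;
                tab->fe[begin++].prop = tab->fe[end++].prop;
        }

        /* Pad the freed tail with never-matching entries */
        while (begin < tab->index) {
                tab->fe[begin].ctrl = 0x60;
                tab->fe[begin].prop = 0xFFFFFFFF;
                begin++;
        }

        tab->index -= length;
        return 0;
}
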
1319 struct filer_table *tab) in gfar_expand_filer_entries() argument
1321 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || in gfar_expand_filer_entries()
1325 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), in gfar_expand_filer_entries()
1326 tab->index - length + 1); in gfar_expand_filer_entries()
1328 tab->index += length; in gfar_expand_filer_entries()
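
gfar_expand_filer_entries() opens a gap of length entries at begin by copying the tail upwards. gfar_copy_filer_entries() never dereferences tab itself and is therefore absent from this listing; a plausible helper is sketched alongside (copying from the highest index down so overlapping upward moves stay correct). The begin bound in the sanity check is an assumption:

static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
                                    struct gfar_filer_entry src[], s32 size)
{
        while (size > 0) {
                size--;
                dst[size].ctrl = src[size].ctrl;
                dst[size].prop = src[size].prop;
        }
}

static int gfar_expand_filer_entries(u32 begin, u32 length,
                                     struct filer_table *tab)
{
        if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
            begin > MAX_FILER_CACHE_IDX)
                return -EINVAL;

        gfar_copy_filer_entries(&tab->fe[begin + length], &tab->fe[begin],
                                tab->index - length + 1);

        tab->index += length;
        return 0;
}
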
1332 static int gfar_get_next_cluster_start(int start, struct filer_table *tab) in gfar_get_next_cluster_start() argument
1334 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); in gfar_get_next_cluster_start()
1336 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == in gfar_get_next_cluster_start()
1343 static int gfar_get_next_cluster_end(int start, struct filer_table *tab) in gfar_get_next_cluster_end() argument
1345 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); in gfar_get_next_cluster_end()
1347 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == in gfar_get_next_cluster_end()
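
The two cluster scanners differ only in which combination of RQFCR_AND and RQFCR_CLE they look for: a cluster start carries both bits, a cluster end carries CLE without AND. The exact compare values are an assumption, since the continuation lines are not part of this listing:

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
             start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
                    (RQFCR_AND | RQFCR_CLE))
                        return start;
        }
        return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
             start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
                    (RQFCR_CLE))
                        return start;
        }
        return -1;
}
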
1357 static void gfar_cluster_filer(struct filer_table *tab) in gfar_cluster_filer() argument
1361 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { in gfar_cluster_filer()
1363 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { in gfar_cluster_filer()
1367 if (tab->fe[i].ctrl != tab->fe[j].ctrl) in gfar_cluster_filer()
1369 if (tab->fe[i].prop != tab->fe[j].prop) in gfar_cluster_filer()
1371 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) in gfar_cluster_filer()
1373 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) in gfar_cluster_filer()
1375 iend = gfar_get_next_cluster_end(i, tab); in gfar_cluster_filer()
1376 jend = gfar_get_next_cluster_end(j, tab); in gfar_cluster_filer()
1384 if (gfar_expand_filer_entries(iend, (jend - j), tab) == in gfar_cluster_filer()
1388 gfar_copy_filer_entries(&(tab->fe[iend + 1]), in gfar_cluster_filer()
1389 &(tab->fe[jend + 1]), jend - j); in gfar_cluster_filer()
1393 tab) == -EINVAL) in gfar_cluster_filer()
1397 tab->fe[iend].ctrl &= ~(RQFCR_CLE); in gfar_cluster_filer()
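
gfar_cluster_filer() merges clusters that begin with the same mask/compare pair: for every cluster start i it scans the later starts j, and when the first two entries match it expands a gap behind cluster i, copies cluster j's body into it and trims the now stale copy of j. The fragments above lack the loop and error-handling skeleton; a sketch of how they fit together (break/return placement and the inner j = i reset are assumptions):

static void gfar_cluster_filer(struct filer_table *tab)
{
        s32 i = -1, j, iend, jend;

        while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
                j = i;
                while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
                        /* First entry and its preceding mask must be identical */
                        if (tab->fe[i].ctrl != tab->fe[j].ctrl)
                                break;
                        if (tab->fe[i].prop != tab->fe[j].prop)
                                break;
                        if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
                                break;
                        if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
                                break;

                        iend = gfar_get_next_cluster_end(i, tab);
                        jend = gfar_get_next_cluster_end(j, tab);
                        if (jend == -1 || iend == -1)
                                break;

                        /* Make room behind cluster i, copy j's body there,
                         * then delete the original copy of cluster j.
                         */
                        if (gfar_expand_filer_entries(iend, jend - j, tab) ==
                            -EINVAL)
                                break;
                        gfar_copy_filer_entries(&tab->fe[iend + 1],
                                                &tab->fe[jend + 1], jend - j);
                        if (gfar_trim_filer_entries(jend - 1,
                                                    jend + (jend - j),
                                                    tab) == -EINVAL)
                                return;

                        /* i and j are one cluster now: drop the inner end marker */
                        tab->fe[iend].ctrl &= ~RQFCR_CLE;
                }
        }
}
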
1430 struct filer_table *tab) in gfar_generate_mask_table() argument
1434 for (i = 0; i < tab->index; i++) { in gfar_generate_mask_table()
1437 if (!(tab->fe[i].ctrl & 0xF)) { in gfar_generate_mask_table()
1438 mask_table[and_index].mask = tab->fe[i].prop; in gfar_generate_mask_table()
1448 if (tab->fe[i].ctrl & RQFCR_CLE) in gfar_generate_mask_table()
1451 if (!(tab->fe[i].ctrl & RQFCR_AND)) in gfar_generate_mask_table()
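
gfar_generate_mask_table() walks the table and records every mask entry (an entry whose low control nibble, the PID field, is zero) together with the dependency block it belongs to; a cluster boundary or the end of an AND chain each open a new block. A sketch of the assumed bookkeeping around the matched lines, including an assumed gfar_mask_entry layout:

struct gfar_mask_entry {
        unsigned int mask;      /* mask value valid from start to end */
        unsigned int start;
        unsigned int end;
        unsigned int block;     /* equal block numbers mean dependent entries */
};

static int gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
                                    struct filer_table *tab)
{
        u32 i, and_index = 0, block_index = 1;

        for (i = 0; i < tab->index; i++) {
                /* A zero PID nibble marks a mask entry */
                if (!(tab->fe[i].ctrl & 0xF)) {
                        mask_table[and_index].mask = tab->fe[i].prop;
                        mask_table[and_index].start = i;
                        mask_table[and_index].block = block_index;
                        if (and_index >= 1)
                                mask_table[and_index - 1].end = i - 1;
                        and_index++;
                }
                /* Cluster marks and the end of an AND chain start a new block */
                if (tab->fe[i].ctrl & RQFCR_CLE)
                        block_index++;
                if (!(tab->fe[i].ctrl & RQFCR_AND))
                        block_index++;
        }

        mask_table[and_index - 1].end = i - 1;

        return and_index;
}
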
1516 static int gfar_optimize_filer_masks(struct filer_table *tab) in gfar_optimize_filer_masks() argument
1527 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); in gfar_optimize_filer_masks()
1539 and_index = gfar_generate_mask_table(mask_table, tab); in gfar_optimize_filer_masks()
1548 gfar_copy_filer_entries(&(tab->fe[j]), in gfar_optimize_filer_masks()
1556 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { in gfar_optimize_filer_masks()
1557 if (tab->fe[i].ctrl == 0x80) { in gfar_optimize_filer_masks()
1562 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { in gfar_optimize_filer_masks()
1563 if (tab->fe[i].ctrl == 0x80) { in gfar_optimize_filer_masks()
1564 if (tab->fe[i].prop == tab->fe[previous_mask].prop) { in gfar_optimize_filer_masks()
1568 gfar_trim_filer_entries(i, i, tab); in gfar_optimize_filer_masks()
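
gfar_optimize_filer_masks() snapshots the table with kmemdup() (line 1527), rebuilds it from the generated mask table, and finally collapses consecutive mask entries (ctrl == 0x80, i.e. an AND-chained write of the mask register) whose value did not change. A sketch of that final deduplication pass only; the allocation, rebuild and cleanup around it are omitted:

        u32 i, previous_mask = 0;

        /* Find the first mask entry ... */
        for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
                if (tab->fe[i].ctrl == 0x80) {
                        previous_mask = i++;
                        break;
                }
        }
        /* ... then drop later mask entries that repeat the same value */
        for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
                if (tab->fe[i].ctrl == 0x80) {
                        if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
                                /* Identical to the previous mask: remove it */
                                gfar_trim_filer_entries(i, i, tab);
                                i--;
                        } else {
                                previous_mask = i;
                        }
                }
        }
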
1582 struct filer_table *tab) in gfar_write_filer_table() argument
1585 if (tab->index > MAX_FILER_IDX - 1) in gfar_write_filer_table()
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); in gfar_write_filer_table()
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); in gfar_write_filer_table()
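
gfar_write_filer_table() is the step that touches hardware: it refuses tables larger than the hardware filer and writes every used entry via the driver's per-entry accessor gfar_write_filer(). A sketch of the writing half; the full function also pads the remaining hardware entries with never-matching values:

static int gfar_write_filer_table(struct gfar_private *priv,
                                  struct filer_table *tab)
{
        u32 i = 0;

        if (tab->index > MAX_FILER_IDX - 1)
                return -EBUSY;          /* does not fit into the hardware table */

        /* Write every entry that carries data (ctrl or prop non-zero) */
        for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
             i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);

        /* ... remaining hardware entries are padded in the full function */
        return 0;
}
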
1626 struct filer_table *tab; in gfar_process_filer_changes() local
1631 tab = kzalloc(sizeof(*tab), GFP_KERNEL); in gfar_process_filer_changes()
1632 if (tab == NULL) in gfar_process_filer_changes()
1639 ret = gfar_convert_to_filer(&j->fs, tab); in gfar_process_filer_changes()
1652 i = tab->index; in gfar_process_filer_changes()
1655 gfar_cluster_filer(tab); in gfar_process_filer_changes()
1656 gfar_optimize_filer_masks(tab); in gfar_process_filer_changes()
1661 tab->index, 100 - (100 * tab->index) / i); in gfar_process_filer_changes()
1664 ret = gfar_write_filer_table(priv, tab); in gfar_process_filer_changes()
1671 kfree(tab); in gfar_process_filer_changes()
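
gfar_process_filer_changes() ties the pieces together: allocate a scratch filer_table, convert every active ethtool rule into filer entries, run the clustering and mask-optimization passes, report how much the table shrank, program the hardware and free the scratch table. A condensed sketch of that flow; the rule-list iteration over priv->rx_list.list, the debug message wording and the error label are assumptions:

static int gfar_process_filer_changes(struct gfar_private *priv)
{
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
        s32 i, ret = 0;

        tab = kzalloc(sizeof(*tab), GFP_KERNEL);
        if (tab == NULL)
                return -ENOMEM;

        /* Rebuild the whole software table from the current rule list */
        list_for_each_entry(j, &priv->rx_list.list, list) {
                ret = gfar_convert_to_filer(&j->fs, tab);
                if (ret)
                        goto end;
        }

        /* Optimize: merge clusters, then squeeze out duplicate masks */
        i = tab->index;
        gfar_cluster_filer(tab);
        gfar_optimize_filer_masks(tab);

        pr_debug("\tSummary:\n\tData on hardware: %d\n\tCompression rate: %d%%\n",
                 tab->index, 100 - (100 * tab->index) / i);

        /* Program the hardware filer */
        ret = gfar_write_filer_table(priv, tab);

end:
        kfree(tab);
        return ret;
}
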