rbe 525 drivers/net/ethernet/natsemi/sonic.c bool rbe = false;
rbe 571 drivers/net/ethernet/natsemi/sonic.c rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
rbe 595 drivers/net/ethernet/natsemi/sonic.c if (rbe)
rbe 1725 kernel/trace/trace.h struct ring_buffer_event *rbe);
rbe 85 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 155 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 163 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 171 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 181 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 193 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 203 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 208 kernel/trace/trace_events_hist.c u64 val = operand->fn(operand, elt, rbe, event);
rbe 215 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 221 kernel/trace/trace_events_hist.c u64 val1 = operand1->fn(operand1, elt, rbe, event);
rbe 222 kernel/trace/trace_events_hist.c u64 val2 = operand2->fn(operand2, elt, rbe, event);
rbe 229 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 235 kernel/trace/trace_events_hist.c u64 val1 = operand1->fn(operand1, elt, rbe, event);
rbe 236 kernel/trace/trace_events_hist.c u64 val2 = operand2->fn(operand2, elt, rbe, event);
rbe 243 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 248 kernel/trace/trace_events_hist.c s64 sval = (s64)operand->fn(operand, elt, rbe, event);
rbe 257 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, \
rbe 443 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 1305 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 1533 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 1539 kernel/trace/trace_events_hist.c u64 ts = ring_buffer_event_time_stamp(rbe);
rbe 1549 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 1929 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 3462 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 3477 kernel/trace/trace_events_hist.c var_val = val->fn(val, elt, rbe, rec);
rbe 3493 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 3496 kernel/trace/trace_events_hist.c __update_field_vars(elt, rbe, rec, hist_data->field_vars,
rbe 3502 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 3505 kernel/trace/trace_events_hist.c __update_field_vars(elt, rbe, rec, hist_data->save_vars,
rbe 3682 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 3686 kernel/trace/trace_events_hist.c data->track_data.save_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
rbe 3738 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 3807 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 3849 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 3856 kernel/trace/trace_events_hist.c save_track_data(hist_data, elt, rec, rbe, key, data, var_ref_vals);
rbe 5278 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe,
rbe 5291 kernel/trace/trace_events_hist.c hist_val = hist_field->fn(hist_field, elt, rbe, rec);
rbe 5303 kernel/trace/trace_events_hist.c hist_val = hist_field->fn(hist_field, elt, rbe, rec);
rbe 5309 kernel/trace/trace_events_hist.c update_field_vars(hist_data, elt, rbe, rec);
rbe 5340 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe, void *key,
rbe 5348 kernel/trace/trace_events_hist.c data->fn(hist_data, elt, rec, rbe, key, data, var_ref_vals);
rbe 5353 kernel/trace/trace_events_hist.c struct ring_buffer_event *rbe)
rbe 5377 kernel/trace/trace_events_hist.c field_contents = key_field->fn(key_field, elt, rbe, rec);
rbe 5400 kernel/trace/trace_events_hist.c hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
rbe 5403 kernel/trace/trace_events_hist.c hist_trigger_actions(hist_data, elt, rec, rbe, key, var_ref_vals);
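The three drivers/net/ethernet/natsemi/sonic.c hits above use rbe as a one-bit latch for the SONIC_INT_RBE ("receive buffer exhausted") interrupt status: the flag is OR-accumulated across the receive poll loop (line 571) and acted on once afterwards (line 595). A minimal stand-alone sketch of that latch-and-handle pattern; the register read is a stub in place of SONIC_READ(SONIC_ISR), and the bit value is illustrative, not the hardware's:

#include <stdbool.h>
#include <stdio.h>

#define INT_RBE 0x20  /* illustrative bit; the driver uses SONIC_INT_RBE */

/* Stand-in for SONIC_READ(SONIC_ISR): pretend the status bit is only
 * asserted on the second of three polls. */
static unsigned int read_isr(void)
{
	static const unsigned int fake_isr[] = { 0x0, INT_RBE, 0x0 };
	static unsigned int i;
	return fake_isr[i++ % 3];
}

int main(void)
{
	bool rbe = false;
	int pass;

	/* cf. sonic.c:571 -- OR-accumulate so a transient assertion of the
	 * status bit is not lost before the loop finishes */
	for (pass = 0; pass < 3; pass++)
		rbe = rbe || (read_isr() & INT_RBE);

	if (rbe)  /* cf. sonic.c:595 -- handled once, after the loop */
		puts("receive buffers exhausted: refill and restart RX");
	return 0;
}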
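In the kernel/trace hits above, rbe is the struct ring_buffer_event threaded through every hist_field ->fn() callback, so that values derived from the event itself, notably the timestamp via ring_buffer_event_time_stamp(rbe) at trace_events_hist.c:1539, stay available; compound fields (lines 208, 221-222, 235-236, 248) recurse into their operands and forward rbe unchanged. A simplified user-space sketch of that callback-threading pattern; the struct layout and names are stand-ins (the kernel's callbacks also take a tracing_map_elt, omitted here):

#include <stdint.h>
#include <stdio.h>

struct rb_event { uint64_t time_stamp; };  /* stand-in for struct ring_buffer_event */

struct hist_field {
	uint64_t (*fn)(struct hist_field *field, struct rb_event *rbe, void *event);
	struct hist_field *operand1, *operand2;
	uint64_t constant;
};

static uint64_t field_timestamp(struct hist_field *f, struct rb_event *rbe, void *ev)
{
	(void)f; (void)ev;
	return rbe->time_stamp;  /* cf. ring_buffer_event_time_stamp(rbe) */
}

static uint64_t field_const(struct hist_field *f, struct rb_event *rbe, void *ev)
{
	(void)rbe; (void)ev;
	return f->constant;
}

static uint64_t field_minus(struct hist_field *f, struct rb_event *rbe, void *ev)
{
	/* compound field: evaluate both operands, forwarding rbe unchanged */
	uint64_t v1 = f->operand1->fn(f->operand1, rbe, ev);
	uint64_t v2 = f->operand2->fn(f->operand2, rbe, ev);
	return v1 - v2;
}

int main(void)
{
	struct rb_event ev = { .time_stamp = 1000 };
	struct hist_field ts = { .fn = field_timestamp };
	struct hist_field base = { .fn = field_const, .constant = 400 };
	struct hist_field diff = { .fn = field_minus,
				   .operand1 = &ts, .operand2 = &base };

	/* cf. hist_val = hist_field->fn(hist_field, elt, rbe, rec); */
	printf("%llu\n", (unsigned long long)diff.fn(&diff, &ev, NULL));  /* 600 */
	return 0;
}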
rbe 30 net/netfilter/nft_set_rbtree.c static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
rbe 32 net/netfilter/nft_set_rbtree.c return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
rbe 33 net/netfilter/nft_set_rbtree.c (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
rbe 36 net/netfilter/nft_set_rbtree.c static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
rbe 38 net/netfilter/nft_set_rbtree.c return !nft_rbtree_interval_end(rbe);
rbe 52 net/netfilter/nft_set_rbtree.c const struct nft_rbtree_elem *rbe, *interval = NULL;
rbe 63 net/netfilter/nft_set_rbtree.c rbe = rb_entry(parent, struct nft_rbtree_elem, node);
rbe 65 net/netfilter/nft_set_rbtree.c this = nft_set_ext_key(&rbe->ext);
rbe 71 net/netfilter/nft_set_rbtree.c nft_rbtree_interval_end(rbe) &&
rbe 74 net/netfilter/nft_set_rbtree.c interval = rbe;
rbe 78 net/netfilter/nft_set_rbtree.c if (!nft_set_elem_active(&rbe->ext, genmask)) {
rbe 83 net/netfilter/nft_set_rbtree.c if (nft_set_elem_expired(&rbe->ext))
rbe 86 net/netfilter/nft_set_rbtree.c if (nft_rbtree_interval_end(rbe)) {
rbe 94 net/netfilter/nft_set_rbtree.c *ext = &rbe->ext;
rbe 133 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe, *interval = NULL;
rbe 144 net/netfilter/nft_set_rbtree.c rbe = rb_entry(parent, struct nft_rbtree_elem, node);
rbe 146 net/netfilter/nft_set_rbtree.c this = nft_set_ext_key(&rbe->ext);
rbe 151 net/netfilter/nft_set_rbtree.c interval = rbe;
rbe 155 net/netfilter/nft_set_rbtree.c interval = rbe;
rbe 157 net/netfilter/nft_set_rbtree.c if (!nft_set_elem_active(&rbe->ext, genmask)) {
rbe 162 net/netfilter/nft_set_rbtree.c if (nft_set_elem_expired(&rbe->ext))
rbe 165 net/netfilter/nft_set_rbtree.c if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
rbe 166 net/netfilter/nft_set_rbtree.c (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
rbe 168 net/netfilter/nft_set_rbtree.c *elem = rbe;
rbe 172 net/netfilter/nft_set_rbtree.c if (nft_rbtree_interval_end(rbe))
rbe 198 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
rbe 203 net/netfilter/nft_set_rbtree.c ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
rbe 205 net/netfilter/nft_set_rbtree.c return rbe;
rbe 209 net/netfilter/nft_set_rbtree.c ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
rbe 211 net/netfilter/nft_set_rbtree.c rbe = ERR_PTR(-ENOENT);
rbe 214 net/netfilter/nft_set_rbtree.c return rbe;
rbe 223 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe;
rbe 231 net/netfilter/nft_set_rbtree.c rbe = rb_entry(parent, struct nft_rbtree_elem, node);
rbe 232 net/netfilter/nft_set_rbtree.c d = memcmp(nft_set_ext_key(&rbe->ext),
rbe 240 net/netfilter/nft_set_rbtree.c if (nft_rbtree_interval_end(rbe) &&
rbe 243 net/netfilter/nft_set_rbtree.c } else if (nft_rbtree_interval_start(rbe) &&
rbe 246 net/netfilter/nft_set_rbtree.c } else if (nft_set_elem_active(&rbe->ext, genmask)) {
rbe 247 net/netfilter/nft_set_rbtree.c *ext = &rbe->ext;
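The net/netfilter/nft_set_rbtree.c hits revolve around one encoding: an interval set stores its boundaries as individual rbe elements, where an element carrying NFT_SET_ELEM_INTERVAL_END closes a range and any other element opens one (nft_rbtree_interval_start is simply the negation, lines 30-38 above). Lookup descends the tree remembering the closest boundary at or below the key and matches when that boundary is a start. A self-contained sketch of the encoding, using a sorted array in place of the kernel's rbtree; names and the flag value are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define ELEM_INTERVAL_END 0x1  /* stand-in for NFT_SET_ELEM_INTERVAL_END */

struct elem {
	unsigned int key;
	unsigned int flags;
};

static bool interval_end(const struct elem *rbe)
{
	return rbe->flags & ELEM_INTERVAL_END;  /* cf. nft_rbtree_interval_end() */
}

/* Membership test: find the greatest boundary <= key; the key is in the
 * set iff that boundary opens an interval. (The kernel walks an rbtree;
 * a linear scan of a sorted array keeps the sketch short.) */
static bool lookup(const struct elem *set, int n, unsigned int key)
{
	const struct elem *interval = NULL;
	int i;

	for (i = 0; i < n && set[i].key <= key; i++)
		interval = &set[i];

	return interval && !interval_end(interval);
}

int main(void)
{
	/* Encodes [10, 20) and [30, 40): start elements at 10 and 30,
	 * end elements at 20 and 40. */
	const struct elem set[] = {
		{ 10, 0 }, { 20, ELEM_INTERVAL_END },
		{ 30, 0 }, { 40, ELEM_INTERVAL_END },
	};

	printf("%d %d %d\n", lookup(set, 4, 15),   /* 1: inside [10,20) */
			     lookup(set, 4, 25),   /* 0: between ranges */
			     lookup(set, 4, 35));  /* 1: inside [30,40) */
	return 0;
}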
rbe 264 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe = elem->priv;
rbe 269 net/netfilter/nft_set_rbtree.c err = __nft_rbtree_insert(net, set, rbe, ext);
rbe 281 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe = elem->priv;
rbe 285 net/netfilter/nft_set_rbtree.c rb_erase(&rbe->node, &priv->root);
rbe 294 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe = elem->priv;
rbe 296 net/netfilter/nft_set_rbtree.c nft_set_elem_change_active(net, set, &rbe->ext);
rbe 297 net/netfilter/nft_set_rbtree.c nft_set_elem_clear_busy(&rbe->ext);
rbe 303 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe = priv;
rbe 305 net/netfilter/nft_set_rbtree.c if (!nft_set_elem_mark_busy(&rbe->ext) ||
rbe 306 net/netfilter/nft_set_rbtree.c !nft_is_active(net, &rbe->ext)) {
rbe 307 net/netfilter/nft_set_rbtree.c nft_set_elem_change_active(net, set, &rbe->ext);
rbe 319 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe, *this = elem->priv;
rbe 324 net/netfilter/nft_set_rbtree.c rbe = rb_entry(parent, struct nft_rbtree_elem, node);
rbe 326 net/netfilter/nft_set_rbtree.c d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
rbe 333 net/netfilter/nft_set_rbtree.c if (nft_rbtree_interval_end(rbe) &&
rbe 337 net/netfilter/nft_set_rbtree.c } else if (nft_rbtree_interval_start(rbe) &&
rbe 341 net/netfilter/nft_set_rbtree.c } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
rbe 345 net/netfilter/nft_set_rbtree.c nft_rbtree_flush(net, set, rbe);
rbe 346 net/netfilter/nft_set_rbtree.c return rbe;
rbe 357 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe;
rbe 363 net/netfilter/nft_set_rbtree.c rbe = rb_entry(node, struct nft_rbtree_elem, node);
rbe 367 net/netfilter/nft_set_rbtree.c if (nft_set_elem_expired(&rbe->ext))
rbe 369 net/netfilter/nft_set_rbtree.c if (!nft_set_elem_active(&rbe->ext, iter->genmask))
rbe 372 net/netfilter/nft_set_rbtree.c elem.priv = rbe;
rbe 387 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
rbe 399 net/netfilter/nft_set_rbtree.c rbe = rb_entry(node, struct nft_rbtree_elem, node);
rbe 401 net/netfilter/nft_set_rbtree.c if (nft_rbtree_interval_end(rbe)) {
rbe 402 net/netfilter/nft_set_rbtree.c rbe_end = rbe;
rbe 405 net/netfilter/nft_set_rbtree.c if (!nft_set_elem_expired(&rbe->ext))
rbe 407 net/netfilter/nft_set_rbtree.c if (nft_set_elem_mark_busy(&rbe->ext))
rbe 419 net/netfilter/nft_set_rbtree.c nft_set_gc_batch_add(gcb, rbe);
rbe 420 net/netfilter/nft_set_rbtree.c rbe_prev = rbe;
rbe 470 net/netfilter/nft_set_rbtree.c struct nft_rbtree_elem *rbe;
rbe 477 net/netfilter/nft_set_rbtree.c rbe = rb_entry(node, struct nft_rbtree_elem, node);
rbe 478 net/netfilter/nft_set_rbtree.c nft_set_elem_destroy(set, rbe, true);
rbe 289 scripts/sortextable.c r = rbe;
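The lone scripts/sortextable.c hit is unrelated to the others: there rbe names the tool's 32-bit big-endian read helper, and "r = rbe;" binds the generic reader pointer r to it once the ELF header's byte order is known, so the rest of the tool never tests endianness again. A stand-alone sketch of that function-pointer dispatch; the helper bodies are assumptions in the tool's style, not copies of its code:

#include <stdint.h>
#include <stdio.h>

/* Assumed helpers: decode 32 bits from bytes, regardless of host order. */
static uint32_t rle(const uint32_t *x)  /* little-endian read */
{
	const uint8_t *p = (const uint8_t *)x;
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t rbe(const uint32_t *x)  /* big-endian read */
{
	const uint8_t *p = (const uint8_t *)x;
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8 | (uint32_t)p[3];
}

static uint32_t (*r)(const uint32_t *);  /* generic reader, bound once per file */

int main(void)
{
	uint32_t word;
	uint8_t *raw = (uint8_t *)&word;

	raw[0] = 0x12; raw[1] = 0x34; raw[2] = 0x56; raw[3] = 0x78;

	r = rbe;  /* as if the input ELF were big-endian (ELFDATA2MSB) */
	printf("%#x\n", r(&word));  /* prints 0x12345678 on any host */

	r = rle;  /* little-endian input instead */
	printf("%#x\n", r(&word));  /* prints 0x78563412 */
	return 0;
}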